// asmkit/aarch64/assembler.rs — AArch64 instruction assembler.
1#![allow(clippy::eq_op, clippy::erasing_op, dead_code, unused)]
2use crate::AsmError;
3use crate::aarch64::operands::*;
4use crate::aarch64::{Gp, Reg, ShiftOp, instdb::*};
5use crate::core::buffer::{Constant, LabelUse, Reloc, RelocDistance, RelocTarget};
6use crate::core::globals::CondCode;
7use crate::core::operand::*;
8use crate::core::{buffer::CodeBuffer, emitter::Emitter, patch::PatchSiteId};
9
/// Shorthand for a single set bit: expands to `1 << $e`.
macro_rules! B {
    ($e: expr) => {
        1 << $e
    };
}
15
/// Expands to `true` iff all of the given operands report the same
/// `signature()`. Two-, three-, and four-operand forms are supported.
macro_rules! check_signature {
    ($op0: expr, $op1: expr) => {
        $op0.signature() == $op1.signature()
    };
    ($op0: expr, $op1: expr, $op2: expr) => {
        $op0.signature() == $op1.signature() && $op1.signature() == $op2.signature()
    };

    ($op0: expr, $op1: expr, $op2: expr, $op3: expr) => {
        $op0.signature() == $op1.signature()
            && $op1.signature() == $op2.signature()
            && $op2.signature() == $op3.signature()
    };
}
30
/// AArch64 instruction encoder that writes machine code into a borrowed
/// [`CodeBuffer`].
pub struct Assembler<'a> {
    /// Destination buffer that receives encoded instructions, labels and
    /// relocations.
    pub buffer: &'a mut CodeBuffer,
    /// Last error recorded by an emit method, if any (queried via
    /// `last_error()`).
    last_error: Option<AsmError>,
}
35
/// Packs up to four operand types into a single `u32` operand signature,
/// three bits per operand: operand `N` occupies bits `3*N .. 3*N + 3`.
/// The result is compared against the `isign4` value computed from the
/// actual operands in `emit_n`.
macro_rules! enc_ops {
    ($op0: ident) => {
        OperandType::$op0 as u32
    };

    ($op0: ident, $op1: ident) => {
        OperandType::$op0 as u32 | (OperandType::$op1 as u32) << 3
    };
    ($op0: ident, $op1: ident, $op2: ident) => {
        OperandType::$op0 as u32 | (OperandType::$op1 as u32) << 3 | (OperandType::$op2 as u32) << 6
    };
    ($op0: ident, $op1: ident, $op2: ident, $op3: ident) => {
        OperandType::$op0 as u32
            | (OperandType::$op1 as u32) << 3
            | (OperandType::$op2 as u32) << 6
            | (OperandType::$op3 as u32) << 9
    };
}
54
/// Dispatch trait behind [`Assembler::load_constant`]; implemented once for
/// each supported `(DST, SRC)` operand pair.
pub trait LoadConstantEmitter<DST, SRC> {
    /// Emits code that loads the constant identified by `src` into `dst`.
    fn load_constant(&mut self, dst: DST, src: SRC);
}
58
59impl LoadConstantEmitter<Gp, Constant> for Assembler<'_> {
60    fn load_constant(&mut self, dst: Gp, src: Constant) {
61        let label = self.buffer.get_label_for_constant(src);
62        self.load_constant(dst, label);
63    }
64}
65
66impl LoadConstantEmitter<Gp, Label> for Assembler<'_> {
67    fn load_constant(&mut self, dst: Gp, src: Label) {
68        let label_id = src.id();
69        let offset = self.buffer.label_offset(src);
70
71        self.adrp(dst, src);
72        self.buffer
73            .use_label_at_offset(self.buffer.cur_offset(), src, LabelUse::A64AddAbsLo12);
74        self.add(dst, dst, imm(0));
75    }
76}
77
78impl LoadConstantEmitter<Gp, Sym> for Assembler<'_> {
79    fn load_constant(&mut self, dst: Gp, src: Sym) {
80        let distance = self.buffer.symbol_distance(src);
81
82        if self.buffer.env().pic() {
83            // When PIC is enabled, all syms are referenced through the GOT.
84            self.buffer
85                .add_reloc(Reloc::Aarch64AdrGotPage21, RelocTarget::Sym(src), 0);
86            self.adrp(dst, imm(0));
87            self.buffer
88                .add_reloc(Reloc::Aarch64Ld64GotLo12Nc, RelocTarget::Sym(src), 0);
89            self.ldr(dst, ptr(dst, 0));
90        }
91
92        match distance {
93            RelocDistance::Near => {
94                self.buffer
95                    .add_reloc(Reloc::Aarch64AdrPrelPgHi21, RelocTarget::Sym(src), 0);
96                self.adrp(dst, imm(0));
97                self.buffer
98                    .add_reloc(Reloc::Aarch64AddAbsLo12Nc, RelocTarget::Sym(src), 0);
99                self.add(dst, dst, imm(0));
100                return;
101            }
102
103            RelocDistance::Far => {
104                // With absolute offsets we set up a load from a preallocated space, and then jump
105                // over it.
106                //
107                // Emit the following code:
108                //   ldr     rd, #8
109                //   b       #0x10
110                //   <8 byte space>
111                let constant_start = self.buffer.get_label();
112                let constant_end = self.buffer.get_label();
113                self.ldr(dst, label_ptr(constant_start, 0));
114                self.b(constant_end);
115                self.buffer.bind_label(constant_start);
116                self.buffer.add_reloc(Reloc::Abs8, RelocTarget::Sym(src), 0);
117                self.buffer.write_u64(0);
118                self.buffer.bind_label(constant_end);
119            }
120        }
121    }
122}
123
124impl<'a> Assembler<'a> {
125    pub fn new(buffer: &'a mut CodeBuffer) -> Self {
126        Self {
127            buffer,
128            last_error: None,
129        }
130    }
131
132    pub fn get_label(&mut self) -> Label {
133        self.buffer.get_label()
134    }
135
136    pub fn bind_label(&mut self, label: Label) {
137        self.buffer.bind_label(label);
138    }
139    /// A helper to load a constant address into a register.
140    ///
141    /// Supported variants are:
142    /// ```text
143    /// +------------------+
144    /// |  DST  |  SRC     |
145    /// +------------------+
146    /// |  Gp   | Label    |
147    /// |  Gp   | Sym      |
148    /// |  Gp   | Constant |
149    /// +------------------+
150    /// ```
151    ///
152    /// Note that `Sym` is loaded based on `self.buffer.pic()` and its distance. If PIC is enabled
153    /// then GOT is always used. Otherwise, if symbol is near it uses `adrp` + `add` combination, and
154    /// for far symbols Abs8 reloc is used and data is embedded right into code.
155    pub fn load_constant<DST, SRC>(&mut self, dst: DST, src: SRC)
156    where
157        Self: LoadConstantEmitter<DST, SRC>,
158    {
159        <Self as LoadConstantEmitter<DST, SRC>>::load_constant(self, dst, src);
160    }
161
162    pub fn last_error(&self) -> Option<AsmError> {
163        self.last_error.clone()
164    }
165
166    pub fn emit_n(&mut self, id: impl Into<u32>, ops: &[&Operand]) {
167        let id = id.into();
168        let inst_cc = InstId::extract_cc(id);
169        let inst_id = InstId::extract_real_id(id);
170
171        let inst_info = &INST_INFO_TABLE[inst_id as usize];
172        let mut encoding_index = inst_info.encoding_data_index as usize;
173
174        let mut opcode = Opc(0);
175
176        let isign4;
177        let inst_flags;
178
179        const NOREG: &Operand = &Operand::new();
180
181        let op0 = *ops.get(0).unwrap_or(&NOREG);
182        let op1 = *ops.get(1).unwrap_or(&NOREG);
183        let op2 = *ops.get(2).unwrap_or(&NOREG);
184        let op3 = *ops.get(3).unwrap_or(&NOREG);
185        let op4 = *ops.get(4).unwrap_or(&NOREG);
186        let op5 = *ops.get(5).unwrap_or(&NOREG);
187
188        let mut multiple_op_data = [0u32; 4];
189        let mut multiple_op_count = 0;
190
191        isign4 = op0.op_type() as u32
192            + ((op1.op_type() as u32) << 3)
193            + ((op2.op_type() as u32) << 6)
194            + ((op3.op_type() as u32) << 9);
195        inst_flags = inst_info.flags;
196        let mut offset_format = OffsetFormat::new(OffsetType::SignedOffset, 0, 0, 0, 0, 0, 0, 0);
197        let mut offset_value = 0;
198        let mut label_use: Option<(u32, LabelUse)> = None;
199        let mut reloc: Option<Reloc> = None;
200        let mut rm_rel = &Operand::new();
201
202        macro_rules! emit_disp_imm {
203            () => {
204                if (offset_value & ((1 << offset_format.imm_discard_lsb()) - 1)) != 0 {
205                    self.last_error = Some(AsmError::InvalidOperand);
206                    return;
207                }
208
209                let disp_imm64 = (offset_value as i64) >> offset_format.imm_discard_lsb() as i64;
210                let disp_imm32 = (disp_imm64 & (1 << offset_format.imm_bit_count()) - 1) as u32;
211
212                match offset_format.typ() {
213                    OffsetType::SignedOffset => {
214                        opcode.add_imm(disp_imm32 as _, offset_format.imm_bit_shift() as _);
215                        return self.buffer.write_u32(opcode.get());
216                    }
217
218                    _ => {
219                        let imm_lo = disp_imm32 & 0x3;
220                        let imm_hi = disp_imm32 >> 2;
221                        opcode.add_imm(imm_lo, 29);
222                        opcode.add_imm(imm_hi, 5);
223                        return self.buffer.write_u32(opcode.get());
224                    }
225                }
226            };
227        }
228
229        macro_rules! emit_rel {
230            () => {
231                if rm_rel.is_label() || (rm_rel.is_mem() && rm_rel.as_::<Mem>().has_base_label()) {
232                    let label_id;
233                    let mut label_offset = 0;
234
235                    if rm_rel.is_label() {
236                        label_id = rm_rel.as_::<Label>().id();
237                    } else {
238                        label_id = rm_rel.as_::<Mem>().base_id();
239                        label_offset = rm_rel.as_::<Mem>().offset();
240                    }
241
242                    if self.buffer.is_bound(Label::from_id(label_id)) {
243                        offset_value = self.buffer.label_offset(Label::from_id(label_id)) as i64
244                            + label_offset
245                            - self.buffer.cur_offset() as i64;
246                        emit_disp_imm!();
247                    } else {
248                        let offset = self.buffer.cur_offset();
249                        self.buffer.use_label_at_offset(
250                            offset,
251                            Label::from_id(label_id),
252                            match offset_format.typ() {
253                                OffsetType::Adrp => LabelUse::A64Adrp21,
254                                OffsetType::Adr => LabelUse::A64Adr21,
255                                OffsetType::Ldr => LabelUse::A64Ldr19,
256                                OffsetType::SignedOffset => {
257                                    if offset_format.imm_bit_count() == 26 {
258                                        LabelUse::A64Branch26
259                                    } else if offset_format.imm_bit_count() == 19 {
260                                        LabelUse::A64Branch19
261                                    } else if offset_format.imm_bit_count() == 14 {
262                                        LabelUse::A64Branch14
263                                    } else {
264                                        panic!("Invalid offset format for label use")
265                                    }
266                                }
267                            },
268                        );
269
270                        return self.buffer.write_u32(opcode.get());
271                    }
272                }
273
274                if rm_rel.is_imm() {
275                    let target_offset = rm_rel.as_::<Imm>().value_as::<u64>();
276                    let mut pc = self.buffer.cur_offset() as u64 + 4;
277                    if offset_format.typ() == OffsetType::Adrp {
278                        pc &= !(4096 - 1);
279                    }
280                    offset_value = target_offset as i64 - pc as i64;
281                    emit_disp_imm!();
282                }
283            };
284        }
285
286        macro_rules! emit_rd0 {
287            () => {
288                opcode.add_reg(op0.id(), 0);
289                return self.buffer.write_u32(opcode.get());
290            };
291        }
292
293        macro_rules! emit_rn5 {
294            () => {
295                opcode.add_reg(op0.id(), 5);
296                return self.buffer.write_u32(opcode.get());
297            };
298        }
299
300        macro_rules! emit_rn5_rm16 {
301            () => {
302                opcode.add_reg(op0.id(), 5);
303                opcode.add_reg(op1.id(), 16);
304                return self.buffer.write_u32(opcode.get());
305            };
306        }
307
308        macro_rules! emit_rd0_rn5 {
309            () => {
310                opcode.add_reg(op0.id(), 0);
311                opcode.add_reg(op1.id(), 5);
312                return self.buffer.write_u32(opcode.get());
313            };
314        }
315
316        macro_rules! emit_rd0_rn5_rm16_ra10 {
317            () => {
318                opcode.add_reg(op0.id(), 0);
319                opcode.add_reg(op1.id(), 5);
320                opcode.add_reg(op2.id(), 16);
321                opcode.add_reg(op3.id(), 10);
322                return self.buffer.write_u32(opcode.get());
323            };
324        }
325
326        macro_rules! emit_rd0_rn5_rm16 {
327            () => {
328                opcode.add_reg(op0.id(), 0);
329                opcode.add_reg(op1.id(), 5);
330                opcode.add_reg(op2.id(), 16);
331                return self.buffer.write_u32(opcode.get());
332            };
333        }
334
335        macro_rules! emit_mem_base_rn5 {
336            () => {
337                opcode.add_reg(op0.as_::<Mem>().base_id(), 5);
338                return self.buffer.write_u32(opcode.get());
339            };
340        }
341
342        macro_rules! emit_mem_base_index_rn5_rm16 {
343            () => {
344                opcode.add_reg(op0.as_::<Mem>().base_id(), 5);
345                opcode.add_reg(op0.as_::<Mem>().index_id(), 16);
346                return self.buffer.write_u32(opcode.get());
347            };
348        }
349
350        macro_rules! emit_mem_base_no_imm_rn5 {
351            () => {
352                opcode.add_reg(rm_rel.as_::<Mem>().base_id(), 5);
353                return self.buffer.write_u32(opcode.get());
354            };
355        }
356
357        let mut encoding = Encoding::try_from(inst_info.encoding).expect("Invalid encoding index");
358
359        macro_rules! simd_insn {
360            () => {
361                if isign4 == enc_ops!(Reg, Reg) && op0.as_::<Reg>().is_vec128() {
362                    if !op0.as_::<Vec>().has_element_index() {
363                        self.last_error = Some(AsmError::InvalidInstruction);
364                        return;
365                    }
366                    let element_type = op0.as_::<Vec>().element_type() as u32;
367                    let dst_index = op0.as_::<Vec>().element_index();
368                    let lsb_index = element_type - 1;
369                    let imm5 = ((dst_index << 1) | 1) << lsb_index;
370                    if imm5 > 31 {
371                        self.last_error = Some(AsmError::InvalidOperand);
372                        return;
373                    }
374                    if op1.as_::<Reg>().is_gp() {
375                        // INS - Vec[N] <- GP register.
376                        opcode.reset(0b0100111000000000000111 << 10);
377                        opcode.add_imm(imm5, 16);
378                        emit_rd0_rn5!();
379                        return;
380                    } else if op1.as_::<Reg>().is_vec128() && op1.as_::<Vec>().has_element_index() {
381                        // INS - Vec[N] <- Vec[M].
382                        if op0.as_::<Vec>().element_type() != op1.as_::<Vec>().element_type() {
383                            self.last_error = Some(AsmError::InvalidInstruction);
384                            return;
385                        }
386                        let src_index = op1.as_::<Vec>().element_index();
387                        if op0.as_::<Reg>().reg_type() != op1.as_::<Reg>().reg_type() {
388                            self.last_error = Some(AsmError::InvalidInstruction);
389                            return;
390                        }
391                        let imm4 = src_index << lsb_index;
392                        if imm4 > 15 {
393                            self.last_error = Some(AsmError::InvalidOperand);
394                            return;
395                        }
396                        opcode.reset(0b0110111000000000000001 << 10);
397                        opcode.add_imm(imm5, 16);
398                        opcode.add_imm(imm4, 11);
399                        emit_rd0_rn5!();
400                        return;
401                    }
402                }
403            };
404        }
405
406        macro_rules! simd_dup {
407            () => {
408                if isign4 == enc_ops!(Reg, Reg) {
409                    let k_valid_encodings = B!(VecElementType::B as u32 + 0)
410                        | B!(VecElementType::H as u32 + 0)
411                        | B!(VecElementType::S as u32 + 0)
412                        | B!(VecElementType::B as u32 + 8)
413                        | B!(VecElementType::H as u32 + 8)
414                        | B!(VecElementType::S as u32 + 8)
415                        | B!(VecElementType::D as u32 + 8);
416
417                    let q = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec64 as u32;
418
419                    if op1.as_::<Reg>().is_gp() {
420                        let element_type = op0.as_::<Vec>().element_type() as u32;
421                        if q > 1 || !bit_test(k_valid_encodings, (q << 3) | element_type) {
422                            self.last_error = Some(AsmError::InvalidInstruction);
423                            return;
424                        }
425
426                        let lsb_index = element_type - 1;
427                        let imm5 = 1u32 << lsb_index;
428
429                        opcode.reset(0b0000111000000000000011 << 10);
430                        opcode.add_imm(q, 30);
431                        opcode.add_imm(imm5, 16);
432                        emit_rd0_rn5!();
433                    } else if !op1.as_::<Reg>().is_vec() || !op1.as_::<Vec>().has_element_index() {
434                        self.last_error = Some(AsmError::InvalidInstruction);
435                        return;
436                    } else {
437                        let dst_index = op1.as_::<Vec>().element_index();
438                        if !op0.as_::<Vec>().has_element_type() {
439                            let lsb_index =
440                                op0.as_::<Reg>().reg_type() as u32 - RegType::Vec8 as u32;
441                            if lsb_index
442                                != op1.as_::<Vec>().element_type() as u32 - VecElementType::B as u32
443                                || lsb_index > 3
444                            {
445                                self.last_error = Some(AsmError::InvalidInstruction);
446                                return;
447                            }
448
449                            let imm5 = ((dst_index << 1) | 1u32) << lsb_index;
450                            if imm5 > 31 {
451                                self.last_error = Some(AsmError::InvalidOperand);
452                                return;
453                            }
454
455                            opcode.reset(0b0101111000000000000001 << 10);
456                            opcode.add_imm(imm5, 16);
457                            emit_rd0_rn5!();
458                        } else {
459                            let element_type = op0.as_::<Vec>().element_type() as u32;
460                            if q > 1 || !bit_test(k_valid_encodings, (q << 3) | element_type) {
461                                self.last_error = Some(AsmError::InvalidInstruction);
462                                return;
463                            }
464
465                            let lsb_index = element_type - 1;
466                            let imm5 = ((dst_index << 1) | 1u32) << lsb_index;
467                            if imm5 > 31 {
468                                self.last_error = Some(AsmError::InvalidOperand);
469                                return;
470                            }
471
472                            opcode.reset(0b0000111000000000000001 << 10);
473                            opcode.add_imm(q, 30);
474                            opcode.add_imm(imm5, 16);
475                            emit_rd0_rn5!();
476                        }
477                    }
478                }
479            };
480        }
481
482        macro_rules! simd_umov {
483            () => {
484                let op_data = &SIMD_SMOV_UMOV[encoding_index];
485                if isign4 == enc_ops!(Reg, Reg)
486                    && op0.as_::<Reg>().is_gp()
487                    && op1.as_::<Reg>().is_vec()
488                {
489                    let size_op = element_type_to_size_op(
490                        op_data.vec_op_type,
491                        op1.as_::<Reg>().reg_type(),
492                        op1.as_::<Vec>().element_type(),
493                    );
494                    if !size_op.is_valid() {
495                        self.last_error = Some(AsmError::InvalidInstruction);
496                        return;
497                    }
498                    if !op1.as_::<Vec>().has_element_index() {
499                        self.last_error = Some(AsmError::InvalidInstruction);
500                        return;
501                    }
502                    let x = op0.as_::<Gp>().is_gp64() as u32;
503                    let gp_must_be_x = (size_op.size() >= 3u32 - op_data.is_signed as u32) as u32;
504                    if op_data.is_signed != 0 {
505                        if gp_must_be_x != 0 && x == 0 {
506                            self.last_error = Some(AsmError::InvalidInstruction);
507                            return;
508                        }
509                    } else {
510                        if x != gp_must_be_x {
511                            self.last_error = Some(AsmError::InvalidInstruction);
512                            return;
513                        }
514                    }
515                    let element_index = op1.as_::<Vec>().element_index();
516                    let max_element_index = 15u32 >> size_op.size();
517                    if element_index > max_element_index {
518                        self.last_error = Some(AsmError::InvalidOperand);
519                        return;
520                    }
521                    let imm5 = (1u32 | (element_index << 1)) << size_op.size();
522                    opcode.reset((op_data.opcode as u32) << 10);
523                    opcode.add_imm(x, 30);
524                    opcode.add_imm(imm5, 16);
525                    emit_rd0_rn5!();
526                    return;
527                }
528            };
529        }
530
531        match encoding {
532            Encoding::BaseOp => {
533                let op_data = &BASE_OP[encoding_index];
534                if isign4 == 0 {
535                    opcode.reset(op_data.opcode);
536                    return self.buffer.write_u32(opcode.get());
537                }
538            }
539
540            Encoding::BaseOpX16 => {
541                let op_data = &BASE_OP_X16[encoding_index];
542                if isign4 == enc_ops!(Reg) && op0.as_::<Reg>().is_gp64() && op0.id() == 16 {
543                    opcode.reset(op_data.opcode);
544                    return self.buffer.write_u32(opcode.get());
545                }
546            }
547
548            Encoding::BaseOpImm => {
549                let op_data = &BASE_OP_IMM[encoding_index];
550                if isign4 == enc_ops!(Imm) {
551                    let imm = op0.as_::<Imm>().value();
552                    let imm_max = 1i64 << op_data.imm_bits as u32;
553                    if imm >= imm_max as i64 {
554                        self.last_error = Some(AsmError::TooLarge);
555                        return;
556                    }
557                    opcode.reset(op_data.opcode);
558                    opcode.add_imm(imm as i32 as u32, op_data.imm_offset as _);
559                    return self.buffer.write_u32(opcode.get());
560                }
561            }
562
563            Encoding::BaseR => {
564                let op_data = &BASE_R[encoding_index];
565                if isign4 == enc_ops!(Reg) {
566                    opcode.reset(op_data.opcode);
567                    opcode.add_reg(op0.id(), op_data.r_shift);
568                    return self.buffer.write_u32(opcode.get());
569                }
570            }
571
572            Encoding::BaseRR => {
573                let op_data = &BASE_RR[encoding_index];
574                if isign4 == enc_ops!(Reg, Reg) {
575                    let mut x = 0;
576                    if !check_gp_typex(op0, op_data.a_type, &mut x) {
577                        self.last_error = Some(AsmError::InvalidOperand);
578                        return;
579                    }
580                    if !check_gp_type(op1, op_data.b_type) {
581                        self.last_error = Some(AsmError::InvalidOperand);
582                        return;
583                    }
584
585                    if op_data.uniform != 0 && !check_signature!(op0, op1) {
586                        self.last_error = Some(AsmError::InvalidOperand);
587                        return;
588                    }
589
590                    if !check_gp_id(op0, op_data.a_hi_id) || !check_gp_id(op1, op_data.b_hi_id) {
591                        self.last_error = Some(AsmError::InvalidOperand);
592                        return;
593                    }
594                    opcode.reset(op_data.opcode);
595                    opcode.add_imm(x, 31);
596                    opcode.add_reg(op1.id(), op_data.b_shift);
597                    opcode.add_reg(op0.id(), op_data.a_shift);
598                    return self.buffer.write_u32(opcode.get());
599                }
600            }
601
602            Encoding::BaseRRR => {
603                let op_data = &BASE_RRR[encoding_index];
604                if isign4 == enc_ops!(Reg, Reg, Reg) {
605                    let mut x = 0;
606                    if !check_gp_typex(op0, op_data.a_type, &mut x) {
607                        self.last_error = Some(AsmError::InvalidOperand);
608                        return;
609                    }
610                    if !check_gp_type(op1, op_data.b_type) || !check_gp_type(op2, op_data.c_type) {
611                        self.last_error = Some(AsmError::InvalidOperand);
612                        return;
613                    }
614
615                    if op_data.uniform != 0 && !check_signature!(op0, op1, op2) {
616                        self.last_error = Some(AsmError::InvalidInstruction);
617                        return;
618                    }
619
620                    if !check_gp_id(op0, op_data.a_hi_id)
621                        || !check_gp_id(op1, op_data.b_hi_id)
622                        || !check_gp_id(op2, op_data.c_hi_id)
623                    {
624                        self.last_error = Some(AsmError::InvalidOperand);
625                        return;
626                    }
627                    opcode.reset(op_data.opcode());
628                    opcode.add_imm(x, 31);
629                    opcode.add_reg(op2.id(), 16);
630                    opcode.add_reg(op1.id(), 5);
631                    opcode.add_reg(op0.id(), 0);
632                    return self.buffer.write_u32(opcode.get());
633                }
634            }
635
636            Encoding::BaseRRRR => {
637                let op_data = &BASE_RRRR[encoding_index];
638                if isign4 == enc_ops!(Reg, Reg, Reg, Reg) {
639                    let mut x = 0;
640                    if !check_gp_typex(op0, op_data.a_type, &mut x) {
641                        self.last_error = Some(AsmError::InvalidOperand);
642                        return;
643                    }
644                    if !check_gp_type(op1, op_data.b_type)
645                        || !check_gp_type(op2, op_data.c_type)
646                        || !check_gp_type(op3, op_data.d_type)
647                    {
648                        self.last_error = Some(AsmError::InvalidOperand);
649                        return;
650                    }
651
652                    if op_data.uniform != 0 && !check_signature!(op0, op1, op2, op3) {
653                        self.last_error = Some(AsmError::InvalidInstruction);
654                        return;
655                    }
656
657                    if !check_gp_id(op0, op_data.a_hi_id)
658                        || !check_gp_id(op1, op_data.b_hi_id)
659                        || !check_gp_id(op2, op_data.c_hi_id)
660                        || !check_gp_id(op3, op_data.d_hi_id)
661                    {
662                        self.last_error = Some(AsmError::InvalidOperand);
663                        return;
664                    }
665                    opcode.reset(op_data.opcode());
666                    opcode.add_imm(x, 31);
667                    opcode.add_reg(op2.id(), 16);
668                    opcode.add_reg(op3.id(), 10);
669                    opcode.add_reg(op1.id(), 5);
670                    opcode.add_reg(op0.id(), 0);
671                    return self.buffer.write_u32(opcode.get());
672                }
673            }
674
675            Encoding::BaseRRII => {
676                let op_data = &BASE_RRII[encoding_index];
677                if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
678                    let mut x = 0;
679                    if !check_gp_typex(op0, op_data.a_type, &mut x) {
680                        self.last_error = Some(AsmError::InvalidOperand);
681                        return;
682                    }
683                    if !check_gp_type(op1, op_data.b_type) {
684                        self.last_error = Some(AsmError::InvalidOperand);
685                        return;
686                    }
687
688                    if !check_gp_id(op0, op_data.a_hi_id) || !check_gp_id(op1, op_data.b_hi_id) {
689                        self.last_error = Some(AsmError::InvalidOperand);
690                        return;
691                    }
692
693                    let imm2 = op2.as_::<Imm>().value();
694                    let imm3 = op3.as_::<Imm>().value();
695
696                    if imm2 >= (op_data.a_imm_size + op_data.a_imm_discard_lsb) as u32 as i64
697                        || imm3 >= (op_data.b_imm_size + op_data.b_imm_discard_lsb) as u32 as i64
698                    {
699                        self.last_error = Some(AsmError::TooLarge);
700                        return;
701                    }
702
703                    let a_imm = imm2 as u32 >> op_data.a_imm_discard_lsb;
704                    let b_imm = imm3 as u32 >> op_data.b_imm_discard_lsb;
705
706                    opcode.reset(op_data.opcode());
707                    opcode.add_imm(a_imm, op_data.a_imm_offset);
708                    opcode.add_imm(b_imm, op_data.b_imm_offset);
709                    opcode.add_reg(op1.id(), 5);
710                    opcode.add_reg(op0.id(), 0);
711                    return self.buffer.write_u32(opcode.get());
712                }
713            }
714
            // MOV — synthesized from ADD #0 (when SP is involved), ORR with ZR
            // (register form), or a MOVZ/MOVN/ORR-immediate sequence (immediate
            // form).
            Encoding::BaseMov => {
                // x: 0 => 32-bit (W) form, 1 => 64-bit (X) form. wrapping_sub
                // avoids a debug-build overflow panic for non-GP register
                // types; those are rejected by the `x > 1` range check below.
                let x = (op0.as_::<Reg>().typ() as u32).wrapping_sub(RegType::Gp32 as u32);
                if x > 1 {
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }

                // MOV Rd, Rn (register form).
                if isign4 == enc_ops!(Reg, Reg) {
                    if !op0.as_::<Reg>().is_gp() || !op1.as_::<Reg>().is_gp() {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Both registers must have the same width.
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    let has_sp = op0.as_::<Reg>().is_sp() || op1.as_::<Reg>().is_sp();

                    if has_sp {
                        // MOV to/from SP encodes as ADD Rd, Rn, #0; ZR (id 63)
                        // is not allowed here, hence the id limit of 31.
                        if !check_gp_id2(op0, op1, 31) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset(0b00010001000000000000000000000000);
                        opcode.add_imm(x, 31);
                        opcode.add_reg(op1.id(), 5);
                        opcode.add_reg(op0.id(), 0);
                        return self.buffer.write_u32(opcode.get());
                    } else {
                        // Plain register MOV encodes as ORR Rd, ZR, Rm — the
                        // base opcode already carries Rn = ZR (0b11111 << 5).
                        if !check_gp_id2(op0, op1, 63) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset(0b00101010000000000000001111100000);
                        opcode.add_imm(x, 31);
                        opcode.add_reg(op1.id(), 16);
                        opcode.add_reg(op0.id(), 0);
                        return self.buffer.write_u32(opcode.get());
                    }
                }

                // MOV Rd, #imm (immediate form).
                if isign4 == enc_ops!(Reg, Imm) {
                    if !op0.as_::<Reg>().is_gp() {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let mut imm_value = op1.as_::<Imm>().value_as::<u64>();
                    if x == 0 {
                        // W form: only the low 32 bits are significant.
                        imm_value &= 0xFFFFFFFF;
                    }

                    // Prefer a single MOVN/MOVZ instruction over a logical instruction.
                    // (MOVZ/MOVN cannot target SP, hence the is_sp() guard.)
                    multiple_op_count =
                        encode_mov_sequence64(&mut multiple_op_data, imm_value, op0.id() & 31, x);
                    if multiple_op_count == 1 && !op0.as_::<Gp>().is_sp() {
                        opcode.reset(multiple_op_data[0]);
                        return self.buffer.write_u32(opcode.get());
                    }

                    // Next preference: a single ORR Rd, ZR, #logical-imm
                    // (ZR as destination would be a no-op, so it is excluded).
                    if !op0.as_::<Gp>().is_zr() {
                        if let Some(logical_imm) =
                            encode_logical_imm(imm_value, if x != 0 { 64 } else { 32 })
                        {
                            opcode.reset(0b00110010000000000000001111100000);
                            opcode.add_imm(x, 31);
                            opcode.add_logical_imm(&logical_imm);
                            opcode.add_reg(op0.id(), 0);
                            return self.buffer.write_u32(opcode.get());
                        }
                    }

                    // Fallback: emit the full MOVZ/MOVK... sequence.
                    for i in 0..multiple_op_count {
                        self.buffer.write_u32(multiple_op_data[i]);
                    }
                    return;
                }
            }
795
796            Encoding::BaseMovKNZ => {
797                let op_data = &BASE_MOV_KNZ[encoding_index];
798
799                let x = op0.as_::<Reg>().typ() as u32 - RegType::Gp32 as u32;
800                if x > 1 {
801                    self.last_error = Some(AsmError::InvalidInstruction);
802                    return;
803                }
804
805                if !check_gp_id(op0, 63) {
806                    self.last_error = Some(AsmError::InvalidOperand);
807                    return;
808                }
809
810                opcode.reset(op_data.opcode);
811                opcode.add_imm(x, 31);
812
813                if isign4 == enc_ops!(Reg, Imm) {
814                    let imm16 = op1.as_::<Imm>().value_as::<u64>();
815                    if imm16 > 0xFFFF {
816                        self.last_error = Some(AsmError::TooLarge);
817                        return;
818                    }
819
820                    opcode.add_imm(imm16 as u32, 5);
821                    opcode.add_reg(op0.id(), 0);
822                    return self.buffer.write_u32(opcode.get());
823                }
824
825                if isign4 == enc_ops!(Reg, Imm, Imm) {
826                    let imm16 = op1.as_::<Imm>().value_as::<u64>();
827                    let shift_type = op2.as_::<Imm>().predicate();
828                    let shift_value = op2.as_::<Imm>().value_as::<u64>();
829
830                    if imm16 > 0xFFFF || shift_value > 48 || shift_type != ShiftOp::LSL as u32 {
831                        self.last_error = Some(AsmError::TooLarge);
832                        return;
833                    }
834
835                    let hw = (shift_value as u32) >> 4;
836
837                    if hw << 4 != shift_value as u32 {
838                        self.last_error = Some(AsmError::InvalidOperand);
839                        return;
840                    }
841
842                    opcode.add_imm(hw, 21);
843                    opcode.add_imm(imm16 as u32, 5);
844                    opcode.add_reg(op0.id(), 0);
845
846                    if x == 0 && hw > 1 {
847                        self.last_error = Some(AsmError::InvalidOperand);
848                        return;
849                    }
850
851                    return self.buffer.write_u32(opcode.get());
852                }
853            }
854
            // ADR/ADRP — PC-relative address of a label, symbol, or raw offset.
            Encoding::BaseAdr => {
                let op_data = &BASE_ADR[encoding_index];
                if isign4 == enc_ops!(Reg, Label)
                    || isign4 == enc_ops!(Reg, Sym)
                    || isign4 == enc_ops!(Reg, Imm)
                {
                    if !op0.as_::<Reg>().is_gp() {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // ZR allowed, SP not.
                    if !check_gp_id(op0, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.opcode());
                    opcode.add_reg(op0.id(), 0);
                    // Configure how the relative immediate is laid out in the
                    // instruction word (ADR splits it into immlo/immhi).
                    // NOTE(review): parameter meaning of reset_to_imm_type
                    // (4, 5, 21, 0) is defined elsewhere — presumably
                    // size/offset/width fields; confirm against its definition.
                    offset_format.reset_to_imm_type(
                        OffsetType::try_from(op_data.offset_type).expect("Invalid offset type"),
                        4,
                        5,
                        21,
                        0,
                    );

                    if inst_id == InstId::Adrp as u32 {
                        // ADRP addresses 4KiB pages: drop the low 12 bits.
                        offset_format.imm_discard_lsb = 12;
                    }
                    rm_rel = op1;
                    emit_rel!();
                }
            }
888
            // ADD/ADDS/SUB/SUBS — immediate, shifted-register, and
            // extended-register forms.
            Encoding::BaseAddSub => {
                let op_data = &BASE_ADD_SUB[encoding_index];

                // x: 0 => W form, 1 => X form; op0/op1 must be same-width GPs.
                let mut x = 0;
                if !check_gp_typex2(op0, op1, 3, &mut x) {
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }

                // Immediate form: ADDx Rd, Rn, #imm {, LSL #0|12}.
                if isign4 == enc_ops!(Reg, Reg, Imm) || isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
                    opcode.reset((op_data.immediate_op as u32) << 24);

                    // ADD | SUB (immediate) - ZR is not allowed.
                    // ADDS|SUBS (immediate) - ZR allowed in Rd, SP allowed in Rn.
                    // Bit 29 is the S (set-flags) bit, which decides the limit.
                    let a_hi_id = if opcode.get() & 1 << 29 != 0 { 63 } else { 31 };
                    let b_hi_id = 31;
                    if !check_gp_id(op0, a_hi_id) || !check_gp_id(op1, b_hi_id) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let mut imm = op2.as_::<Imm>().value_as::<u64>();
                    let mut shift = 0;

                    // Optional explicit shift operand: only LSL #0 or LSL #12.
                    if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
                        if op3.as_::<Imm>().predicate() != ShiftOp::LSL as u32 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        if op3.as_::<Imm>().value() != 0 && op3.as_::<Imm>().value() != 12 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        shift = (op3.as_::<Imm>().value() != 0) as u32;
                    }

                    // Auto-promote an unshifted immediate that only occupies
                    // bits 12..24 to the LSL #12 form.
                    if imm > 0xfff {
                        if shift != 0 || (imm & !(0xfff << 12)) != 0 {
                            self.last_error = Some(AsmError::TooLarge);
                            return;
                        }

                        shift = 1;
                        imm >>= 12;
                    }

                    opcode.add_imm(x, 31);
                    opcode.add_imm(shift, 12);
                    opcode.add_imm(imm as u32, 10);
                    opcode.add_reg(op0.id(), 5);
                    opcode.add_reg(op1.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }

                // Register form: shifted-register unless SP is involved, in
                // which case the extended-register encoding must be used.
                if isign4 == enc_ops!(Reg, Reg, Reg) || isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
                    let op_size = if x != 0 { 64 } else { 32 };
                    let mut shift = 0;
                    let mut shift_type = ShiftOp::LSL as u32;

                    if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
                        shift_type = op3.as_::<Imm>().predicate();
                        shift = op3.as_::<Imm>().value() as u32;
                    }

                    if !check_gp_id(op2, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // LSL/LSR/ASR are valid for the shifted-register encoding.
                    if shift_type <= ShiftOp::ASR as u32 {
                        let has_sp = op0.as_::<Gp>().is_sp() || op1.as_::<Gp>().is_sp();

                        if !has_sp {
                            if !check_signature!(op1, op2) {
                                self.last_error = Some(AsmError::InvalidInstruction);
                                return;
                            }

                            if !check_gp_id3(op0, op1, op2, 63) {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }

                            // Shift amount must be < register width.
                            if shift >= op_size {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }

                            // Shifted-register layout: Rm@16, imm6@10, Rn@5, Rd@0.
                            opcode.reset((op_data.shifted_op as u32) << 21);
                            opcode.add_imm(x, 31);
                            opcode.add_imm(shift_type, 22);
                            opcode.add_reg(op2.id(), 16);
                            opcode.add_imm(shift, 10);
                            opcode.add_reg(op1.id(), 5);
                            opcode.add_reg(op0.id(), 0);
                            return self.buffer.write_u32(opcode.get());
                        }

                        // SP requires the extended form; only LSL can be
                        // re-expressed as UXTW/UXTX below.
                        if shift_type != ShiftOp::LSL as u32 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        shift_type = if x != 0 {
                            ShiftOp::UXTX as u32
                        } else {
                            ShiftOp::UXTW as u32
                        };
                    }

                    // Extended-register encoding.
                    opcode.reset((op_data.extended_op as u32) << 21);
                    // Rebase the extend kind to the 3-bit `option` field.
                    shift_type -= ShiftOp::UXTB as u32;

                    // Extend amount is limited to 0..=4.
                    if shift_type > 7 || shift > 4 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Without the S bit (29), Rd/Rn may be SP but not ZR;
                    // with it, Rd may be ZR and Rn may be SP.
                    if (opcode.get() & (1 << 29)) == 0 {
                        if !check_gp_id2(op0, op1, 31) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                    } else {
                        if !check_gp_id(op0, 63) || !check_gp_id(op1, 31) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                    }

                    opcode.add_imm(x, 31);
                    opcode.add_reg(op2.id(), 16);
                    opcode.add_imm(shift_type, 13);
                    opcode.add_imm(shift, 10);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1030
            // AND/ORR/EOR/BIC/... — logical immediate and shifted-register
            // forms.
            Encoding::BaseLogical => {
                let op_data = &BASE_LOGICAL[encoding_index];

                // x: 0 => W form, 1 => X form.
                let mut x = 0;
                if !check_gp_typex2(op0, op1, 3, &mut x) {
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }

                if !check_signature!(op0, op1) {
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }

                let op_size = if x != 0 { 64 } else { 32 };

                // Immediate form, if this mnemonic has one (BIC/ORN etc. are
                // expressed by negating the immediate of AND/ORR).
                if isign4 == enc_ops!(Reg, Reg, Imm) && op_data.immediate_op != 0 {
                    opcode.reset((op_data.immediate_op as u32) << 23);

                    let imm_mask = lsb_mask::<u64>(op_size);
                    let mut imm_value = op2.as_::<Imm>().value_as::<u64>();

                    // Negated-immediate aliases (e.g. BIC = AND with ~imm).
                    if op_data.negate_imm != 0 {
                        imm_value ^= imm_mask;
                    }

                    // Immediate must be expressible as an N:immr:imms pattern.
                    let Some(logical_imm) = encode_logical_imm(imm_value, op_size) else {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    };

                    // ANDS (opc == 0b11) allows ZR in Rd; the others allow SP.
                    let op_ands = 0x3 << 29;
                    let is_ands = (opcode.get() & op_ands) == op_ands;

                    if !check_gp_id(op0, if is_ands { 63 } else { 31 }) || !check_gp_id(op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.add_imm(x, 31);
                    opcode.add_logical_imm(&logical_imm);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }

                if !check_signature!(op1, op2) {
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }

                // Shifted-register form without an explicit shift (shift 0).
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    if !check_gp_id3(op0, op1, op2, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset((op_data.shifted_op as u32) << 21);
                    opcode.add_imm(x, 31);
                    opcode.add_reg(op2.id(), 16);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }

                // Shifted-register form with an explicit shift operand.
                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
                    if !check_gp_id3(op0, op1, op2, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let shift_type = op3.as_::<Imm>().predicate();
                    let op_shift = op3.as_::<Imm>().value() as u32;

                    // Logical ops accept all four shifts (LSL/LSR/ASR/ROR);
                    // shift amount must be < register width.
                    if shift_type > 0x3 || op_shift >= op_size {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset((op_data.shifted_op as u32) << 21);
                    opcode.add_imm(x, 31);
                    opcode.add_imm(shift_type, 22);
                    opcode.add_reg(op2.id(), 16);
                    opcode.add_imm(op_shift, 10);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1120
1121            Encoding::BaseCmpCmn => {
1122                let op_data = &BASE_CMP_CMN[encoding_index];
1123
1124                let mut x = 0;
1125                if !check_gp_typex(op0, 3, &mut x) {
1126                    self.last_error = Some(AsmError::InvalidOperand);
1127                    return;
1128                }
1129
1130                if isign4 == enc_ops!(Reg, Imm) {
1131                    if !check_gp_id(op0, 31) {
1132                        self.last_error = Some(AsmError::InvalidOperand);
1133                        return;
1134                    }
1135
1136                    let imm12 = op1.as_::<Imm>();
1137                    let mut imm_shift = 0;
1138                    let mut imm_value = imm12.value_as::<u64>();
1139
1140                    if imm_value > 0xfff {
1141                        if (imm_value & !(0xfff << 12)) != 0 {
1142                            self.last_error = Some(AsmError::TooLarge);
1143                            return;
1144                        }
1145                        imm_shift = 1;
1146                        imm_value >>= 12;
1147                    }
1148
1149                    opcode.reset((op_data.immediate_op as u32) << 24);
1150                    opcode.add_imm(x, 31);
1151                    opcode.add_imm(imm_shift, 22);
1152                    opcode.add_imm(imm_value as u32, 10);
1153                    opcode.add_reg(op0.id(), 5);
1154                    opcode.add_reg(63, 0);
1155                    return self.buffer.write_u32(opcode.get());
1156                }
1157
1158                if isign4 == enc_ops!(Reg, Reg) || isign4 == enc_ops!(Reg, Reg, Imm) {
1159                    let op_size = if x != 0 { 64 } else { 32 };
1160                    let mut shift_type = 0;
1161                    let mut shift_value = 0;
1162
1163                    if isign4 == enc_ops!(Reg, Reg, Imm) {
1164                        shift_type = op2.as_::<Imm>().predicate();
1165                        shift_value = op2.as_::<Imm>().value() as u32;
1166                    }
1167
1168                    let has_sp = op0.as_::<Gp>().is_sp() || op1.as_::<Gp>().is_sp();
1169
1170                    if shift_type <= ShiftOp::ASR as u32 {
1171                        if !has_sp {
1172                            if !check_signature!(op0, op1) {
1173                                self.last_error = Some(AsmError::InvalidInstruction);
1174                                return;
1175                            }
1176
1177                            if shift_value >= op_size {
1178                                self.last_error = Some(AsmError::InvalidOperand);
1179                                return;
1180                            }
1181
1182                            opcode.reset((op_data.shifted_op as u32) << 21);
1183                            opcode.add_imm(x, 31);
1184                            opcode.add_imm(shift_type, 22);
1185                            opcode.add_reg(op1.id(), 5);
1186                            opcode.add_imm(shift_value, 10);
1187                            opcode.add_reg(op0.id(), 5);
1188                            opcode.add_reg(63, 0);
1189                            return self.buffer.write_u32(opcode.get());
1190                        }
1191
1192                        if shift_type != ShiftOp::LSL as u32 {
1193                            self.last_error = Some(AsmError::InvalidOperand);
1194                            return;
1195                        }
1196
1197                        shift_type = if x != 0 {
1198                            ShiftOp::UXTX as u32
1199                        } else {
1200                            ShiftOp::UXTW as u32
1201                        }
1202                    }
1203
1204                    shift_type -= ShiftOp::UXTB as u32;
1205
1206                    if shift_type > 7 || shift_value > 4 {
1207                        self.last_error = Some(AsmError::InvalidOperand);
1208                        return;
1209                    }
1210
1211                    opcode.reset((op_data.extended_op as u32) << 21);
1212                    opcode.add_imm(x, 31);
1213                    opcode.add_reg(op1.id(), 16);
1214                    opcode.add_imm(shift_type, 13);
1215                    opcode.add_imm(shift_value, 10);
1216                    opcode.add_reg(op0.id(), 5);
1217                    opcode.add_reg(63, 0);
1218                    return self.buffer.write_u32(opcode.get());
1219                }
1220            }
1221
            // MVN/NEG — aliases of ORN/SUB with a zero/ZR first source; the
            // table opcode already carries the fixed register fields.
            Encoding::BaseMvnNeg => {
                let op_data = &BASE_MVN_NEG[encoding_index];

                // x: 0 => W form, 1 => X form.
                let mut x = 0;
                if !check_gp_typex2(op0, op1, 3, &mut x) {
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }

                // Build the common encoding up front; it is only written out
                // after the per-form operand checks below succeed.
                opcode.reset(op_data.opcode);
                opcode.add_imm(x, 31);
                opcode.add_reg(op1.id(), 16);
                opcode.add_reg(op0.id(), 0);

                // Plain form, no shift.
                if isign4 == enc_ops!(Reg, Reg) {
                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    return self.buffer.write_u32(opcode.get());
                }

                // Form with an explicit shift operand.
                if isign4 == enc_ops!(Reg, Reg, Imm) {
                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let op_size = if x != 0 { 64 } else { 32 };
                    let shift_type = op2.as_::<Imm>().predicate();
                    let shift_value = op2.as_::<Imm>().value() as u32;

                    // All four shifts accepted; amount must be < reg width.
                    if shift_type > ShiftOp::ROR as u32 || shift_value >= op_size {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.add_imm(shift_type, 22);
                    opcode.add_imm(shift_value, 10);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1265
            // TST — alias of ANDS with Rd = ZR; logical-immediate and
            // shifted-register forms.
            Encoding::BaseTst => {
                let op_data = &BASE_TST[encoding_index];

                // x: 0 => W form, 1 => X form.
                let mut x = 0;
                if !check_gp_typex(op0, 3, &mut x) {
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }

                // Immediate form: TSTx Rn, #logical-imm.
                if isign4 == enc_ops!(Reg, Imm) && op_data.immediate_op != 0 {
                    if !check_gp_id(op0, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    let op_size = if x != 0 { 64 } else { 32 };
                    let imm_mask = lsb_mask::<u64>(op_size);
                    let imm_value = op1.as_::<Imm>().value_as::<u64>();

                    // Immediate must be expressible as an N:immr:imms pattern.
                    let Some(logical_imm) = encode_logical_imm(imm_value & imm_mask, op_size)
                    else {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    };

                    opcode.reset((op_data.immediate_op as u32) << 22);
                    opcode.add_logical_imm(&logical_imm);
                    opcode.add_imm(x, 31);
                    opcode.add_reg(op0.id(), 5);
                    opcode.add_reg(63, 0);
                    return self.buffer.write_u32(opcode.get());
                }

                // Common shifted-register encoding: Rm@16, Rn@5, Rd@0 (= ZR).
                // Built eagerly; only written after the checks below pass.
                opcode.reset((op_data.shifted_op as u32) << 21);
                opcode.add_imm(x, 31);
                opcode.add_reg(op1.id(), 16);
                opcode.add_reg(op0.id(), 5);
                opcode.add_reg(63, 0);

                // Register form without a shift.
                if isign4 == enc_ops!(Reg, Reg) {
                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    return self.buffer.write_u32(opcode.get());
                }

                // Register form with an explicit shift operand.
                if isign4 == enc_ops!(Reg, Reg, Imm) {
                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let shift_type = op2.as_::<Imm>().predicate();
                    let shift_value = op2.as_::<Imm>().value() as u32;

                    // All four shifts accepted; amount must be < reg width.
                    if shift_type > 0x3 || shift_value >= (if x != 0 { 64 } else { 32 }) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.add_imm(shift_type, 22);
                    opcode.add_imm(shift_value, 10);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1332
            // BFC Rd, #lsb, #width — bitfield clear (BFM alias with Rn = ZR).
            Encoding::BaseBfc => {
                let op_data = &BASE_BFC[encoding_index];
                if isign4 == enc_ops!(Reg, Imm, Imm) {
                    // x: 0 => W form, 1 => X form.
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let lsb = op1.as_::<Imm>().value_as::<u64>();
                    let width = op2.as_::<Imm>().value_as::<u64>();
                    let op_size = if x != 0 { 64 } else { 32 };

                    if lsb >= op_size || width == 0 || width > op_size {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // BFM alias encoding: immr = (-lsb) mod size,
                    // imms = width - 1.
                    let lsb32 = 0u32.wrapping_sub(lsb as u32) & (op_size as u32 - 1);
                    let width32 = width as u32 - 1;

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);
                    // Second x is the N bit (22), which must match sf.
                    opcode.add_imm(x, 22);
                    opcode.add_imm(lsb32, 16);
                    opcode.add_imm(width32, 10);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1363
            // BFI/SBFIZ/UBFIZ Rd, Rn, #lsb, #width — bitfield insert-in-zero
            // family (BFM aliases).
            Encoding::BaseBfi => {
                let op_data = &BASE_BFI[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
                    // x: 0 => W form, 1 => X form.
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let lsb = op2.as_::<Imm>().value_as::<u64>();
                    let width = op3.as_::<Imm>().value_as::<u64>();
                    let op_size = if x != 0 { 64 } else { 32 };

                    if lsb >= op_size as u64 || width == 0 || width > op_size as u64 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // BFM alias encoding: immr = (-lsb) mod size,
                    // imms = width - 1.
                    let imm_l = 0u32.wrapping_sub(lsb as u32) & (op_size as u32 - 1);
                    let imm_w = width as u32 - 1;

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);
                    // Second x is the N bit (22), which must match sf.
                    opcode.add_imm(x, 22);
                    opcode.add_imm(imm_l, 16);
                    opcode.add_imm(imm_w, 10);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1405
            // BFM/SBFM/UBFM Rd, Rn, #immr, #imms — raw bitfield-move form with
            // explicit immr/imms fields.
            Encoding::BaseBfm => {
                let op_data = &BASE_BFM[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
                    // x: 0 => W form, 1 => X form.
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let imm_r = op2.as_::<Imm>().value_as::<u64>();
                    let imm_s = op3.as_::<Imm>().value_as::<u64>();
                    let op_size = if x != 0 { 64 } else { 32 };

                    // Both fields must fit in the register width; OR-ing them
                    // checks the pair with a single comparison.
                    if (imm_r | imm_s) >= op_size as u64 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);
                    // Second x is the N bit (22), which must match sf.
                    opcode.add_imm(x, 22);
                    opcode.add_imm(imm_r as u32, 16);
                    opcode.add_imm(imm_s as u32, 10);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1444
            // BFXIL/SBFX/UBFX Rd, Rn, #lsb, #width — bitfield extract family
            // (BFM aliases).
            Encoding::BaseBfx => {
                let op_data = &BASE_BFX[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
                    // x: 0 => W form, 1 => X form.
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let lsb = op2.as_::<Imm>().value_as::<u64>();
                    let width = op3.as_::<Imm>().value_as::<u64>();
                    let op_size = if x != 0 { 64 } else { 32 };

                    if lsb >= op_size as u64 || width == 0 || width > op_size as u64 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // BFM alias encoding: immr = lsb, imms = lsb + width - 1.
                    let lsb32 = lsb as u32;
                    let width32 = lsb32 + width as u32 - 1;

                    // The extracted field must not run past the register top.
                    if width32 >= op_size as u32 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);
                    // Second x is the N bit (22), which must match sf.
                    opcode.add_imm(x, 22);
                    opcode.add_imm(lsb32, 16);
                    opcode.add_imm(width32, 10);
                    opcode.add_reg(op1.id(), 5);
                    opcode.add_reg(op0.id(), 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1491
            // Sign/zero extend (SXTB/SXTH/UXTB-style): Rd, Wn. The destination
            // width is constrained by the table's reg_type; the source is
            // always a 32-bit GP register.
            Encoding::BaseExtend => {
                let op_data = &BASE_EXTEND[encoding_index];

                if isign4 == enc_ops!(Reg, Reg) {
                    // x becomes the sf bit derived from the destination type.
                    let mut x = 0;
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Source must be a W register regardless of destination size.
                    if !op1.as_::<Reg>().is_gp32() {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // NOTE: opcode() is a method here (packed table entry),
                    // unlike the plain `.opcode` field used by other arms.
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, 31);          // sf
                    opcode.add_imm(x, 22);          // N
                    opcode.add_reg(op1.id(), 5);    // Rn
                    opcode.add_reg(op0.id(), 0);    // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1520
            // EXTR-style extract: Rd, Rn, Rm, #lsb. All three registers must
            // share the same width; lsb selects the starting bit of the
            // concatenated Rn:Rm pair.
            Encoding::BaseExtract => {
                let op_data = &BASE_EXTRACT[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id3(op0, op1, op2, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // lsb must be within the operand size (0..32 or 0..64).
                    let lsb = op3.as_::<Imm>().value_as::<u64>();
                    let op_size = if x != 0 { 64 } else { 32 };

                    if lsb >= op_size as u64 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);           // sf
                    opcode.add_imm(x, 22);           // N
                    opcode.add_reg(op2.id(), 16);    // Rm
                    opcode.add_imm(lsb as u32, 10);  // imms = lsb
                    opcode.add_reg(op1.id(), 5);     // Rn
                    opcode.add_reg(op0.id(), 0);     // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1559
            // Byte-reverse (REV): Rd, Rn with matching widths. Uses a single
            // hard-coded base opcode instead of a table entry.
            Encoding::BaseRev => {
                if isign4 == enc_ops!(Reg, Reg) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(0b01011010110000000000100000000000);
                    // x is applied twice: sf (bit 31) plus the low opc bit
                    // (bit 10) — presumably so the 64-bit form selects the
                    // full 64-bit reverse variant; confirm against the Arm ARM.
                    opcode.add_imm(x, 31);
                    opcode.add_imm(x, 10);
                    opcode.add_reg(op1.id(), 5);    // Rn
                    opcode.add_reg(op0.id(), 0);    // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1586
            // Shift operations (LSL/LSR/ASR/ROR class). Two operand forms:
            //   - register shift amount: Rd, Rn, Rm (variable-shift opcode),
            //   - immediate shift amount: encoded through BFM/EXTR aliases.
            Encoding::BaseShift => {
                let op_data = &BASE_SHIFT[encoding_index];

                // Destination decides 32- vs 64-bit operation (x = sf bit).
                let mut x = 0;
                if !check_gp_typex(op0, 3, &mut x) {
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }

                // Register form: all three registers must match in width.
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id3(op0, op1, op2, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.register_op);
                    opcode.add_imm(x, 31);           // sf
                    opcode.add_reg(op2.id(), 16);    // Rm (shift amount)
                    opcode.add_reg(op1.id(), 5);     // Rn
                    opcode.add_reg(op0.id(), 0);     // Rd
                    return self.buffer.write_u32(opcode.get());
                }

                // Immediate form — only when the table supplies one.
                if isign4 == enc_ops!(Reg, Reg, Imm) && op_data.immediate_op != 0 {
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Shift amount must be in [0, operand size).
                    let imm_r = op2.as_::<Imm>().value_as::<u64>();
                    let op_size = if x != 0 { 64 } else { 32 };

                    if imm_r >= op_size as u64 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.immediate_op);
                    opcode.add_imm(x, 31);           // sf
                    opcode.add_imm(x, 22);           // N
                    opcode.add_reg(op1.id(), 5);     // Rn
                    opcode.add_reg(op0.id(), 0);     // Rd

                    // Table opcodes with bit 10 preset carry the low imms bits
                    // baked in: the shift goes straight into immr (bit 16) and
                    // sf extends the imms top bit (bit 15) — the xBFM-style
                    // LSR/ASR alias path. NOTE(review): inferred from field
                    // positions; confirm against the instruction table.
                    if opcode.get() & (1 << 10) != 0 {
                        opcode.add_imm(x, 15);
                        opcode.add_imm(imm_r as u32, 16);
                        return self.buffer.write_u32(opcode.get());
                    }

                    if op_data.ror == 0 {
                        // LSL alias of UBFM:
                        //   immr = (-shift) mod size, imms = size - 1 - shift.
                        let ubfm_imm_r = (0u32).wrapping_sub(imm_r as u32) & (op_size as u32 - 1);
                        let ubfm_imm_s = op_size as u32 - 1 - imm_r as u32;

                        opcode.add_imm(ubfm_imm_r, 16);
                        opcode.add_imm(ubfm_imm_s, 10);
                        return self.buffer.write_u32(opcode.get());
                    } else {
                        // ROR alias of EXTR: Rm duplicates Rn (bit 16), the
                        // rotate amount goes into imms (bit 10).
                        opcode.add_imm(imm_r as u32, 10);
                        opcode.add_reg(op1.id(), 16);
                        return self.buffer.write_u32(opcode.get());
                    }
                }
            }
1660
            // Conditional compare (CCMP/CCMN class), two forms:
            //   Rn, Rm,   #nzcv, #cond   (register)
            //   Rn, #imm5, #nzcv, #cond  (immediate — bit 11 set)
            Encoding::BaseCCmp => {
                let op_data = &BASE_C_CMP[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Imm, Imm) || isign4 == enc_ops!(Reg, Imm, Imm, Imm)
                {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_gp_id(op0, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Both nzcv and cond are 4-bit fields.
                    let nzcv = op2.as_::<Imm>().value_as::<u64>();
                    let cond = op3.as_::<Imm>().value_as::<u64>();

                    if (nzcv | cond) > 0xF {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);                                        // sf
                    opcode.add_imm(cond_code_to_opcode_field(cond as u32), 12);   // cond
                    opcode.add_imm(nzcv as u32, 0);                               // nzcv

                    if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
                        // Register form: Rn and Rm must match in width.
                        if !check_signature!(op0, op1) {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        if !check_gp_id(op1, 31) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        opcode.add_reg(op1.id(), 16);   // Rm
                        opcode.add_reg(op0.id(), 5);    // Rn
                        return self.buffer.write_u32(opcode.get());
                    } else {
                        // Immediate form: 5-bit unsigned immediate in the Rm slot.
                        let imm5 = op1.as_::<Imm>().value_as::<u64>();
                        if imm5 > 0x1F {
                            self.last_error = Some(AsmError::TooLarge);
                            return;
                        }

                        opcode.add_imm(1, 11);              // immediate-form selector bit
                        opcode.add_imm(imm5 as u32, 16);    // imm5
                        opcode.add_reg(op0.id(), 5);        // Rn
                        return self.buffer.write_u32(opcode.get());
                    }
                }
            }
1718
            // CINC/CINV/CNEG-style alias: Rd, Rn, cond. Encoded as the
            // underlying CSINC/CSINV/CSNEG with Rn duplicated into the Rm
            // slot and the condition inverted.
            Encoding::BaseCInc => {
                let op_data = &BASE_C_INC[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Imm) {
                    let mut x = 0;
                    if !check_gp_typex2(op0, op1, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Reject conditions outside the alias range (AL/NV are
                    // not valid for these aliases).
                    // NOTE(review): this test uses `> 0xE` while BaseCSet uses
                    // `>= 0xE` for the same purpose — one of the two is likely
                    // off by one; verify against the reference encoder.
                    let cond = op2.as_::<Imm>().value_as::<u64>();
                    if cond.wrapping_sub(2) > 0xE {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);                                              // sf
                    opcode.add_reg(op1.id(), 16);                                       // Rm = Rn
                    opcode.add_imm(cond_code_to_opcode_field((cond as u32) ^ 1), 12);   // inverted cond
                    opcode.add_reg(op1.id(), 5);                                        // Rn
                    opcode.add_reg(op0.id(), 0);                                        // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1749
            // Conditional select (CSEL/CSINC/CSINV/CSNEG): Rd, Rn, Rm, cond.
            Encoding::BaseCSel => {
                let op_data = &BASE_C_SEL[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    // All three registers must share the same width.
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id3(op0, op1, op2, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // cond is a 4-bit field; any condition (incl. AL/NV) is
                    // accepted for the non-alias form.
                    let cond = op3.as_::<Imm>().value_as::<u64>();
                    if cond > 0xF {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);                                        // sf
                    opcode.add_reg(op2.id(), 16);                                 // Rm
                    opcode.add_imm(cond_code_to_opcode_field(cond as u32), 12);   // cond
                    opcode.add_reg(op1.id(), 5);                                  // Rn
                    opcode.add_reg(op0.id(), 0);                                  // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1785
            // CSET/CSETM alias: Rd, cond. The table opcode presumably has the
            // Rn/Rm slots pre-filled (zero register) — only Rd and the
            // inverted condition are patched in here.
            Encoding::BaseCSet => {
                let op_data = &BASE_C_SET[encoding_index];

                if isign4 == enc_ops!(Reg, Imm) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id(op0, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Reject conditions outside the alias range (AL/NV are
                    // not valid for CSET-class aliases).
                    // NOTE(review): this test uses `>= 0xE` while BaseCInc uses
                    // `> 0xE` for the same purpose — one of the two is likely
                    // off by one; verify against the reference encoder.
                    let cond = op1.as_::<Imm>().value_as::<u64>();
                    if cond.wrapping_sub(2) >= 0xE {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);                                              // sf
                    opcode.add_imm(cond_code_to_opcode_field((cond as u32) ^ 1), 12);   // inverted cond
                    opcode.add_reg(op0.id(), 0);                                        // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1814
            // Min/max (SMIN/SMAX/UMIN/UMAX class), register and immediate
            // forms. The immediate form carries an 8-bit operand whose
            // signedness is derived from the opcode itself.
            Encoding::BaseMinMax => {
                let op_data = &BASE_MIN_MAX[encoding_index];

                // Register form: Rd, Rn, Rm.
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset(op_data.register_op);
                    opcode.add_imm(x, 31);           // sf
                    opcode.add_reg(op2.id(), 16);    // Rm
                    opcode.add_reg(op1.id(), 5);     // Rn
                    opcode.add_reg(op0.id(), 0);     // Rd
                    return self.buffer.write_u32(opcode.get());
                }

                // Immediate form: Rd, Rn, #imm8.
                if isign4 == enc_ops!(Reg, Reg, Imm) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    let imm = op2.as_::<Imm>().value_as::<u64>();

                    // Bit 18 of the table opcode distinguishes the unsigned
                    // variant (imm in 0..=255) from the signed one
                    // (imm in -128..=127).
                    if (op_data.immediate_op & (1u32 << 18)) != 0 {
                        if imm > 0xFF {
                            self.last_error = Some(AsmError::TooLarge);
                            return;
                        }
                    } else {
                        if (imm as i64) < -128 || (imm as i64) > 127 {
                            self.last_error = Some(AsmError::TooLarge);
                            return;
                        }
                    }

                    opcode.reset(op_data.immediate_op);
                    opcode.add_imm(x, 31);                      // sf
                    opcode.add_imm((imm & 0xFF) as u32, 10);    // imm8 (truncated to 8 bits)
                    opcode.add_reg(op1.id(), 5);                // Rn
                    opcode.add_reg(op0.id(), 0);                // Rd
                    return self.buffer.write_u32(opcode.get());
                }
            }
1872
            // AT/DC/IC/TLBI system operations, encoded via the generic SYS
            // instruction. op0 is a packed 15-bit immediate identifying the
            // operation; op1 is an optional X register operand.
            Encoding::BaseAtDcIcTlbi => {
                let op_data = &BASE_AT_DC_IC_TLBI[encoding_index];

                if isign4 == enc_ops!(Imm) || isign4 == enc_ops!(Imm, Reg) {
                    // Some operations require the register operand.
                    if op_data.mandatory_reg != 0 && isign4 != enc_ops!(Imm, Reg) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if op0.as_::<Imm>().value_as::<u64>() > 0x7FFF {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    // The packed immediate must match the table's mask/data
                    // pattern for this operation family.
                    let imm = op0.as_::<Imm>().value_as::<u32>();
                    if (imm & op_data.imm_verify_mask) != op_data.imm_verify_data {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Rt defaults to 31 (zero register) when no register is given.
                    let mut rt = 31;
                    if op1.is_reg() {
                        if !op1.as_::<Reg>().is_gp64() {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        if !check_gp_id(op1, 63) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        rt = op1.id() & 31;
                    }

                    // SYS base encoding; the packed imm fills op1/CRn/CRm/op2.
                    opcode.reset(0b11010101000010000000000000000000);
                    opcode.add_imm(imm, 5);
                    opcode.add_reg(rt, 0);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1914
            // MRS Xt, <system register>: read a system register identified by
            // a packed 16-bit immediate (o0:op1:CRn:CRm:op2).
            Encoding::BaseMrs => {
                if isign4 == enc_ops!(Reg, Imm) {
                    // Destination must be a 64-bit GP register.
                    if !op0.as_::<Reg>().is_gp64() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id(op0, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if op1.as_::<Imm>().value_as::<u64>() > 0xFFFF {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    // The top bit of the packed id must be set — presumably
                    // the o0 high bit required for MRS-accessible registers;
                    // confirm against the system-register encoding scheme.
                    let imm = op1.as_::<Imm>().value_as::<u32>();
                    if (imm & (1 << 15)) == 0 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(0b11010101001100000000000000000000);
                    opcode.add_imm(imm, 5);         // packed system-register id
                    opcode.add_reg(op0.id(), 0);    // Rt
                    return self.buffer.write_u32(opcode.get());
                }
            }
1944
            // MSR, two forms:
            //   MSR <system register>, Xt   — packed 16-bit register id + Xt,
            //   MSR <pstate field>, #imm4   — immediate PSTATE-field form.
            Encoding::BaseMsr => {
                // Register form.
                if isign4 == enc_ops!(Imm, Reg) {
                    if !op1.as_::<Reg>().is_gp64() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if op0.as_::<Imm>().value_as::<u64>() > 0xFFFF {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    // Same top-bit requirement as MRS — presumably the o0
                    // high bit of the system-register encoding; confirm.
                    let imm = op0.as_::<Imm>().value_as::<u32>();
                    if (imm & (1 << 15)) == 0 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_gp_id(op1, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(0b11010101000100000000000000000000);
                    opcode.add_imm(imm, 5);         // packed system-register id
                    opcode.add_reg(op1.id(), 0);    // Rt
                    return self.buffer.write_u32(opcode.get());
                }

                // Immediate (PSTATE) form: op0 packs op1:op2 (5 bits),
                // op1 is the 4-bit CRm immediate.
                if isign4 == enc_ops!(Imm, Imm) {
                    if op0.as_::<Imm>().value_as::<u64>() > 0x1F {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    if op1.as_::<Imm>().value_as::<u64>() > 0xF {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    let op = op0.as_::<Imm>().value_as::<u32>();
                    let crm = op1.as_::<Imm>().value_as::<u32>();

                    // Split the packed field: top 2 bits -> op1, low 3 -> op2.
                    let op1_val = op >> 3;
                    let op2_val = op & 0x7;

                    // Base encoding has Rt = 0b11111 (zero register) baked in.
                    opcode.reset(0b11010101000000000100000000011111);
                    opcode.add_imm(op1_val, 16);
                    opcode.add_imm(crm, 8);
                    opcode.add_imm(op2_val, 5);
                    return self.buffer.write_u32(opcode.get());
                }
            }
1998
            // SYS #op1, Cn, Cm, #op2 [, Xt]: generic system instruction. The
            // optional fifth operand (Xt) defaults to the zero register.
            Encoding::BaseSys => {
                if isign4 == enc_ops!(Imm, Imm, Imm, Imm) {
                    // Field widths: op1 3 bits, CRn 4, CRm 4, op2 3.
                    if op0.as_::<Imm>().value_as::<u64>() > 0x7
                        || op1.as_::<Imm>().value_as::<u64>() > 0xF
                        || op2.as_::<Imm>().value_as::<u64>() > 0xF
                        || op3.as_::<Imm>().value_as::<u64>() > 0x7
                    {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    let op1_val = op0.as_::<Imm>().value_as::<u32>();
                    let crn = op1.as_::<Imm>().value_as::<u32>();
                    let crm = op2.as_::<Imm>().value_as::<u32>();
                    let op2_val = op3.as_::<Imm>().value_as::<u32>();
                    // Rt defaults to 31 (zero register) when no Xt is given.
                    let mut rt = 31;

                    // Optional Xt is taken from the raw operand list (index 4).
                    let op4 = *ops.get(4).unwrap_or(&NOREG);
                    if op4.is_reg() {
                        if !op4.as_::<Reg>().is_gp64() {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        if !check_gp_id(op4, 63) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        rt = op4.id() & 31;
                    } else if !op4.is_none() {
                        // A fifth operand that is neither a register nor
                        // absent is rejected.
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset(0b11010101000010000000000000000000);
                    opcode.add_imm(op1_val, 16);    // op1
                    opcode.add_imm(crn, 12);        // CRn
                    opcode.add_imm(crm, 8);         // CRm
                    opcode.add_imm(op2_val, 5);     // op2
                    opcode.add_reg(rt, 0);          // Rt
                    return self.buffer.write_u32(opcode.get());
                }
            }
2043
            // Register branch (BR/BLR/RET class): a single 64-bit GP operand
            // placed in the Rn field.
            Encoding::BaseBranchReg => {
                let op_data = &BASE_BRANCH_REG[encoding_index];
                if isign4 == enc_ops!(Reg) {
                    if !op0.as_::<Reg>().is_gp64() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id(op0, 63) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_reg(op0.id(), 5);    // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2062
            // PC-relative branch (B/BL and conditional B.cond): target is a
            // label or immediate, resolved through emit_rel!.
            Encoding::BaseBranchRel => {
                let op_data = &BASE_BRANCH_REL[encoding_index];
                if isign4 == enc_ops!(Label) || isign4 == enc_ops!(Imm) {
                    opcode.reset(op_data.opcode);
                    rm_rel = op0;

                    // Conditional path: taken when a condition code was
                    // attached to the instruction, or the table opcode
                    // already has bit 30 set.
                    if inst_cc as u32 != 0 || (opcode.0 & (1 << 30)) != 0 {
                        if opcode.has_x() {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        opcode.0 |= 1 << 30;
                        opcode.add_imm(cond_code_to_opcode_field(inst_cc as u32), 0);   // cond
                        // B.cond uses a 19-bit signed word offset at bit 5.
                        offset_format.reset_to_imm_type(OffsetType::SignedOffset, 4, 5, 19, 2);
                        rm_rel = op0;
                        // NOTE(review): emit_rel! is assumed to terminate this
                        // arm (return); otherwise the unconditional emit below
                        // would also run. Macro body not visible here.
                        emit_rel!();
                    }

                    // Unconditional B/BL: 26-bit signed word offset at bit 0.
                    offset_format.reset_to_imm_type(OffsetType::SignedOffset, 4, 0, 26, 2);
                    rm_rel = op0;
                    emit_rel!();
                }
            }
2087
            // Compare-and-branch (CBZ/CBNZ): Rt plus a label/immediate target
            // with a 19-bit signed word offset at bit 5.
            Encoding::BaseBranchCmp => {
                let op_data = &BASE_BRANCH_CMP[encoding_index];
                if isign4 == enc_ops!(Reg, Label) || isign4 == enc_ops!(Reg, Imm) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id(op0, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    opcode.reset(op_data.opcode);
                    opcode.add_imm(x, 31);          // sf
                    opcode.add_reg(op0.id(), 0);    // Rt
                    offset_format.reset_to_imm_type(OffsetType::SignedOffset, 4, 5, 19, 2);

                    rm_rel = op1;
                    emit_rel!();
                }
            }
2111
            // Test-bit-and-branch (TBZ/TBNZ): Rt, #bit, target. The bit
            // number splits into b5 (instruction bit 31) and b40 (bit 19);
            // a 14-bit signed word offset at bit 5 encodes the target.
            Encoding::BaseBranchTst => {
                let op_data = &BASE_BRANCH_TST[encoding_index];
                if isign4 == enc_ops!(Reg, Imm, Label) || isign4 == enc_ops!(Reg, Imm, Imm) {
                    let mut x = 0;
                    if !check_gp_typex(op0, 3, &mut x) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !check_gp_id(op0, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    let mut imm = op1.as_::<Imm>().value_as::<u64>();

                    opcode.reset(op_data.opcode);
                    // Bit numbers >= 32 require a 64-bit register; set b5
                    // (bit 31) and keep only the low 5 bits of the bit number.
                    if imm >= 32 {
                        if x == 0 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.add_imm(x, 31);
                        imm &= 0x1F;
                    }

                    opcode.add_reg(op0.id(), 0);        // Rt
                    opcode.add_imm(imm as u32, 19);     // b40
                    offset_format.reset_to_imm_type(OffsetType::SignedOffset, 4, 5, 14, 2);

                    rm_rel = op2;
                    emit_rel!();
                }
            }
2146
            // PRFM (prefetch): #prfop, <mem>. Supports register-offset,
            // scaled unsigned 12-bit offset, unscaled signed 9-bit offset,
            // and PC-relative literal addressing.
            Encoding::BasePrfm => {
                let op_data = &BASE_PRFM[encoding_index];
                if isign4 == enc_ops!(Imm, Mem) {
                    let m = op1.as_::<Mem>();
                    rm_rel = op1;

                    // Prefetch accesses are 8 bytes, so offsets scale by 2^3.
                    let imm_shift = 3u32;

                    // prfop is a 5-bit field.
                    if op0.as_::<Imm>().value_as::<u64>() > 0x1F {
                        self.last_error = Some(AsmError::TooLarge);
                        return;
                    }

                    let offset = m.offset();
                    let prfop = op0.as_::<Imm>().value_as::<u32>();

                    if m.has_base_reg() {
                        // Register-offset form: [base, index {, extend {#amount}}].
                        if m.has_index() {
                            let opt = SHIFT_OP_TO_LD_ST_OP_MAP[m.shift_op() as usize];
                            if opt == 0xFF {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }

                            // Only a shift of 0 or exactly 3 (the access-size
                            // scale) is encodable (S bit).
                            let shift = m.shift();
                            let s = if shift != 0 { 1 } else { 0 };

                            if s != 0 && shift != imm_shift {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }

                            opcode.reset((op_data.register_op as u32) << 21);
                            opcode.add_imm(opt as u32, 13);     // option (extend kind)
                            opcode.add_imm(s, 12);              // S (scaled index)
                            opcode.0 |= 1 << 11;                // register-offset selector
                            opcode.add_imm(prfop, 0);           // prfop in Rt slot
                            opcode.add_reg(m.base_id(), 5);     // Rn
                            opcode.add_reg(m.index_id(), 16);   // Rm
                            return self.buffer.write_u32(opcode.get());
                        }

                        // Scaled unsigned 12-bit form: offset must be a
                        // non-negative multiple of 8 that round-trips.
                        let offset32 = offset as i32;
                        let imm12 = (offset32 as u32) >> imm_shift;

                        if imm12 < (1 << 12) && ((imm12 << imm_shift) as i32) == offset32 {
                            opcode.reset((op_data.s_offset_op as u32) << 22);
                            opcode.add_imm(imm12, 10);
                            opcode.add_imm(prfop, 0);
                            opcode.add_reg(m.base_id(), 5);
                            return self.buffer.write_u32(opcode.get());
                        }

                        // Unscaled signed 9-bit fallback (PRFUM-style).
                        if offset32 >= -256 && offset32 < 256 {
                            opcode.reset((op_data.u_offset_op as u32) << 21);
                            opcode.add_imm((offset32 as u32) & 0x1FF, 12);
                            opcode.add_imm(prfop, 0);
                            opcode.add_reg(m.base_id(), 5);
                            return self.buffer.write_u32(opcode.get());
                        }

                        // Offset fits none of the encodable forms.
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    } else {
                        // Literal (PC-relative) form: 19-bit signed word
                        // offset at bit 5, resolved via emit_rel!.
                        opcode.reset((op_data.literal_op as u32) << 24);
                        opcode.add_imm(prfop, 0);
                        offset_format.reset_to_imm_type(OffsetType::SignedOffset, 4, 5, 19, 2);
                        rm_rel = op1;
                        emit_rel!();
                    }
                }
            }
2219
            // Single-register load/store (LDR/STR family). Supports three
            // addressing forms: register offset (optionally extended/shifted),
            // scaled unsigned 12-bit immediate, and a small unscaled immediate;
            // without a base register it falls back to a PC-relative literal.
            Encoding::BaseLdSt => {
                let op_data = &BASE_LD_ST[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    rm_rel = op1;

                    // `x` receives the size/X selector derived from the register
                    // type by check_gp_typex (xor'ed into the opcode below).
                    let mut x = 0;
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_gp_id(op0, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Scale of the unsigned-offset form; a 64-bit register bumps
                    // the scale by one only when the base shift is 2 (word-sized).
                    let x_shift_mask = if op_data.u_offset_shift == 2 { 1 } else { 0 };
                    let imm_shift = (op_data.u_offset_shift as u32) + (x & x_shift_mask);

                    let offset = m.offset();

                    if m.has_base_reg() {
                        if m.has_index() {
                            // Register-offset form: map the extend/shift operation;
                            // 0xFF marks a shift op invalid for loads/stores.
                            let opt = SHIFT_OP_TO_LD_ST_OP_MAP[m.shift_op() as usize];
                            if opt == 0xFF {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }

                            // A non-zero shift amount must equal the access scale.
                            let shift = m.shift();
                            let s = if shift != 0 { 1 } else { 0 };

                            if s != 0 && shift != imm_shift {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }

                            opcode.reset((op_data.register_op as u32) << 21);
                            opcode.xor_imm(x, op_data.x_offset as u32);
                            opcode.add_imm(opt as u32, 13);
                            opcode.add_imm(s, 12);
                            opcode.0 |= 1 << 11;
                            opcode.add_reg(op0.id(), 0);
                            opcode.add_reg(m.base_id(), 5);
                            opcode.add_reg(m.index_id(), 16);
                            return self.buffer.write_u32(opcode.get());
                        }

                        let offset32 = offset as i32;
                        let imm12 = (offset32 as u32) >> imm_shift;

                        // Scaled unsigned 12-bit offset form: offset must be
                        // non-negative and exactly divisible by the scale.
                        if imm12 < (1 << 12) && ((imm12 << imm_shift) as i32) == offset32 {
                            opcode.reset((op_data.u_offset_op as u32) << 22);
                            opcode.xor_imm(x, op_data.x_offset as u32);
                            opcode.add_imm(imm12, 10);
                            opcode.add_reg(op0.id(), 0);
                            opcode.add_reg(m.base_id(), 5);
                            return self.buffer.write_u32(opcode.get());
                        }

                        // NOTE(review): suspected port bug. This unscaled
                        // [-256, 255] branch reuses `u_offset_op << 22` (the
                        // scaled-imm12 opcode above) while placing a 9-bit
                        // immediate at bit 12, and `m.is_pre_or_post()` is never
                        // consulted in this arm. The analogous unscaled path in
                        // the PRFM arm uses a *different* op shifted by 21, and
                        // BaseRMSImm9 has a dedicated pre/post op. Verify the
                        // LDUR/STUR and writeback forms against the A64 manual.
                        if offset32 >= -256 && offset32 < 256 {
                            opcode.reset((op_data.u_offset_op as u32) << 22);
                            opcode.xor_imm(x, op_data.x_offset as u32);
                            opcode.add_imm((offset32 as u32) & 0x1FF, 12);
                            opcode.add_reg(op0.id(), 0);
                            opcode.add_reg(m.base_id(), 5);
                            return self.buffer.write_u32(opcode.get());
                        }

                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    } else {
                        // No base register: PC-relative literal form, if this
                        // instruction has one (literal_op == 0 means it doesn't).
                        if op_data.literal_op == 0 {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        opcode.reset((op_data.literal_op as u32) << 24);
                        opcode.xor_imm(x, op_data.x_offset);
                        opcode.add_reg(op0.id(), 0);
                        // 19-bit signed word offset at bit 5 (LDR-literal layout).
                        offset_format.reset_to_imm_type(OffsetType::Ldr, 4, 5, 19, 2);
                        emit_rel!();
                    }
                }
            }
2306
            // Register-pair load/store (LDP/STP family): signed, scaled 7-bit
            // immediate with optional pre-/post-index writeback.
            Encoding::BaseLdpStp => {
                let op_data = &BASE_LDP_STP[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Mem) {
                    let m = op2.as_::<Mem>();
                    rm_rel = op2;

                    // Both registers must share the width encoded by `x`.
                    let mut x = 0;
                    if !check_gp_typex2(op0, op1, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_gp_id2(op0, op1, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // The 7-bit immediate is scaled by the access size (one more
                    // bit of scale for 64-bit registers).
                    let offset_shift = op_data.offset_shift as u32 + x;
                    let offset32 = (m.offset_lo32() as i32) >> offset_shift;

                    // Reject offsets that are not exactly divisible by the scale.
                    if (offset32 as u32) << offset_shift != m.offset_lo32() as u32 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Signed 7-bit immediate range.
                    const I7_MAX: i32 = (1 << 6) - 1;
                    const I7_MIN: i32 = -(1 << 6);

                    if offset32 < I7_MIN || offset32 > I7_MAX {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // Pre/post-index with a zero offset degrades to the plain
                    // offset form; otherwise use the writeback opcode (bit 24
                    // distinguishes pre- from post-index).
                    if m.is_pre_or_post() && offset32 != 0 {
                        if op_data.pre_post_op == 0 {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        opcode.reset((op_data.pre_post_op as u32) << 22);
                        opcode.add_imm(m.is_pre_index() as u32, 24);
                    } else {
                        opcode.reset((op_data.offset_op as u32) << 22);
                    }
                    opcode.add_imm(x, op_data.x_offset as u32);
                    opcode.add_imm((offset32 as u32) & 0x7F, 15);
                    opcode.add_reg(op1.id(), 10); // Rt2
                    opcode.add_reg(op0.id(), 0); // Rt
                    opcode.add_reg(m.base_id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2359
            // Store-exclusive (STXR family): Ws status register at bits 16..20,
            // Rt at 0, base Rn at 5. The status register is always 32-bit.
            Encoding::BaseStx => {
                let op_data = &BASE_STX[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Mem) {
                    let m = op2.as_::<Mem>();
                    let mut x = 0;
                    if !op0.as_::<Reg>().is_gp32() || !check_gp_typex(op1, op_data.reg_type, &mut x)
                    {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id2(op0, op1, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, op_data.x_offset as _);
                    opcode.add_reg(op0.id(), 16); // Ws (status)
                    opcode.add_reg(op1.id(), 0); // Rt
                    rm_rel = op2;
                    opcode.add_reg(m.base_id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2383
            // Load-exclusive pair (LDXP/LDAXP): two same-width destination
            // registers, Rt at 0 and Rt2 at 10, base Rn at 5.
            Encoding::BaseLdxp => {
                let op_data = &BASE_LDXP[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Mem) {
                    let m = op2.as_::<Mem>();
                    let mut x = 0;
                    // Both destinations must be valid GP registers of the same
                    // signature (width).
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) || !check_signature!(op0, op1)
                    {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id2(op0, op1, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, op_data.x_offset as _);
                    opcode.add_reg(op1.id(), 10); // Rt2
                    opcode.add_reg(op0.id(), 0); // Rt
                    rm_rel = op2;
                    opcode.add_reg(m.base_id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2407
            // Store-exclusive pair (STXP/STLXP): 32-bit Ws status at 16, then a
            // same-width source pair Rt at 0 / Rt2 at 10, base Rn at 5.
            Encoding::BaseStxp => {
                let op_data = &BASE_STXP[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg, Mem) {
                    let m = op3.as_::<Mem>();
                    let mut x = 0;
                    if !op0.as_::<Reg>().is_gp32()
                        || !check_gp_typex(op1, op_data.reg_type, &mut x)
                        || !check_signature!(op1, op2)
                    {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id3(op0, op1, op2, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, op_data.x_offset as _);
                    opcode.add_reg(op0.id(), 16); // Ws (status)
                    opcode.add_reg(op2.id(), 10); // Rt2
                    opcode.add_reg(op1.id(), 0); // Rt
                    rm_rel = op3;
                    opcode.add_reg(m.base_id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2434
            // Register + memory form that accepts no immediate displacement
            // (e.g. acquire/release loads and stores): only [Rn] addressing,
            // enforced by emit_mem_base_no_imm_rn5!.
            Encoding::BaseRMNoImm => {
                let op_data = &BASE_RM_NO_IMM[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    let mut x = 0;
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    // Per-instruction register-id ceiling (some forms allow SP/ZR,
                    // others don't).
                    if !check_gp_id(op0, op_data.reg_hi_id) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, op_data.x_offset as _);
                    opcode.add_reg(op0.id(), 0); // Rt
                    rm_rel = op1;

                    emit_mem_base_no_imm_rn5!();
                }
            }
2456
            // Register + memory with a signed 9-bit scaled immediate, supporting
            // fixed-offset as well as pre-/post-index writeback addressing.
            Encoding::BaseRMSImm9 => {
                let op_data = &BASE_RM_SIMM9[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    let mut x = 0;
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id(op0, op_data.reg_hi_id) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    // Only base-register addressing (no index register) is valid.
                    if m.has_base_reg() && !m.has_index() {
                        // The stored immediate is the offset divided by the access
                        // size; reject offsets not divisible by the scale.
                        let offset32 = m.offset() as i32 >> op_data.imm_shift;
                        if (offset32 << op_data.imm_shift) != m.offset() as i32 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        // Signed 9-bit range.
                        if offset32 < -256 || offset32 > 255 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        if m.is_fixed_offset() {
                            opcode.reset(op_data.offset_op());
                        } else {
                            // Writeback form; bit 11 distinguishes pre-index
                            // from post-index.
                            opcode.reset(op_data.pre_post_op());
                            opcode.xor_imm(m.is_pre_index() as u32, 11);
                        }
                        opcode.xor_imm(x, op_data.x_offset as u32);
                        opcode.add_imm((offset32 as u32) & 0x1FF, 12);
                        opcode.add_reg(op0.id(), 0); // Rt
                        opcode.add_reg(m.base_id(), 5); // Rn
                        return self.buffer.write_u32(opcode.get());
                    }
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }
            }
2496
            // Register + memory with a signed, scaled 10-bit immediate split
            // across the encoding (top bit at 22, low nine bits at 12), as used
            // by the LDRAA/LDRAB-style forms.
            Encoding::BaseRMSImm10 => {
                let op_data = &BASE_RM_SIMM10[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    let mut x = 0;
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id(op0, op_data.reg_hi_id) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if m.has_base_reg() && !m.has_index() {
                        // Offset must be exactly divisible by the access scale.
                        let offset32 = m.offset() as i32 >> op_data.imm_shift;
                        if (offset32 << op_data.imm_shift) != m.offset() as i32 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        // Signed 10-bit range.
                        if offset32 < -512 || offset32 > 511 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        let offset32 = (offset32 as u32) & 0x3FF;
                        opcode.reset(op_data.opcode());
                        // NOTE(review): the writeback bit (11) is applied
                        // unconditionally from is_pre_index(), and post-index is
                        // never rejected — a post-index operand encodes the same
                        // as a fixed offset. Confirm that is intended.
                        opcode.xor_imm(m.is_pre_index() as u32, 11);
                        opcode.xor_imm(x, op_data.x_offset as u32);
                        opcode.add_imm(offset32 >> 9, 22); // sign/top bit
                        opcode.add_imm(offset32, 12); // low 9 bits
                        opcode.add_reg(op0.id(), 0); // Rt
                        opcode.add_reg(m.base_id(), 5); // Rn
                        return self.buffer.write_u32(opcode.get());
                    }
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }
            }
2534
            // Atomic read-modify-write (LDADD/LDSET/SWP family): source Rs at
            // bits 16..20, destination Rt at 0, base Rn at 5, [Rn] only.
            Encoding::BaseAtomicOp => {
                let op_data = &BASE_ATOMIC_OP[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Mem) {
                    let m = op2.as_::<Mem>();
                    let mut x = 0;
                    // Both registers must be valid GPs of the same width.
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) || !check_signature!(op0, op1)
                    {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id2(op0, op1, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, op_data.x_offset as _);
                    opcode.add_reg(op0.id(), 16); // Rs
                    opcode.add_reg(op1.id(), 0); // Rt
                    rm_rel = op2;
                    opcode.add_reg(m.base_id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2558
            // Atomic store alias (STADD/STSET family): encoded as the RMW form
            // with the destination Rt hard-wired to ZR (31).
            Encoding::BaseAtomicSt => {
                let op_data = &BASE_ATOMIC_ST[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    let mut x = 0;
                    if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if !check_gp_id(op0, 31) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(x, op_data.x_offset as _);
                    opcode.add_reg(op0.id(), 16); // Rs
                    opcode.add_reg(31, 0); // Rt = ZR (store-only alias)
                    rm_rel = op1;
                    opcode.add_reg(m.base_id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2581
            // Compare-and-swap pair (CASP family): four registers plus a memory
            // operand (the fifth operand slot). Each register pair must be
            // even-numbered and consecutive (Rs, Rs+1 / Rt, Rt+1), and the two
            // pairs must be distinct.
            Encoding::BaseAtomicCasp => {
                let op_data = &BASE_ATOMIC_CASP[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg, Reg) {
                    // The memory operand is the fifth operand (index 4).
                    let op4 = *ops.get(4).unwrap_or(&NOREG);
                    if op4.is_mem() {
                        let m = op4.as_::<Mem>();
                        let mut x = 0;
                        if !check_gp_typex(op0, op_data.reg_type, &mut x) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        // All four registers must share the same width.
                        if !check_signature!(op0, op1, op2, op3) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        let id0 = op0.id();
                        let id2 = op2.id();
                        // Pair bases: even, distinct, and <= 30 so the +1
                        // partner exists.
                        if (id0 & 1) != 0 || (id2 & 1) != 0 || id0 == id2 || id0 > 30 || id2 > 30 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        // Each pair must be consecutive registers.
                        if (id0 + 1) != op1.id() || (id2 + 1) != op3.id() {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset(op_data.opcode());
                        opcode.add_imm(x, op_data.x_offset as _);
                        opcode.add_reg(op0.id(), 16); // Rs
                        opcode.add_reg(op2.id(), 0); // Rt
                        rm_rel = &op4;
                        emit_mem_base_no_imm_rn5!();
                    }
                }
            }
2616
            // FP across-vector reduction (scalar destination, vector source),
            // e.g. the FMAXV-style forms: scalar H/S destination, .4H/.8H/.4S
            // source whose element type must match the destination size.
            Encoding::FSimdSV => {
                let op_data = &F_SIMD_SV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg) {
                    // Q bit from the source register: Vec64 -> 0, Vec128 -> 1;
                    // anything below Vec64 maps to u32::MAX and is rejected.
                    let q = op1.as_::<Reg>().typ() as u32;
                    let q = if q >= RegType::Vec64 as u32 {
                        q - RegType::Vec64 as u32
                    } else {
                        u32::MAX
                    };
                    if q > 1 {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    // The scalar destination must not carry an element type.
                    if op0.as_::<Vec>().has_element_type() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    // Destination size relative to Vec16: H -> 0, S -> 1.
                    let sz = op0.as_::<Reg>().typ() as u32;
                    let sz = if sz >= RegType::Vec16 as u32 {
                        sz - RegType::Vec16 as u32
                    } else {
                        u32::MAX
                    };
                    // Source element size relative to H: H -> 0, S -> 1.
                    let element_sz = op1.as_::<Vec>().element_type() as u32;
                    let element_sz = if element_sz >= VecElementType::H as u32 {
                        element_sz - VecElementType::H as u32
                    } else {
                        u32::MAX
                    };

                    // Only H/S are valid and both sides must agree.
                    if (sz | element_sz) > 1 || sz != element_sz {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    // An S-sized reduction requires a 128-bit source vector.
                    if sz != 0 && q == 0 {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset((op_data.opcode as u32) << 10);
                    // Half-precision variant toggles bit 29.
                    if sz == 0 {
                        opcode.0 ^= 1 << 29;
                    }
                    opcode.add_imm(q, 30);
                    opcode.add_reg(op0.id(), 0); // Rd
                    opcode.add_reg(op1.id(), 5); // Rn
                    return self.buffer.write_u32(opcode.get());
                }
            }
2670
            // Two-operand FP/SIMD instruction (Rd, Rn): picks between the
            // scalar and vector opcodes based on the operand shape.
            Encoding::FSimdVV => {
                let op_data = &F_SIMD_VV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg) {
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if let Some(fp_opcode) = pick_fp_opcode(
                        op0.as_::<Vec>(),
                        op_data.scalar_op(),
                        op_data.scalar_hf(),
                        op_data.vector_op(),
                        op_data.vector_hf(),
                        &mut 0,
                    ) {
                        opcode.reset(fp_opcode.0);
                        emit_rd0_rn5!();
                    }

                    // Reached when pick_fp_opcode rejects the operand shape
                    // (emit_rd0_rn5! is presumed to return after emitting —
                    // TODO confirm the macro returns).
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }
            }
2696
            // Three-operand FP/SIMD instruction (Rd, Rn, Rm): all operands must
            // share one signature; scalar vs. vector opcode chosen by shape.
            Encoding::FSimdVVV => {
                let op_data = &F_SIMD_VVV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if let Some(fp_opcode) = pick_fp_opcode(
                        op0.as_::<Vec>(),
                        op_data.scalar_op(),
                        op_data.scalar_hf(),
                        op_data.vector_op(),
                        op_data.vector_hf(),
                        &mut 0,
                    ) {
                        opcode.reset(fp_opcode.0);
                        emit_rd0_rn5_rm16!();
                    }

                    // Reached when pick_fp_opcode rejects the operand shape
                    // (emit_rd0_rn5_rm16! is presumed to return after emitting —
                    // TODO confirm the macro returns).
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }
            }
2722
2723            Encoding::FSimdVVVe => {
2724                let op_data = &F_SIMD_VVVE[encoding_index];
2725
2726                if isign4 == enc_ops!(Reg, Reg, Reg) {
2727                    if !op2.as_::<Vec>().has_element_index() {
2728                        if !check_signature!(op0, op1, op2) {
2729                            self.last_error = Some(AsmError::InvalidInstruction);
2730                            return;
2731                        }
2732
2733                        if let Some(fp_opcode) = pick_fp_opcode(
2734                            op0.as_::<Vec>(),
2735                            op_data.scalar_op(),
2736                            op_data.scalar_hf(),
2737                            op_data.vector_op(),
2738                            op_data.vector_hf(),
2739                            &mut 0,
2740                        ) {
2741                            opcode.reset(fp_opcode.0);
2742
2743                            emit_rd0_rn5_rm16!();
2744                        }
2745
2746                        self.last_error = Some(AsmError::InvalidInstruction);
2747                        return;
2748                    } else {
2749                        if !check_signature!(op0, op1) {
2750                            self.last_error = Some(AsmError::InvalidInstruction);
2751                            return;
2752                        }
2753
2754                        let q = op1.as_::<Reg>().is_vec128() as u32;
2755                        let mut sz = 0;
2756                        if let Some((fp_opcode)) = pick_fp_opcode(
2757                            op0.as_::<Vec>(),
2758                            op_data.element_scalar_op(),
2759                            5,
2760                            op_data.element_vector_op(),
2761                            5,
2762                            &mut sz,
2763                        ) {
2764                            if sz == 0 && op2.as_::<Reg>().id() > 15 {
2765                                self.last_error = Some(AsmError::InvalidOperand);
2766                                return;
2767                            }
2768
2769                            let element_index = op2.as_::<Vec>().element_index();
2770                            if element_index > (7u32 >> sz) {
2771                                self.last_error = Some(AsmError::InvalidOperand);
2772                                return;
2773                            }
2774
2775                            let hlm = element_index << sz;
2776                            opcode.reset(fp_opcode.0);
2777                            opcode.add_imm(q, 30);
2778                            opcode.add_imm(hlm & 3, 20);
2779                            opcode.add_imm(hlm >> 2, 11);
2780                            emit_rd0_rn5_rm16!();
2781                        }
2782
2783                        self.last_error = Some(AsmError::InvalidInstruction);
2784                        return;
2785                    }
2786                }
2787            }
2788
            // Four-operand FP/SIMD instruction (Rd, Rn, Rm, Ra), e.g. the
            // fused multiply-add forms; all operands share one signature.
            Encoding::FSimdVVVV => {
                let op_data = &F_SIMD_VVVV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg, Reg) {
                    if !check_signature!(op0, op1, op2, op3) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if let Some(fp_opcode) = pick_fp_opcode(
                        op0.as_::<Vec>(),
                        op_data.scalar_op(),
                        op_data.scalar_hf(),
                        op_data.vector_op(),
                        op_data.vector_hf(),
                        &mut 0,
                    ) {
                        opcode.reset(fp_opcode.0);
                        emit_rd0_rn5_rm16_ra10!();
                    }

                    // Reached when pick_fp_opcode rejects the operand shape
                    // (emit_rd0_rn5_rm16_ra10! is presumed to return after
                    // emitting — TODO confirm the macro returns).
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }
            }
2814
2815            Encoding::SimdFcadd => {
2816                let op_data = &SIMD_FCADD[encoding_index];
2817
2818                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
2819                    if !check_signature!(op0, op1, op2) || op0.as_::<Vec>().has_element_index() {
2820                        self.last_error = Some(AsmError::InvalidInstruction);
2821                        return;
2822                    }
2823
2824                    let q = (op0.as_::<Reg>().is_vec128() as u32).saturating_sub(1);
2825                    if q > 1 {
2826                        self.last_error = Some(AsmError::InvalidInstruction);
2827                        return;
2828                    }
2829
2830                    let mut sz = op0.as_::<Vec>().element_type() as u32;
2831                    sz = sz.saturating_sub(1);
2832                    if sz == 0 || sz > 3 {
2833                        self.last_error = Some(AsmError::InvalidInstruction);
2834                        return;
2835                    }
2836
2837                    let mut rot = 0u32;
2838                    let imm_val = op3.as_::<Imm>().value();
2839                    if imm_val == 270 {
2840                        rot = 1;
2841                    } else if imm_val != 90 {
2842                        self.last_error = Some(AsmError::InvalidOperand);
2843                        return;
2844                    }
2845
2846                    opcode.reset(op_data.opcode());
2847                    opcode.add_imm(q, 30);
2848                    opcode.add_imm(sz, 22);
2849                    opcode.add_imm(rot, 12);
2850                    emit_rd0_rn5_rm16!();
2851                }
2852            }
2853
2854            Encoding::SimdFccmpFccmpe => {
2855                let op_data = &SIMD_FCCMP_FCCMPE[encoding_index];
2856
2857                if isign4 == enc_ops!(Reg, Reg, Imm, Imm) {
2858                    let mut sz = op0.as_::<Reg>().typ() as u32;
2859                    sz = sz.saturating_sub(4);
2860                    if sz > 2 {
2861                        self.last_error = Some(AsmError::InvalidInstruction);
2862                        return;
2863                    }
2864
2865                    if !check_signature!(op0, op1) || op0.as_::<Vec>().has_element_type() {
2866                        self.last_error = Some(AsmError::InvalidInstruction);
2867                        return;
2868                    }
2869
2870                    let nzcv = op2.as_::<Imm>().value();
2871                    let cond = op3.as_::<Imm>().value();
2872
2873                    if (nzcv | cond) > 0xFi64 {
2874                        self.last_error = Some(AsmError::InvalidOperand);
2875                        return;
2876                    }
2877
2878                    let type_field = (sz - 1) & 0x3u32;
2879
2880                    opcode.reset(op_data.opcode());
2881                    opcode.add_imm(type_field, 22);
2882                    opcode.add_imm(cond_code_to_opcode_field(cond as u32), 12);
2883                    opcode.add_imm(nzcv as u32, 0);
2884                    emit_rn5_rm16!();
2885                }
2886            }
2887
2888            Encoding::SimdFcm => {
2889                let op_data = &SIMD_FCM[encoding_index];
2890
2891                if isign4 == enc_ops!(Reg, Reg, Reg) && op_data.has_register_op() {
2892                    if !check_signature!(op0, op1, op2) {
2893                        self.last_error = Some(AsmError::InvalidInstruction);
2894                        return;
2895                    }
2896
2897                    if let Some(fp_opcode) = pick_fp_opcode(
2898                        op0.as_::<Vec>(),
2899                        op_data.register_scalar_op(),
2900                        op_data.register_scalar_hf(),
2901                        op_data.register_vector_op(),
2902                        op_data.register_vector_hf(),
2903                        &mut 0,
2904                    ) {
2905                        opcode.reset(fp_opcode.0);
2906                        emit_rd0_rn5_rm16!();
2907                    }
2908
2909                    self.last_error = Some(AsmError::InvalidInstruction);
2910                    return;
2911                }
2912
2913                if isign4 == enc_ops!(Reg, Reg, Imm) && op_data.has_zero_op() {
2914                    if !check_signature!(op0, op1) {
2915                        self.last_error = Some(AsmError::InvalidInstruction);
2916                        return;
2917                    }
2918
2919                    if op2.as_::<Imm>().value() != 0 {
2920                        self.last_error = Some(AsmError::InvalidOperand);
2921                        return;
2922                    }
2923
2924                    if let Some(fp_opcode) = pick_fp_opcode(
2925                        op0.as_::<Vec>(),
2926                        op_data.zero_scalar_op(),
2927                        5,
2928                        op_data.zero_vector_op(),
2929                        5,
2930                        &mut 0,
2931                    ) {
2932                        opcode.reset(fp_opcode.0);
2933                        emit_rd0_rn5!();
2934                    }
2935
2936                    self.last_error = Some(AsmError::InvalidInstruction);
2937                    return;
2938                }
2939            }
2940
2941            Encoding::SimdFcmla => {
2942                let op_data = &SIMD_FCMLA[encoding_index];
2943
2944                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
2945                    if !check_signature!(op0, op1) {
2946                        self.last_error = Some(AsmError::InvalidInstruction);
2947                        return;
2948                    }
2949
2950                    let q = (op0.as_::<Reg>().is_vec128() as u32).saturating_sub(1);
2951                    if q > 1 {
2952                        self.last_error = Some(AsmError::InvalidInstruction);
2953                        return;
2954                    }
2955
2956                    let mut sz = op0.as_::<Vec>().element_type() as u32;
2957                    sz = sz.saturating_sub(1);
2958                    if sz == 0 || sz > 3 {
2959                        self.last_error = Some(AsmError::InvalidInstruction);
2960                        return;
2961                    }
2962
2963                    let mut rot = 0u32;
2964                    match op3.as_::<Imm>().value() {
2965                        0 => rot = 0,
2966                        90 => rot = 1,
2967                        180 => rot = 2,
2968                        270 => rot = 3,
2969                        _ => {
2970                            self.last_error = Some(AsmError::InvalidOperand);
2971                            return;
2972                        }
2973                    }
2974
2975                    if !op2.as_::<Vec>().has_element_index() {
2976                        if !check_signature!(op1, op2) {
2977                            self.last_error = Some(AsmError::InvalidInstruction);
2978                            return;
2979                        }
2980
2981                        opcode.reset(op_data.regular_op());
2982                        opcode.add_imm(q, 30);
2983                        opcode.add_imm(sz, 22);
2984                        opcode.add_imm(rot, 11);
2985                        emit_rd0_rn5_rm16!();
2986                    } else {
2987                        if op0.as_::<Vec>().element_type() != op2.as_::<Vec>().element_type() {
2988                            self.last_error = Some(AsmError::InvalidOperand);
2989                            return;
2990                        }
2991
2992                        if !((sz == 1) || (q == 1 && sz == 2)) {
2993                            self.last_error = Some(AsmError::InvalidOperand);
2994                            return;
2995                        }
2996
2997                        let element_index = op2.as_::<Vec>().element_index();
2998                        let hl_field_shift = if sz == 1 { 0u32 } else { 1u32 };
2999                        let max_element_index = if q == 1 && sz == 1 { 3u32 } else { 1u32 };
3000
3001                        if element_index > max_element_index {
3002                            self.last_error = Some(AsmError::InvalidOperand);
3003                            return;
3004                        }
3005
3006                        let hl = element_index << hl_field_shift;
3007
3008                        opcode.reset(op_data.element_op());
3009                        opcode.add_imm(q, 30);
3010                        opcode.add_imm(sz, 22);
3011                        opcode.add_imm(hl & 1u32, 21);
3012                        opcode.add_imm(hl >> 1, 11);
3013                        opcode.add_imm(rot, 13);
3014                        emit_rd0_rn5_rm16!();
3015                    }
3016                }
3017            }
3018
3019            Encoding::SimdFcmpFcmpe => {
3020                let op_data = &SIMD_FCMP_FCMPE[encoding_index];
3021
3022                let sz = (op0.as_::<Reg>().typ() as u32).saturating_sub(RegType::Vec16 as u32);
3023                let type_field = (sz - 1) & 0x3u32;
3024
3025                if sz > 2 {
3026                    self.last_error = Some(AsmError::InvalidInstruction);
3027                    return;
3028                }
3029
3030                if op0.as_::<Vec>().has_element_type() {
3031                    self.last_error = Some(AsmError::InvalidInstruction);
3032                    return;
3033                }
3034
3035                opcode.reset(op_data.opcode());
3036                opcode.add_imm(type_field, 22);
3037
3038                if isign4 == enc_ops!(Reg, Reg) {
3039                    if !check_signature!(op0, op1) {
3040                        self.last_error = Some(AsmError::InvalidInstruction);
3041                        return;
3042                    }
3043
3044                    emit_rd0_rn5_rm16!();
3045                } else if isign4 == enc_ops!(Reg, Imm) {
3046                    if op1.as_::<Imm>().value() != 0 {
3047                        self.last_error = Some(AsmError::InvalidOperand);
3048                        return;
3049                    }
3050
3051                    opcode.0 |= 0x8;
3052                    emit_rd0_rn5!();
3053                }
3054            }
3055
3056            Encoding::SimdFcsel => {
3057                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
3058                    if !check_signature!(op0, op1, op2) {
3059                        self.last_error = Some(AsmError::InvalidInstruction);
3060                        return;
3061                    }
3062
3063                    let sz = (op0.as_::<Reg>().typ() as u32).saturating_sub(RegType::Vec16 as u32);
3064                    let type_field = (sz - 1) & 0x3u32;
3065
3066                    if sz > 2 || op0.as_::<Vec>().has_element_type() {
3067                        self.last_error = Some(AsmError::InvalidInstruction);
3068                        return;
3069                    }
3070
3071                    let cond = op3.as_::<Imm>().value() as u32;
3072                    if cond > 0xFu32 {
3073                        self.last_error = Some(AsmError::InvalidImmediate);
3074                        return;
3075                    }
3076
3077                    opcode.reset(0b00011110001000000000110000000000u32);
3078                    opcode.add_imm(type_field, 22);
3079                    opcode.add_imm(cond, 12);
3080                    emit_rd0_rn5_rm16!();
3081                }
3082            }
3083
3084            Encoding::SimdFcvt => {
3085                if isign4 == enc_ops!(Reg, Reg) {
3086                    let dst_sz =
3087                        (op0.as_::<Reg>().reg_type() as u32).saturating_sub(RegType::Vec16 as u32);
3088                    let src_sz =
3089                        (op1.as_::<Reg>().reg_type() as u32).saturating_sub(RegType::Vec16 as u32);
3090
3091                    if (dst_sz | src_sz) > 3 {
3092                        self.last_error = Some(AsmError::InvalidInstruction);
3093                        return;
3094                    }
3095
3096                    if op0.as_::<Vec>().has_element_type() || op1.as_::<Vec>().has_element_type() {
3097                        self.last_error = Some(AsmError::InvalidInstruction);
3098                        return;
3099                    }
3100
3101                    // Table that provides 'type' and 'opc' according to the dst/src combination.
3102                    let table: [u8; 16] = [
3103                        0xFFu8, // H <- H (Invalid).
3104                        0x03u8, // H <- S (type=00 opc=11).
3105                        0x13u8, // H <- D (type=01 opc=11).
3106                        0xFFu8, // H <- Q (Invalid).
3107                        0x30u8, // S <- H (type=11 opc=00).
3108                        0xFFu8, // S <- S (Invalid).
3109                        0x10u8, // S <- D (type=01 opc=00).
3110                        0xFFu8, // S <- Q (Invalid).
3111                        0x31u8, // D <- H (type=11 opc=01).
3112                        0x01u8, // D <- S (type=00 opc=01).
3113                        0xFFu8, // D <- D (Invalid).
3114                        0xFFu8, // D <- Q (Invalid).
3115                        0xFFu8, // Q <- H (Invalid).
3116                        0xFFu8, // Q <- S (Invalid).
3117                        0xFFu8, // Q <- D (Invalid).
3118                        0xFFu8, // Q <- Q (Invalid).
3119                    ];
3120
3121                    let type_opc = table[((dst_sz << 2) | src_sz) as usize];
3122                    if type_opc == 0xFFu8 {
3123                        self.last_error = Some(AsmError::InvalidInstruction);
3124                        return;
3125                    }
3126
3127                    opcode.reset(0b0001111000100010010000 << 10);
3128                    opcode.add_imm((type_opc as u32) >> 4, 22);
3129                    opcode.add_imm((type_opc as u32) & 15, 15);
3130                    emit_rd0_rn5!();
3131                }
3132            }
3133
            Encoding::SimdFcvtLN => {
                // FCVTL{2} / FCVTN{2} / FCVTXN{2} — vector lengthening /
                // narrowing float conversions (plus the scalar FCVTXN form).
                let op_data = &SIMD_FCVT_LN[encoding_index];

                if isign4 == enc_ops!(Reg, Reg) {
                    // Scalar form - only FCVTXN.
                    if op0.as_::<Vec>().is_vec32() && op1.as_::<Vec>().is_vec64() {
                        if op_data.has_scalar() == 0 {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        // Scalar operands must not carry a vector element type.
                        if op0.as_::<Vec>().has_element_type()
                            || op1.as_::<Vec>().has_element_type()
                        {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        opcode.reset(op_data.scalar_op());
                        opcode.0 |= 0x400000; // sz bit must be 1
                        emit_rd0_rn5!();
                        return;
                    }

                    opcode.reset(op_data.vector_op());

                    // For the "long" direction (FCVTL) op0 is the wide register
                    // and op1 the narrow one; for FCVTN/FCVTXN it is reversed.
                    // `rl` = wide (long) register, `rn` = narrow register.
                    let is_long = (inst_flags & InstFlag::Long as u16) != 0;
                    let (rl, rn) = if is_long {
                        (op0.as_::<Vec>(), op1.as_::<Vec>())
                    } else {
                        (op1.as_::<Vec>(), op0.as_::<Vec>())
                    };

                    // Q (the "2" suffix) is derived from the narrow register
                    // width and must agree with the opcode selected by the DB.
                    let q = (rn.reg_type() as u32).saturating_sub(RegType::Vec64 as u32);
                    if (opcode.has_q() as u32) != q {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    // .4s <-> .4h/.8h form (sz=0); not available for FCVTXN.
                    if rl.is_vec_s4()
                        && rn.element_type() == VecElementType::H
                        && op_data.is_cvtxn() == 0
                    {
                        emit_rd0_rn5!();
                        return;
                    }

                    // .2d <-> .2s/.4s form (sz=1).
                    if rl.is_vec_d2() && rn.element_type() == VecElementType::S {
                        opcode.0 |= 0x400000;
                        emit_rd0_rn5!();
                        return;
                    }

                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }
            }
3191
3192            Encoding::SimdFcvtSV => {
3193                let op_data = &SIMD_FCVT_SV[encoding_index];
3194
3195                // So we can support both IntToFloat and FloatToInt conversions.
3196                let is_float_to_int = op_data.is_float_to_int();
3197                let (op_gp, op_vec) = if is_float_to_int != 0 {
3198                    (&op0, &op1)
3199                } else {
3200                    (&op1, &op0)
3201                };
3202
3203                if isign4 == enc_ops!(Reg, Reg) {
3204                    if op_gp.as_::<Reg>().is_gp() && op_vec.as_::<Reg>().is_vec() {
3205                        let x = op_gp.as_::<Reg>().is_gp64() as u32;
3206                        let type_field = (op_vec.as_::<Reg>().reg_type() as u32)
3207                            .saturating_sub(RegType::Vec16 as u32);
3208
3209                        if type_field > 2u32 {
3210                            self.last_error = Some(AsmError::InvalidInstruction);
3211                            return;
3212                        }
3213
3214                        let type_val = (type_field - 1u32) & 0x3;
3215                        opcode.reset(op_data.general_op());
3216                        opcode.add_imm(type_val, 22);
3217                        opcode.add_imm(x, 31);
3218                        emit_rd0_rn5!();
3219                    } else if op0.as_::<Reg>().is_vec() && op1.as_::<Reg>().is_vec() {
3220                        if !check_signature!(op0, op1) {
3221                            self.last_error = Some(AsmError::InvalidInstruction);
3222                            return;
3223                        }
3224
3225                        if let Some(fp_opcode) = pick_fp_opcode(
3226                            op0.as_::<Vec>(),
3227                            op_data.scalar_int_op(),
3228                            5,
3229                            op_data.vector_int_op(),
3230                            5,
3231                            &mut 0,
3232                        ) {
3233                            opcode.reset(fp_opcode.0);
3234                            emit_rd0_rn5!();
3235                        } else {
3236                            self.last_error = Some(AsmError::InvalidInstruction);
3237                            return;
3238                        }
3239                    }
3240                } else if isign4 == enc_ops!(Reg, Reg, Imm) && op_data.is_fixed_point() {
3241                    let scale_val = op2.as_::<Imm>().value() as u32;
3242                    if scale_val >= 64 {
3243                        self.last_error = Some(AsmError::InvalidImmediate);
3244                        return;
3245                    }
3246
3247                    if scale_val == 0 {
3248                        self.last_error = Some(AsmError::InvalidOperand);
3249                        return;
3250                    }
3251
3252                    if op_gp.as_::<Reg>().is_gp() && op_vec.as_::<Reg>().is_vec() {
3253                        let x = op_gp.as_::<Reg>().is_gp64() as u32;
3254                        let type_field = (op_vec.as_::<Reg>().reg_type() as u32)
3255                            .saturating_sub(RegType::Vec16 as u32);
3256
3257                        let scale_limit = 32u32 << x;
3258                        if scale_val > scale_limit {
3259                            self.last_error = Some(AsmError::InvalidOperand);
3260                            return;
3261                        }
3262
3263                        let type_val = (type_field - 1u32) & 0x3;
3264                        opcode.reset(op_data.general_op() ^ 0x200000);
3265                        opcode.add_imm(type_val, 22);
3266                        opcode.add_imm(x, 31);
3267                        opcode.add_imm(64u32 - scale_val, 10);
3268                        emit_rd0_rn5!();
3269                    } else if op0.as_::<Reg>().is_vec() && op1.as_::<Reg>().is_vec() {
3270                        if !check_signature!(op0, op1) {
3271                            self.last_error = Some(AsmError::InvalidInstruction);
3272                            return;
3273                        }
3274
3275                        let mut sz = 0u32;
3276                        if let Some(fp_opcode) = pick_fp_opcode(
3277                            op0.as_::<Vec>(),
3278                            op_data.scalar_fp_op(),
3279                            5,
3280                            op_data.vector_fp_op(),
3281                            5,
3282                            &mut sz,
3283                        ) {
3284                            let scale_limit = 16u32 << sz;
3285                            if scale_val > scale_limit {
3286                                self.last_error = Some(AsmError::InvalidOperand);
3287                                return;
3288                            }
3289
3290                            let imm = (!(scale_val) + 1) & ((1u32 << (sz + 4 + 1)) - 1);
3291                            opcode.reset(fp_opcode.0);
3292                            opcode.add_imm(imm, 16);
3293                            emit_rd0_rn5!();
3294                        } else {
3295                            self.last_error = Some(AsmError::InvalidInstruction);
3296                            return;
3297                        }
3298                    }
3299                }
3300            }
3301
            Encoding::SimdFmlal => {
                // FMLAL/FMLSL{2} and friends — widening multiply-accumulate,
                // in both the vector and the indexed-element form.
                let op_data = &SIMD_FMLAL[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Q relative to the 64-bit vector register: 0=Vec64, 1=Vec128.
                    let mut q =
                        (op0.as_::<Reg>().reg_type() as u32).saturating_sub(RegType::Vec64 as u32);
                    let q_is_optional = op_data.optional_q() != 0;

                    if q_is_optional {
                        // Instruction accepts both 64-bit and 128-bit vectors.
                        if q > 1 {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }
                    } else {
                        // Instruction requires a 128-bit destination, but is
                        // encoded with Q=0 (the "2" variant owns Q=1).
                        if q != 1 {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        q = 0;
                    }

                    // Destination must be exactly one size class above the
                    // source when Q is optional, and element types must match
                    // the instruction-DB expectations (ta/tb).
                    if (op0.as_::<Reg>().reg_type() as u32)
                        != (op1.as_::<Reg>().reg_type() as u32) + if q_is_optional { 1 } else { 0 }
                        || (op0.as_::<Vec>().element_type() as u32) != op_data.ta as u32
                        || (op1.as_::<Vec>().element_type() as u32) != op_data.tb as u32
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if !op2.as_::<Vec>().has_element_index() {
                        // Vector form: both sources have the same signature.
                        if !check_signature!(&op1, &op2) {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        opcode.reset(op_data.vector_op());
                        opcode.add_imm(q, 30);
                        emit_rd0_rn5_rm16!();
                    } else {
                        // Indexed-element form.
                        if (op2.as_::<Vec>().element_type() as u32) != op_data.t_element as u32 {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }

                        // Only V0..V15 can be used as the indexed multiplier.
                        if op2.as_::<Reg>().id() > 15 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        // 3-bit element index, split as H:L (bit 11) and
                        // M:... (bits 20..21) in the encoding below.
                        let element_index = op2.as_::<Vec>().element_index();
                        if element_index > 7u32 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }

                        opcode.reset(op_data.element_op());
                        opcode.add_imm(q, 30);
                        opcode.add_imm(element_index & 3u32, 20);
                        opcode.add_imm(element_index >> 2, 11);
                        emit_rd0_rn5_rm16!();
                    }
                }
            }
3367
3368            Encoding::SimdFmov => {
3369                if isign4 == enc_ops!(Reg, Reg) {
3370                    // FMOV Gp <-> Vec opcode:
3371                    opcode.reset(0b00011110001001100000000000000000);
3372
3373                    if (op0.as_::<Reg>().is_gp() && op1.as_::<Reg>().is_vec()) {
3374                        // FMOV Wd, Hn      (sf=0 type=11 rmode=00 op=110)
3375                        // FMOV Xd, Hn      (sf=1 type=11 rmode=00 op=110)
3376                        // FMOV Wd, Sn      (sf=0 type=00 rmode=00 op=110)
3377                        // FMOV Xd, Dn      (sf=1 type=11 rmode=00 op=110)
3378                        // FMOV Xd, Vn.d[1] (sf=1 type=10 rmode=01 op=110)
3379                        let x = op0.as_::<Reg>().is_gp64();
3380                        let sz = (op1.as_::<Reg>().reg_type() as u32)
3381                            .saturating_sub(RegType::Vec16 as u32);
3382
3383                        let mut typ = (sz - 1) & 0x3;
3384                        let mut r_mode_op = 0b00110;
3385
3386                        if (op1.as_::<Vec>().has_element_index()) {
3387                            // Special case.
3388                            if (!x
3389                                || !op1.as_::<Vec>().is_vec_d2()
3390                                || op1.as_::<Vec>().element_index() != 1)
3391                            {
3392                                self.last_error = Some(AsmError::InvalidInstruction);
3393                                return;
3394                            }
3395                            typ = 0b10;
3396                            r_mode_op = 0b01110;
3397                        } else {
3398                            // Must be scalar.
3399                            if (sz > 2) {
3400                                self.last_error = Some(AsmError::InvalidOperand);
3401                                return;
3402                            }
3403
3404                            if (op1.as_::<Vec>().has_element_type()) {
3405                                self.last_error = Some(AsmError::InvalidInstruction);
3406                                return;
3407                            }
3408
3409                            if (op1.as_::<Vec>().is_vec32() && x) {
3410                                self.last_error = Some(AsmError::InvalidInstruction);
3411                                return;
3412                            }
3413
3414                            if (op1.as_::<Vec>().is_vec64() && !x) {
3415                                self.last_error = Some(AsmError::InvalidInstruction);
3416                                return;
3417                            }
3418                        }
3419
3420                        opcode.add_imm(x as u32, 31);
3421                        opcode.add_imm(typ, 22);
3422                        opcode.add_imm(r_mode_op, 16);
3423
3424                        emit_rd0_rn5!();
3425                    }
3426
3427                    if (op0.as_::<Reg>().is_vec() && op1.as_::<Reg>().is_gp()) {
3428                        // FMOV Hd, Wn      (sf=0 type=11 rmode=00 op=111)
3429                        // FMOV Hd, Xn      (sf=1 type=11 rmode=00 op=111)
3430                        // FMOV Sd, Wn      (sf=0 type=00 rmode=00 op=111)
3431                        // FMOV Dd, Xn      (sf=1 type=11 rmode=00 op=111)
3432                        // FMOV Vd.d[1], Xn (sf=1 type=10 rmode=01 op=111)
3433                        let x = op1.as_::<Reg>().is_gp64();
3434                        let sz = (op0.as_::<Reg>().reg_type() as u32)
3435                            .saturating_sub(RegType::Vec16 as u32);
3436
3437                        let mut typ = (sz - 1) & 0x3;
3438                        let mut r_mode_op = 0b00111;
3439
3440                        if (op0.as_::<Vec>().has_element_index()) {
3441                            // Special case.
3442                            if (!x
3443                                || !op0.as_::<Vec>().is_vec_d2()
3444                                || op0.as_::<Vec>().element_index() != 1)
3445                            {
3446                                self.last_error = Some(AsmError::InvalidInstruction);
3447                                return;
3448                            }
3449                            typ = 0b10;
3450                            r_mode_op = 0b01111;
3451                        } else {
3452                            // Must be scalar.
3453                            if (sz > 2) {
3454                                self.last_error = Some(AsmError::InvalidInstruction);
3455                                return;
3456                            }
3457
3458                            if (op0.as_::<Vec>().has_element_type()) {
3459                                self.last_error = Some(AsmError::InvalidInstruction);
3460                                return;
3461                            }
3462
3463                            if (op0.as_::<Vec>().is_vec32() && x) {
3464                                self.last_error = Some(AsmError::InvalidInstruction);
3465                                return;
3466                            }
3467
3468                            if (op0.as_::<Vec>().is_vec64() && !x) {
3469                                self.last_error = Some(AsmError::InvalidInstruction);
3470                                return;
3471                            }
3472                        }
3473
3474                        opcode.add_imm(x as u32, 31);
3475                        opcode.add_imm(typ, 22);
3476                        opcode.add_imm(r_mode_op, 16);
3477                        emit_rd0_rn5!();
3478                    }
3479
3480                    if check_signature!(op0, op1) {
3481                        let sz = (op0.as_::<Reg>().reg_type() as u32)
3482                            .saturating_sub(RegType::Vec16 as u32);
3483                        if sz > 2 {
3484                            self.last_error = Some(AsmError::InvalidInstruction);
3485                            return;
3486                        }
3487
3488                        if op0.as_::<Vec>().has_element_type() {
3489                            self.last_error = Some(AsmError::InvalidInstruction);
3490                            return;
3491                        }
3492
3493                        let typ = (sz - 1) & 0x3;
3494                        opcode.reset(0b00011110001000000100000000000000);
3495                        opcode.add_imm(typ, 22);
3496                        emit_rd0_rn5!();
3497                    }
3498                }
3499
3500                if isign4 == enc_ops!(Reg, Imm) {
3501                    if op0.as_::<Reg>().is_vec() {
3502                        let fp_value = if op1.as_::<Imm>().is_double() {
3503                            op1.as_::<Imm>().value_f64()
3504                        } else if op1.as_::<Imm>().is_int32() {
3505                            op1.as_::<Imm>().value_as::<i32>() as f64
3506                        } else {
3507                            self.last_error = Some(AsmError::InvalidOperand);
3508                            return;
3509                        };
3510
3511                        if !is_fp64_imm8(fp_value.to_bits()) {
3512                            self.last_error = Some(AsmError::InvalidOperand);
3513                            return;
3514                        }
3515
3516                        let imm8 = encode_fp64_to_imm8(fp_value.to_bits());
3517
3518                        if !op0.as_::<Vec>().has_element_type() {
3519                            let sz = (op0.as_::<Reg>().reg_type() as u32)
3520                                .saturating_sub(RegType::Vec16 as u32);
3521                            let typ = (sz - 1) & 0x3;
3522                            if sz > 2 {
3523                                self.last_error = Some(AsmError::InvalidInstruction);
3524                                return;
3525                            }
3526
3527                            opcode.reset(0b00011110001000000001000000000000);
3528                            opcode.add_imm(typ, 22);
3529                            opcode.add_imm(imm8, 13);
3530                            emit_rd0!();
3531                        } else {
3532                            let q = (op0.as_::<Reg>().reg_type() as u32)
3533                                .saturating_sub(RegType::Vec64 as u32);
3534                            let sz = (op0.as_::<Vec>().element_type() as u32)
3535                                .saturating_sub(VecElementType::H as u32);
3536
3537                            if q > 1 || sz > 2 {
3538                                self.last_error = Some(AsmError::InvalidInstruction);
3539                                return;
3540                            }
3541
3542                            const SZ_BITS_TABLE: [u32; 3] = [1 << 11, 0, 1 << 29];
3543                            opcode.reset(0b00001111000000001111010000000000);
3544                            opcode ^= SZ_BITS_TABLE[sz as usize];
3545                            opcode.add_imm(q, 30);
3546                            opcode.add_imm(imm8 >> 5, 16);
3547                            opcode.add_imm(imm8 & 31, 5);
3548                            emit_rd0!();
3549                        }
3550                    }
3551                }
3552            }
3553
3554            Encoding::FSimdPair => {
3555                let op_data = &F_SIMD_PAIR[encoding_index];
3556
3557                if isign4 == enc_ops!(Reg, Reg) {
3558                    // This operation is only defined for:
3559                    //   hD, vS.2h (16-bit)
3560                    //   sD, vS.2s (32-bit)
3561                    //   dD, vS.2d (64-bit)
3562                    let sz =
3563                        (op0.as_::<Reg>().reg_type() as u32).saturating_sub(RegType::Vec16 as u32);
3564                    if sz > 2 {
3565                        self.last_error = Some(AsmError::InvalidInstruction);
3566                        return;
3567                    }
3568
3569                    const SZ_SIGNATURES: [u32; 3] = [
3570                        A64VecS::SIGNATURE | (Vec::SIGNATURE_ELEMENT_H as u32),
3571                        A64VecD::SIGNATURE | (Vec::SIGNATURE_ELEMENT_S as u32),
3572                        A64VecQ::SIGNATURE | (Vec::SIGNATURE_ELEMENT_D as u32),
3573                    ];
3574
3575                    if op0.signature().bits() != SZ_SIGNATURES[sz as usize] {
3576                        self.last_error = Some(AsmError::InvalidInstruction);
3577                        return;
3578                    }
3579
3580                    const SZ_BITS_TABLE: [u32; 3] = [1 << 29, 0, 1 << 22];
3581
3582                    opcode.reset(op_data.scalar_op());
3583                    opcode ^= SZ_BITS_TABLE[sz as usize];
3584                    emit_rd0_rn5!();
3585                }
3586
3587                if isign4 == enc_ops!(Reg, Reg, Reg) {
3588                    if !check_signature!(op0, op1, op2) {
3589                        self.last_error = Some(AsmError::InvalidInstruction);
3590                        return;
3591                    }
3592
3593                    let q =
3594                        (op0.as_::<Reg>().reg_type() as u32).saturating_sub(RegType::Vec64 as u32);
3595                    if q > 1 {
3596                        self.last_error = Some(AsmError::InvalidInstruction);
3597                        return;
3598                    }
3599
3600                    const SZ_BITS_TABLE: [u32; 3] =
3601                        [(1 << 22) | (1 << 21) | (1 << 15) | (1 << 14), 0, 1 << 22];
3602                    opcode.reset(op_data.scalar_op());
3603                    opcode ^= SZ_BITS_TABLE[q as usize];
3604                    opcode.add_imm(q, 30);
3605                    emit_rd0_rn5_rm16!();
3606                }
3607            }
3608
            Encoding::ISimdSV => {
                // Integer SIMD "scalar from vector" reductions: a single
                // scalar destination produced from all lanes of one source.
                let op_data = &I_SIMD_SV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg) {
                    // Long variants widen the destination by one size step.
                    let l = (inst_flags & InstFlag::Long as u16) != 0;
                    // The destination scalar width (steps above an 8-bit vec)
                    // must equal the source element width (steps above B),
                    // plus one extra step for long variants.
                    if (op0.as_::<Vec>().reg_type() as u32).saturating_sub(RegType::Vec8 as u32)
                        != (op1.as_::<Vec>().element_type() as u32)
                            .saturating_sub(VecElementType::B as u32)
                            + l as u32
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    // Derive the Q/size encoding fields from the *source*
                    // arrangement; the table row limits which are legal.
                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        op1.as_::<Reg>().reg_type(),
                        op1.as_::<Vec>().element_type(),
                    );

                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset(op_data.opcode());
                    opcode.add_imm(size_op.q(), 30);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5!();
                }
            }
3639
            Encoding::ISimdVV => {
                // Integer SIMD two-operand (vector <- vector) instructions.
                let op_data = &I_SIMD_VV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg) {
                    // Select the operand whose arrangement drives the size/Q
                    // encoding (instruction flags choose which is significant).
                    let sop = significant_simd_op(op0, op1, inst_flags as u32);
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        sop.as_::<Reg>().reg_type(),
                        sop.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    // qs/scalar/size place the Q, scalar-mode and element-size
                    // bits at their A64 field positions.
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5!();
                }
            }
3667
            Encoding::ISimdVVx => {
                // Two-operand instructions whose operand signatures are fixed
                // by the table — no size/Q fields derived from the operands.
                let op_data = &I_SIMD_VVX[encoding_index];
                if isign4 == enc_ops!(Reg, Reg) {
                    // Both operands must match the table's exact signatures.
                    if op0.signature().bits() != op_data.op0_signature
                        || op1.signature().bits() != op_data.op1_signature
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    emit_rd0_rn5!();
                }
            }
3681
            Encoding::ISimdVVV => {
                // Integer SIMD three-operand (vector <- vector op vector)
                // instructions; all operands share one arrangement.
                let op_data = &I_SIMD_VVV[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Operand whose arrangement drives the size/Q encoding.
                    let sop = significant_simd_op(op0, op1, inst_flags as u32);
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        sop.as_::<Reg>().reg_type(),
                        sop.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset(op_data.opcode());
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5_rm16!();
                }
            }
3709
            Encoding::ISimdVVVx => {
                // Three-operand instructions with fixed, table-specified
                // operand signatures (no derived size/Q fields).
                let op_data = &I_SIMD_VVVX[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Every operand must match the table's exact signature.
                    if op0.signature().bits() != op_data.op0_signature
                        || op1.signature().bits() != op_data.op1_signature
                        || op2.signature().bits() != op_data.op2_signature
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset(op_data.opcode());
                    emit_rd0_rn5_rm16!();
                }
            }
3726
            Encoding::ISimdWWV => {
                // "Wide" form: destination and first source use elements one
                // size step wider than the second (narrow) source.
                let op_data = &I_SIMD_WWV[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Size/Q fields come from the narrow operand (op2).
                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        op2.as_::<Reg>().reg_type(),
                        op2.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    // op0/op1 must match each other, be full 128-bit vectors,
                    // and carry elements exactly one step wider than op2's.
                    if !check_signature!(op0, op1)
                        || !op0.as_::<Reg>().is_vec128()
                        || (op0.as_::<Vec>().element_type() as u32)
                            != (op2.as_::<Vec>().element_type() as u32 + 1)
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5_rm16!();
                }
            }
3754
            Encoding::ISimdVVVe => {
                // Three-operand instructions with an optional indexed-element
                // variant: plain form when op2 is a whole vector, element form
                // when op2 carries a lane index (vM.T[idx]).
                let op_data = &I_SIMD_VVVE[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Operand whose arrangement drives the size/Q encoding.
                    let sop = significant_simd_op(op0, op1, inst_flags as u32);
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if !op2.as_::<Vec>().has_element_index() {
                        // Regular (whole-vector) form.
                        let size_op = element_type_to_size_op(
                            op_data.regular_vec_type,
                            sop.as_::<Reg>().reg_type(),
                            sop.as_::<Vec>().element_type(),
                        );
                        if !size_op.is_valid() {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }
                        if !check_signature!(op1, op2) {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }
                        opcode.reset((op_data.regular_op as u32) << 10);
                        opcode.add_imm(size_op.qs(), 30);
                        opcode.add_imm(size_op.scalar(), 28);
                        opcode.add_imm(size_op.size(), 22);
                        emit_rd0_rn5_rm16!();
                    } else {
                        // Indexed-element form.
                        let size_op = element_type_to_size_op(
                            op_data.element_vec_type,
                            sop.as_::<Reg>().reg_type(),
                            sop.as_::<Vec>().element_type(),
                        );
                        if !size_op.is_valid() {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }
                        // Pack the lane index into the L:M (lm) and H opcode
                        // bits; encode_lmh also caps which Rm register ids are
                        // addressable for this element size.
                        let element_index = op2.as_::<Vec>().element_index();
                        let mut lmh = LMHImm {
                            lm: 0,
                            h: 0,
                            max_rm_id: 0,
                        };
                        if !encode_lmh(size_op.size(), element_index, &mut lmh) {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        if op2.as_::<Reg>().id() > lmh.max_rm_id {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset((op_data.element_op as u32) << 10);
                        opcode.add_imm(size_op.q(), 30);
                        opcode.add_imm(size_op.size(), 22);
                        opcode.add_imm(lmh.lm, 20);
                        opcode.add_imm(lmh.h, 11);
                        emit_rd0_rn5_rm16!();
                    }
                }
            }
3815
            Encoding::ISimdVVVI => {
                // Three vector operands plus an unsigned immediate (e.g. a
                // lane-index or shift-style constant placed at imm_shift).
                let op_data = &I_SIMD_VVVI[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg, Imm) {
                    // Operand whose arrangement drives the size/Q encoding.
                    let sop = significant_simd_op(op0, op1, inst_flags as u32);
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        sop.as_::<Reg>().reg_type(),
                        sop.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    // Range-check the immediate: imm_size bits, with one bit
                    // fewer when the table flags it and the 64-bit (Q == 0)
                    // vector form is used.
                    let imm_value = op3.as_::<Imm>().value_as::<u64>();
                    let mut imm_size = op_data.imm_size;
                    if op_data.imm64_has_one_bit_less != 0 && size_op.q() == 0 {
                        imm_size -= 1;
                    }
                    let imm_max = 1u64 << imm_size;
                    if imm_value >= imm_max {
                        self.last_error = Some(AsmError::InvalidImmediate);
                        return;
                    }
                    opcode.reset(op_data.opcode());
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    opcode.add_imm(imm_value as u32, op_data.imm_shift);
                    emit_rd0_rn5_rm16!();
                }
            }
3851
            Encoding::ISimdVVVV => {
                // Four-operand form (destination, two sources and an
                // accumulator/extra register encoded at Ra, bits 10..14).
                let op_data = &I_SIMD_VVVV[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg, Reg) {
                    // Operand whose arrangement drives the size/Q encoding.
                    let sop = significant_simd_op(op0, op1, inst_flags as u32);
                    if !check_signature!(op0, op1, op2, op3) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        sop.as_::<Reg>().reg_type(),
                        sop.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset((op_data.opcode as u32) << 10);
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5_rm16_ra10!();
                }
            }
3876
            Encoding::ISimdVVVVx => {
                // Four-operand form with fixed, table-specified operand
                // signatures (no derived size/Q fields).
                let op_data = &I_SIMD_VVVVX[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg, Reg) {
                    // Every operand must match the table's exact signature.
                    if op0.signature().bits() != op_data.op0_signature
                        || op1.signature().bits() != op_data.op1_signature
                        || op2.signature().bits() != op_data.op2_signature
                        || op3.signature().bits() != op_data.op3_signature
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset((op_data.opcode as u32) << 10);
                    emit_rd0_rn5_rm16_ra10!();
                }
            }
3892
            Encoding::ISimdPair => {
                // Integer SIMD pairwise instructions: an optional scalar
                // 2-operand form (opcode2) and a vector 3-operand form
                // (opcode3).
                let op_data = &I_SIMD_PAIR[encoding_index];
                if isign4 == enc_ops!(Reg, Reg) && op_data.opcode2 != 0 {
                    // Scalar pair form: dD, vN.2d only (size bits forced to
                    // 0b11).
                    // NOTE(review): when the operand shapes don't match, this
                    // branch neither emits nor sets an error and control falls
                    // through — presumably rejected after the match; confirm.
                    if op0.as_::<Vec>().is_vec_d1() && op1.as_::<Vec>().is_vec_d2() {
                        opcode.reset((op_data.opcode2 as u32) << 10);
                        opcode.add_imm(0x3, 22);
                        emit_rd0_rn5!();
                    }
                }
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Vector pair form: all operands share one arrangement.
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    let size_op = element_type_to_size_op(
                        op_data.op_type3,
                        op0.as_::<Reg>().reg_type(),
                        op0.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset((op_data.opcode3 as u32) << 10);
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5_rm16!();
                }
            }
3923
            Encoding::SimdBicOrr => {
                // BIC/ORR: a three-register vector form and an immediate form
                // (imm8, optionally left-shifted by a multiple of 8, encoded
                // via the modified-immediate abc:cmode:defgh fields).
                let op_data = &SIMD_BIC_ORR[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    // Register form: bitwise op, any byte arrangement; only
                    // the Q bit (vector width) is derived from the operands.
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    let size_op = element_type_to_size_op(
                        0, // kVO_V_B
                        op0.as_::<Reg>().reg_type(),
                        op0.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset((op_data.register_op as u32) << 10);
                    opcode.add_imm(size_op.q(), 30);
                    emit_rd0_rn5_rm16!();
                }
                if isign4 == enc_ops!(Reg, Imm) || isign4 == enc_ops!(Reg, Imm, Imm) {
                    // Immediate form: only H/S element arrangements are legal.
                    let size_op = element_type_to_size_op(
                        5, // kVO_V_HS
                        op0.as_::<Reg>().reg_type(),
                        op0.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if op1.as_::<Imm>().value_as::<u64>() > 0xFFFFFFFF {
                        self.last_error = Some(AsmError::InvalidImmediate);
                        return;
                    }
                    let mut imm = op1.as_::<Imm>().value_as::<u32>();
                    let mut shift = 0u32;
                    // Maximum LSL amount for the element size (8 for H, 24
                    // for S).
                    let max_shift = (8u32 << size_op.size()) - 8u32;
                    if isign4 == enc_ops!(Reg, Imm, Imm) {
                        // Explicit shift operand: must be LSL by a multiple
                        // of 8, with the base immediate already <= 0xFF.
                        if op2.as_::<Imm>().predicate() != ShiftOp::LSL as u32 {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                        if imm > 0xFF || op2.as_::<Imm>().value_as::<u64>() > max_shift as u64 {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                        shift = op2.as_::<Imm>().value_as::<u32>();
                        if (shift & 0x7) != 0 {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                    } else if imm != 0 {
                        // No explicit shift: derive it from the immediate's
                        // trailing zeros, rounded down to a multiple of 8.
                        shift = imm.trailing_zeros() & !0x7;
                        imm >>= shift;
                        if imm > 0xFF || shift > max_shift {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                    }
                    // cmode: bit0 set (shifted-immediate class), bits 1..2
                    // hold shift/8, bit3 marks the halfword variant.
                    let mut cmode = 0x1 | ((shift / 8) << 1);
                    if size_op.size() == 1 {
                        cmode |= 1 << 3;
                    }
                    // Split imm8 into the abc (high 3) and defgh (low 5)
                    // modified-immediate fields.
                    let abc = (imm >> 5) & 0x7;
                    let defgh = imm & 0x1F;
                    opcode.reset((op_data.immediate_op as u32) << 10);
                    opcode.add_imm(size_op.q(), 30);
                    opcode.add_imm(abc, 16);
                    opcode.add_imm(cmode, 12);
                    opcode.add_imm(defgh, 5);
                    emit_rd0!();
                }
            }
3997
            Encoding::SimdCmp => {
                // SIMD compare instructions: register-vs-register form and a
                // compare-against-zero form (third operand must be #0).
                let op_data = &SIMD_CMP[encoding_index];

                if isign4 == enc_ops!(Reg, Reg, Reg) && op_data.register_op != 0 {
                    // Register form: all operands share one arrangement.
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        op0.as_::<Reg>().reg_type(),
                        op0.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset((op_data.register_op as u32) << 10);
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5_rm16!();
                }

                if isign4 == enc_ops!(Reg, Reg, Imm) && op_data.zero_op != 0 {
                    // Zero form: both register operands share one arrangement
                    // and the immediate operand must be exactly zero.
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    if op2.as_::<Imm>().value() != 0 {
                        self.last_error = Some(AsmError::InvalidImmediate);
                        return;
                    }

                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        op0.as_::<Reg>().reg_type(),
                        op0.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }

                    opcode.reset((op_data.zero_op as u32) << 10);
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5!();
                }
            }
4052
4053            Encoding::SimdDot => {
4054                let op_data = &SIMD_DOT[encoding_index];
4055
4056                if isign4 == enc_ops!(Reg, Reg, Reg) {
4057                    let q = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec64 as u32;
4058                    let size = 2u32;
4059
4060                    if q > 1 {
4061                        self.last_error = Some(AsmError::InvalidInstruction);
4062                        return;
4063                    }
4064
4065                    if !op2.as_::<Vec>().has_element_index() {
4066                        if op_data.vector_op == 0 {
4067                            self.last_error = Some(AsmError::InvalidInstruction);
4068                            return;
4069                        }
4070
4071                        if op0.as_::<Reg>().reg_type() != op1.as_::<Reg>().reg_type()
4072                            || op1.as_::<Reg>().reg_type() != op2.as_::<Reg>().reg_type()
4073                        {
4074                            self.last_error = Some(AsmError::InvalidInstruction);
4075                            return;
4076                        }
4077
4078                        if op0.as_::<Vec>().element_type() as u32 != op_data.ta as u32
4079                            || op1.as_::<Vec>().element_type() as u32 != op_data.tb as u32
4080                            || op2.as_::<Vec>().element_type() as u32 != op_data.tb as u32
4081                        {
4082                            self.last_error = Some(AsmError::InvalidInstruction);
4083                            return;
4084                        }
4085
4086                        opcode.reset((op_data.vector_op as u32) << 10);
4087                        opcode.add_imm(q, 30);
4088                        emit_rd0_rn5_rm16!();
4089                    } else {
4090                        if op_data.element_op == 0 {
4091                            self.last_error = Some(AsmError::InvalidInstruction);
4092                            return;
4093                        }
4094
4095                        if op0.as_::<Reg>().reg_type() != op1.as_::<Reg>().reg_type()
4096                            || !op2.as_::<Reg>().is_vec128()
4097                        {
4098                            self.last_error = Some(AsmError::InvalidInstruction);
4099                            return;
4100                        }
4101
4102                        if op0.as_::<Vec>().element_type() as u32 != op_data.ta as u32
4103                            || op1.as_::<Vec>().element_type() as u32 != op_data.tb as u32
4104                            || op2.as_::<Vec>().element_type() as u32 != op_data.t_element as u32
4105                        {
4106                            self.last_error = Some(AsmError::InvalidInstruction);
4107                            return;
4108                        }
4109
4110                        let element_index = op2.as_::<Vec>().element_index();
4111                        let mut lmh = LMHImm {
4112                            lm: 0,
4113                            h: 0,
4114                            max_rm_id: 0,
4115                        };
4116                        if !encode_lmh(size, element_index, &mut lmh) {
4117                            self.last_error = Some(AsmError::InvalidOperand);
4118                            return;
4119                        }
4120
4121                        if op2.as_::<Reg>().id() > lmh.max_rm_id {
4122                            self.last_error = Some(AsmError::InvalidOperand);
4123                            return;
4124                        }
4125
4126                        opcode.reset((op_data.element_op as u32) << 10);
4127                        opcode.add_imm(q, 30);
4128                        opcode.add_imm(lmh.lm, 20);
4129                        opcode.add_imm(lmh.h, 11);
4130                        emit_rd0_rn5_rm16!();
4131                    }
4132                }
4133            }
4134
            Encoding::SimdDup => {
                // Fully handled by the function-local simd_dup! macro.
                simd_dup!();
            }
4138
            Encoding::SimdIns => {
                // Fully handled by the function-local simd_insn! macro.
                simd_insn!();
            }
4142
4143            Encoding::SimdMov => {
4144                if isign4 == enc_ops!(Reg, Reg) {
4145                    if op0.as_::<Reg>().is_vec() && op1.as_::<Reg>().is_vec() {
4146                        // INS v.x[index], v.x[index].
4147                        if op0.as_::<Vec>().has_element_index()
4148                            && op1.as_::<Vec>().has_element_index()
4149                        {
4150                            // SimdIns encoding.
4151
4152                            encoding = Encoding::SimdIns;
4153                            // Recurse to SimdIns.
4154                            simd_insn!();
4155                            return;
4156                        }
4157                        // DUP {b|h|s|d}, v.{b|h|s|d}[index].
4158                        if op1.as_::<Vec>().has_element_index() {
4159                            encoding = Encoding::SimdDup;
4160                            simd_dup!();
4161                            return;
4162                        }
4163                        if !check_signature!(op0, op1) {
4164                            self.last_error = Some(AsmError::InvalidInstruction);
4165                            return;
4166                        }
4167                        let q = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec64 as u32;
4168                        if q > 1 {
4169                            self.last_error = Some(AsmError::InvalidInstruction);
4170                            return;
4171                        }
4172                        opcode.reset(0b0000111010100000000111 << 10);
4173                        opcode.add_imm(q, 30);
4174                        opcode.add_reg(op1.id(), 16);
4175                        emit_rd0_rn5!();
4176                        return;
4177                    }
4178                    if op0.as_::<Reg>().is_vec() && op1.as_::<Reg>().is_gp() {
4179                        // INS v.x[index], Rn.
4180                        if op0.as_::<Vec>().has_element_index() {
4181                            encoding = Encoding::SimdIns;
4182                            simd_insn!();
4183                            return;
4184                        }
4185                        self.last_error = Some(AsmError::InvalidInstruction);
4186                        return;
4187                    }
4188                    if op0.as_::<Reg>().is_gp() && op1.as_::<Reg>().is_vec() {
4189                        // UMOV Rd, V.{s|d}[index].
4190                        encoding_index = 1;
4191                        encoding = Encoding::SimdSmovUmov;
4192                        simd_umov!();
4193                        return;
4194                    }
4195                }
4196            }
4197
            // MOVI/MVNI (vector, modified immediate): derive the AdvSIMD
            // imm8/cmode/op fields from the 64-bit immediate in `op1`,
            // narrowing the effective element size where the value repeats.
            Encoding::SimdMoviMvni => {
                let op_data = &SIMD_MOVI_MVNI[encoding_index];
                // Accepts `Vd, #imm` or `Vd, #imm, {lsl|msl} #shift`.
                if isign4 == enc_ops!(Reg, Imm) || isign4 == enc_ops!(Reg, Imm, Imm) {
                    let mut size_op = element_type_to_size_op(
                        20,
                        op0.as_::<Reg>().reg_type(),
                        op0.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    let mut imm64 = op1.as_::<Imm>().value_as::<u64>();
                    let mut imm8 = 0u32;
                    let mut cmode = 0u32;
                    // Non-zero when the table entry is the inverted (MVNI-style) form.
                    let inverted = op_data.inverted;
                    let mut op = 0u32;
                    // Shift amount in bits; converted to bytes before cmode selection.
                    let mut shift = 0u32;
                    let mut shift_op = ShiftOp::LSL as u32;
                    if size_op.size() == 3 {
                        // 64-bit elements: an explicit non-zero shift is never encodable.
                        if op2.is_imm() && op2.as_::<Imm>().value() != 0 {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                        if is_byte_mask_imm(imm64) {
                            // Every byte is 0x00 or 0xFF -> the 64-bit byte-mask
                            // form (one imm8 bit per byte).
                            imm8 = encode_imm64_byte_mask_to_imm8(imm64);
                        } else {
                            if (imm64 >> 32) == (imm64 & 0xFFFFFFFF) {
                                // Both 32-bit halves equal -> retry as a
                                // 32-bit-element immediate.
                                imm64 &= 0xFFFFFFFF;
                                size_op.decrement_size();
                            } else {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                        }
                    }
                    if size_op.size() < 3 {
                        if imm64 > 0xFFFFFFFF {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                        imm8 = imm64 as u32;
                        // Narrow 32 -> 16 and 16 -> 8 bits while the halves repeat.
                        if size_op.size() == 2 {
                            if (imm8 >> 16) == (imm8 & 0xFFFF) {
                                imm8 >>= 16;
                                size_op.decrement_size();
                            }
                        }
                        if size_op.size() == 1 {
                            if imm8 > 0xFFFF {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                            if (imm8 >> 8) == (imm8 & 0xFF) {
                                imm8 >>= 8;
                                size_op.decrement_size();
                            }
                        }
                        // Largest byte-aligned shift for this element size
                        // (0 for B, 8 for H, 24 for S).
                        let max_shift = (8u32 << size_op.size()) - 8u32;
                        if op2.is_imm() {
                            // Explicit shift: imm8 must already fit in a byte.
                            if imm8 > 0xFF || op2.as_::<Imm>().value_as::<u64>() > max_shift as u64
                            {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                            shift = op2.as_::<Imm>().value_as::<u32>();
                            shift_op = op2.as_::<Imm>().predicate();
                        } else if imm8 != 0 {
                            // No explicit shift: infer the byte-aligned LSL
                            // implied by the immediate's trailing zero bytes.
                            shift = imm8.trailing_zeros() & !0x7;
                            imm8 >>= shift;
                            if imm8 > 0xFF || shift > max_shift {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                        }
                        // Only whole-byte shift amounts are encodable.
                        if (shift & 0x7) != 0 {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                    }
                    // Bits -> bytes; cmode below consumes a byte count.
                    shift /= 8;
                    // Select cmode/op per element size, following the AdvSIMD
                    // modified-immediate table in the Arm ARM.
                    match size_op.size() {
                        0 => {
                            // 8-bit elements: LSL #0 only; the inverted form is
                            // folded directly into imm8.
                            if shift_op != ShiftOp::LSL as u32 {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                            if inverted != 0 {
                                imm8 = !imm8 & 0xFF;
                            }
                            cmode = B!(3) | B!(2) | B!(1);
                        }
                        1 => {
                            // 16-bit elements: LSL #0 or #8.
                            if shift_op != ShiftOp::LSL as u32 {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                            cmode = B!(3) | (shift << 1);
                            op = inverted;
                        }
                        2 => {
                            // 32-bit elements: LSL #0/#8/#16/#24, or MSL #8/#16.
                            if shift_op == ShiftOp::LSL as u32 {
                                cmode = shift << 1;
                            } else if shift_op == ShiftOp::MSL as u32 {
                                if shift == 0 || shift > 2 {
                                    self.last_error = Some(AsmError::InvalidImmediate);
                                    return;
                                }
                                cmode = B!(3) | B!(2) | (shift - 1);
                            } else {
                                self.last_error = Some(AsmError::InvalidImmediate);
                                return;
                            }
                            op = inverted;
                        }
                        3 => {
                            // 64-bit byte-mask form: op bit set; inverted form
                            // folded into imm8.
                            if inverted != 0 {
                                imm8 = !imm8 & 0xFF;
                            }
                            op = 1;
                            cmode = B!(3) | B!(2) | B!(1);
                        }
                        _ => {}
                    }
                    // imm8 splits across the word as a:b:c (bits 18..16) and
                    // d:e:f:g:h (bits 9..5).
                    let abc = (imm8 >> 5) & 0x7;
                    let defgh = imm8 & 0x1F;
                    opcode.reset((op_data.opcode as u32) << 10);
                    opcode.add_imm(size_op.q(), 30);
                    opcode.add_imm(op, 29);
                    opcode.add_imm(abc, 16);
                    opcode.add_imm(cmode, 12);
                    opcode.add_imm(defgh, 5);
                    emit_rd0!();
                    return;
                }
            }
4334
            // AdvSIMD shift by immediate (immh:immb form) or shift by register.
            Encoding::SimdShift => {
                let op_data = &SIMD_SHIFT[encoding_index];
                // Operand that determines the element size (dst or src,
                // chosen by instruction flags).
                let sop = significant_simd_op(op0, op1, inst_flags as u32);
                let size_op = element_type_to_size_op(
                    op_data.vec_op_type,
                    sop.as_::<Reg>().reg_type(),
                    sop.as_::<Vec>().element_type(),
                );
                if !size_op.is_valid() {
                    self.last_error = Some(AsmError::InvalidInstruction);
                    return;
                }
                if isign4 == enc_ops!(Reg, Reg, Imm) && op_data.immediate_op != 0 {
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    // Coarse guard; the exact per-element-size range is
                    // validated below.
                    if op2.as_::<Imm>().value_as::<u64>() > 63 {
                        self.last_error = Some(AsmError::InvalidImmediate);
                        return;
                    }
                    // `lsb_shift` is the bit position of the leading
                    // (element-size marker) bit inside immh:immb.
                    let lsb_shift = size_op.size() + 3;
                    let lsb_mask = (1u32 << lsb_shift) - 1;
                    let mut imm = op2.as_::<Imm>().value_as::<u32>();
                    if op_data.inverted_imm != 0 {
                        // Right shifts encode as the two's complement of the
                        // amount below the marker bit (i.e. 2*esize - shift);
                        // valid range is 1..=esize.
                        if imm == 0 || imm > (1u32 << lsb_shift) {
                            self.last_error = Some(AsmError::InvalidImmediate);
                            return;
                        }
                        imm = (!imm + 1) & lsb_mask;
                    }
                    if imm > lsb_mask {
                        self.last_error = Some(AsmError::InvalidImmediate);
                        return;
                    }
                    // Set the marker bit that encodes the element size.
                    imm |= 1u32 << lsb_shift;
                    opcode.reset((op_data.immediate_op as u32) << 10);
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(imm, 16);
                    emit_rd0_rn5!();
                    return;
                }
                if isign4 == enc_ops!(Reg, Reg, Reg) && op_data.register_op != 0 {
                    // Shift amount supplied in a register.
                    if !check_signature!(op0, op1, op2) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset((op_data.register_op as u32) << 10);
                    opcode.add_imm(size_op.qs(), 30);
                    opcode.add_imm(size_op.scalar(), 28);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5_rm16!();
                    return;
                }
            }
4391
            // Shift whose amount is fixed to the element size (SHLL-style):
            // the immediate must be exactly `lsl #esize`.
            Encoding::SimdShiftES => {
                let op_data = &SIMD_SHIFT_ES[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Imm) {
                    // Element size is taken from the source operand.
                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        op1.as_::<Reg>().reg_type(),
                        op1.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    let shift = op2.as_::<Imm>().value_as::<u64>();
                    let shift_op = op2.as_::<Imm>().predicate();
                    // 8 << size == element size in bits; only LSL by exactly
                    // that amount is valid.
                    if shift != (8u64 << size_op.size()) || shift_op != ShiftOp::LSL as u32 {
                        self.last_error = Some(AsmError::InvalidImmediate);
                        return;
                    }
                    opcode.reset((op_data.opcode as u32) << 10);
                    opcode.add_imm(size_op.q(), 30);
                    opcode.add_imm(size_op.size(), 22);
                    emit_rd0_rn5!();
                    return;
                }
            }
4421
            // SM3TT crypto ops: all operands are V.4S and the third operand
            // carries a lane index (0..=3) encoded as imm2 at bit 12.
            Encoding::SimdSm3tt => {
                let op_data = &SIMD_SM3TT[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg) {
                    if op0.as_::<Vec>().is_vec_s4()
                        && op1.as_::<Vec>().is_vec_s4()
                        && op2.as_::<Vec>().is_vec_s4()
                        && op2.as_::<Vec>().has_element_index()
                    {
                        let imm2 = op2.as_::<Vec>().element_index();
                        if imm2 > 3 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset((op_data.opcode as u32) << 10);
                        opcode.add_imm(imm2, 12);
                        emit_rd0_rn5_rm16!();
                        return;
                    }
                }
            }
4442
            // SMOV/UMOV (move vector element to GP register): all of the
            // encoding work is delegated to the shared `simd_umov!` macro.
            Encoding::SimdSmovUmov => {
                simd_umov!();
            }
4446
            // SXTL/UXTL (shift-left-long aliases with a zero shift): only the
            // element-size marker bit of immh is set; there are no shift bits.
            Encoding::SimdSxtlUxtl => {
                let op_data = &SIMD_SXTL_UXTL[encoding_index];
                if isign4 == enc_ops!(Reg, Reg) {
                    // Element size comes from the (narrower) source operand.
                    let size_op = element_type_to_size_op(
                        op_data.vec_op_type,
                        op1.as_::<Reg>().reg_type(),
                        op1.as_::<Vec>().element_type(),
                    );
                    if !size_op.is_valid() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    opcode.reset((op_data.opcode as u32) << 10);
                    opcode.add_imm(size_op.q(), 30);
                    // immh marker: a single bit at position (19 + size).
                    opcode.add_imm(1u32, size_op.size() + 19);
                    emit_rd0_rn5!();
                    return;
                }
            }
4470
            // TBL/TBX table lookup: Vd, {Vn..}, Vm. `len` counts the extra
            // table registers beyond the first; the last present operand is
            // the index register Vm.
            Encoding::SimdTblTbx => {
                let op_data = &SIMD_TBL_TBX[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Reg) || isign4 == enc_ops!(Reg, Reg, Reg, Reg) {
                    opcode.reset((op_data.opcode as u32) << 10);

                    // Destination must be a plain V64/V128 (q = 0/1), no lane.
                    let q = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec64 as u32;
                    if q > 1 || op0.as_::<Vec>().has_element_index() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    // The first table register is always a full 16B vector.
                    if !op1.as_::<Vec>().is_vec_b16() || op1.as_::<Vec>().has_element_index() {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    // NOTE(review): intermediate table registers (op2..op4
                    // when len > 0) are not validated here as consecutive
                    // with op1 — TODO confirm this is checked upstream.
                    let len =
                        (!op3.is_none() as u32) + (!op4.is_none() as u32) + (!op5.is_none() as u32);
                    opcode.add_imm(q, 30);
                    opcode.add_imm(len, 13);

                    // Pick the operand that acts as the index register Vm.
                    match len {
                        0 => {
                            if !check_signature!(op0, op2) {
                                self.last_error = Some(AsmError::InvalidInstruction);
                                return;
                            }
                            if op2.id() > 31 {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.add_reg(op2.id(), 16);
                            emit_rd0_rn5!();
                            return;
                        }
                        1 => {
                            if !check_signature!(op0, op3) {
                                self.last_error = Some(AsmError::InvalidInstruction);
                                return;
                            }
                            if op3.id() > 31 {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.add_reg(op3.id(), 16);
                            emit_rd0_rn5!();
                            return;
                        }
                        2 => {
                            if !check_signature!(op0, op4) {
                                self.last_error = Some(AsmError::InvalidInstruction);
                                return;
                            }
                            if op4.id() > 31 {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.add_reg(op4.id(), 16);
                            emit_rd0_rn5!();
                            return;
                        }
                        3 => {
                            if !check_signature!(op0, op5) {
                                self.last_error = Some(AsmError::InvalidInstruction);
                                return;
                            }
                            if op5.id() > 31 {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.add_reg(op5.id(), 16);
                            emit_rd0_rn5!();
                            return;
                        }
                        _ => {
                            self.last_error = Some(AsmError::InvalidInstruction);
                            return;
                        }
                    }
                }
            }
4550
            // Single SIMD&FP register load/store. Selects between the
            // register-offset, pre/post-index, unsigned-scaled-offset (with an
            // unscaled LDUR/STUR fallback) and PC-relative literal forms.
            Encoding::SimdLdSt => {
                let op_data = &SIMD_LD_ST[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    rm_rel = op1;

                    // xsz = log2(transfer size): Vec8..Vec128 -> 0..4 (B,H,S,D,Q).
                    let xsz = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec8 as u32;
                    if xsz > 4 || op0.as_::<Vec>().has_element_index() {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    if !check_vec_id(op0) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }

                    // TODO: check_mem_base_index_rel(m)
                    let offset = m.offset();
                    if m.has_base_reg() {
                        if m.has_index() {
                            // Register-offset form: extend/shift goes into the
                            // `option` field; unsupported ops map to 0xFF.
                            let opt = SHIFT_OP_TO_LD_ST_OP_MAP[m.shift_op() as usize];
                            if opt == 0xFF {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            let shift = m.shift();
                            let s = if shift != 0 { 1 } else { 0 };
                            // A non-zero index shift must equal log2(size).
                            if s != 0 && shift != xsz {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.reset((op_data.register_op as u32) << 21);
                            // size (bits 31:30) and the "opc high" bit (23)
                            // together encode B/H/S/D/Q.
                            opcode.add_imm(xsz & 3, 30);
                            opcode.add_imm(xsz >> 2, 23);
                            opcode.add_imm(opt as u32, 13);
                            opcode.add_imm(s, 12);
                            // Fixed bit of the register-offset encoding.
                            opcode.0 |= 1 << 11;
                            opcode.add_reg(op0.id(), 0);
                            // Emit mem base index
                            opcode.add_reg(m.base_id(), 5);
                            opcode.add_reg(m.index_id(), 16);
                            return self.buffer.write_u32(opcode.get());
                        }

                        let offset32 = offset as i32;
                        if m.is_pre_or_post() {
                            // Pre/post-index: signed 9-bit unscaled offset.
                            if offset32 < -256 || offset32 > 255 {
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.reset((op_data.pre_post_op as u32) << 21);
                            opcode.add_imm(xsz & 3, 30);
                            opcode.add_imm(xsz >> 2, 23);
                            opcode.add_imm((offset32 as u32) & 0x1FF, 12);
                            // Bit 11 selects pre-index (writeback before access).
                            opcode.add_imm(m.is_pre_index() as u32, 11);
                            opcode.0 |= 1 << 10;
                            opcode.add_reg(op0.id(), 0);
                            opcode.add_reg(m.base_id(), 5);
                            return self.buffer.write_u32(opcode.get());
                        } else {
                            // Unsigned offset form: 12-bit immediate scaled by
                            // the transfer size.
                            let imm12 = (offset32 as u32) >> xsz;
                            if imm12 >= (1 << 12) || ((imm12 << xsz) as i32) != offset32 {
                                // Fallback to SimdLdurStur
                                // (unscaled 9-bit signed offset) when the
                                // offset is negative or not size-aligned.
                                let op_data_ldur = &SIMD_LDUR_STUR[encoding_index];
                                if m.has_base_reg() && !m.has_index() && !m.is_pre_or_post() {
                                    if offset32 < -256 || offset32 > 255 {
                                        self.last_error = Some(AsmError::InvalidOperand);
                                        return;
                                    }
                                    opcode.reset((op_data_ldur.opcode as u32) << 10);
                                    opcode.add_imm(xsz & 3, 30);
                                    opcode.add_imm(xsz >> 2, 23);
                                    opcode.add_imm((offset32 as u32) & 0x1FF, 12);
                                    opcode.add_reg(op0.id(), 0);
                                    opcode.add_reg(m.base_id(), 5);
                                    return self.buffer.write_u32(opcode.get());
                                }
                                self.last_error = Some(AsmError::InvalidOperand);
                                return;
                            }
                            opcode.reset((op_data.u_offset_op as u32) << 22);
                            opcode.add_imm(xsz & 3, 30);
                            opcode.add_imm(xsz >> 2, 23);
                            opcode.add_imm(imm12, 10);
                            opcode.add_reg(op0.id(), 0);
                            opcode.add_reg(m.base_id(), 5);
                            return self.buffer.write_u32(opcode.get());
                        }
                    } else {
                        // No base register: PC-relative literal form.
                        if op_data.literal_op == 0 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        // Literal loads exist only for S/D/Q (xsz >= 2).
                        if xsz < 2 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        let opc = xsz - 2;
                        opcode.reset((op_data.literal_op as u32) << 24);
                        opcode.add_imm(opc, 30);
                        opcode.add_reg(op0.id(), 0);
                        // 19-bit signed word-scaled offset at bit 5.
                        offset_format.reset_to_imm_type(OffsetType::SignedOffset, 4, 5, 19, 2);
                        rm_rel = op1;
                        emit_rel!();
                    }
                }
            }
4659
            // SIMD&FP load/store pair: offset is a signed 7-bit immediate
            // scaled by the register size.
            Encoding::SimdLdpStp => {
                let op_data = &SIMD_LDP_STP[encoding_index];
                if isign4 == enc_ops!(Reg, Reg, Mem) {
                    let m = op2.as_::<Mem>();
                    rm_rel = op2;

                    // opc: Vec32..Vec128 -> 0..2 (S, D, Q); no lanes allowed.
                    let opc = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec32 as u32;
                    if opc > 2
                        || op0.as_::<Vec>().has_element_type()
                        || op0.as_::<Vec>().has_element_index()
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if !check_signature!(op0, op1) {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if !check_vec_id2(op0, op1) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    // Base must be a plain 64-bit GP register, no index.
                    if m.base_type() != RegType::Gp64 || m.has_index() {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    // Offset is scaled by the register size: 4 << opc bytes.
                    let offset_shift = 2 + opc;
                    let offset32 = m.offset() as i32 >> offset_shift;
                    // Reject offsets that are not a multiple of the scale.
                    if ((offset32 << offset_shift) as i32) != m.offset() as i32 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    // Scaled offset must fit a signed 7-bit field.
                    if offset32 < -64 || offset32 > 63 {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if m.is_pre_or_post() && offset32 != 0 {
                        if op_data.pre_post_op == 0 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset((op_data.pre_post_op as u32) << 22);
                        // Bit 24 selects pre-index over post-index.
                        opcode.add_imm(m.is_pre_index() as u32, 24);
                    } else {
                        // Plain signed-offset form (also used for a writeback
                        // of zero, which is equivalent).
                        opcode.reset((op_data.offset_op as u32) << 22);
                    }
                    opcode.add_imm(opc, 30);
                    opcode.add_imm((offset32 as u32) & 0x7F, 15);
                    opcode.add_reg(op1.id(), 10);
                    opcode.add_reg(op0.id(), 0);
                    opcode.add_reg(m.base_id(), 5);
                    return self.buffer.write_u32(opcode.get());
                }
            }
4714
            // LDUR/STUR (SIMD&FP): unscaled signed 9-bit offset, base-register
            // addressing only (no index, no writeback).
            Encoding::SimdLdurStur => {
                let op_data = &SIMD_LDUR_STUR[encoding_index];
                if isign4 == enc_ops!(Reg, Mem) {
                    let m = op1.as_::<Mem>();
                    rm_rel = op1;

                    // sz = log2(transfer size): Vec8..Vec128 -> 0..4; plain
                    // vector register required (no element type/index).
                    let sz = op0.as_::<Reg>().reg_type() as u32 - RegType::Vec8 as u32;
                    if sz > 4
                        || op0.as_::<Vec>().has_element_type()
                        || op0.as_::<Vec>().has_element_index()
                    {
                        self.last_error = Some(AsmError::InvalidInstruction);
                        return;
                    }
                    if !check_vec_id(op0) {
                        self.last_error = Some(AsmError::InvalidOperand);
                        return;
                    }
                    if m.has_base_reg() && !m.has_index() && !m.is_pre_or_post() {
                        // Offset must fit the signed 9-bit unscaled field.
                        let offset32 = m.offset() as i32;
                        if offset32 < -256 || offset32 > 255 {
                            self.last_error = Some(AsmError::InvalidOperand);
                            return;
                        }
                        opcode.reset((op_data.opcode as u32) << 10);
                        // size (31:30) plus the "opc high" bit (23) encode B..Q.
                        opcode.add_imm(sz & 3, 30);
                        opcode.add_imm(sz >> 2, 23);
                        opcode.add_imm((offset32 as u32) & 0x1FF, 12);
                        opcode.add_reg(op0.id(), 0);
                        opcode.add_reg(m.base_id(), 5);
                        return self.buffer.write_u32(opcode.get());
                    }
                    self.last_error = Some(AsmError::InvalidOperand);
                    return;
                }
            }
4751
4752            Encoding::SimdLdNStN => {
4753                let op_data = &SIMD_LD_N_ST_N[encoding_index];
4754                let o4 = *ops.get(4).unwrap_or(&NOREG);
4755
4756                let mut n = 1;
4757
4758                if isign4 == enc_ops!(Reg, Mem) {
4759                    if op_data.n != 1 {
4760                        self.last_error = Some(AsmError::InvalidInstruction);
4761                        return;
4762                    }
4763                    rm_rel = op1;
4764                } else if isign4 == enc_ops!(Reg, Reg, Mem) {
4765                    if op_data.n != 1 && op_data.n != 2 {
4766                        self.last_error = Some(AsmError::InvalidInstruction);
4767                        return;
4768                    }
4769                    if !check_signature!(op0, op1) || op0.id() + 1 != op1.id() {
4770                        self.last_error = Some(AsmError::InvalidInstruction);
4771                        return;
4772                    }
4773                    n = 2;
4774                    rm_rel = op2;
4775                } else if isign4 == enc_ops!(Reg, Reg, Reg, Mem) && o4.is_none() {
4776                    if op_data.n != 1 && op_data.n != 3 {
4777                        self.last_error = Some(AsmError::InvalidInstruction);
4778                        return;
4779                    }
4780                    if !check_signature!(op0, op1, op2)
4781                        || op0.id() + 1 != op1.id()
4782                        || op1.id() + 1 != op2.id()
4783                    {
4784                        self.last_error = Some(AsmError::InvalidInstruction);
4785                        return;
4786                    }
4787                    n = 3;
4788                    rm_rel = op3;
4789                } else if isign4 == enc_ops!(Reg, Reg, Reg, Reg) && o4.is_mem() {
4790                    if op_data.n != 1 && op_data.n != 4 {
4791                        self.last_error = Some(AsmError::InvalidInstruction);
4792                        return;
4793                    }
4794                    if !check_signature!(op0, op1, op2, op3)
4795                        || op0.id() + 1 != op1.id()
4796                        || op1.id() + 1 != op2.id()
4797                        || op2.id() + 1 != op3.id()
4798                    {
4799                        self.last_error = Some(AsmError::InvalidInstruction);
4800                        return;
4801                    }
4802                    n = 4;
4803                    rm_rel = &o4;
4804                } else {
4805                    self.last_error = Some(AsmError::InvalidInstruction);
4806                    return;
4807                }
4808
4809                let v = op0.as_::<Vec>();
4810                let m = rm_rel.as_::<Mem>();
4811
4812                let mut q = 0u32;
4813                let mut rm = 0u32;
4814                let mut rn = m.base_id();
4815                let sz = (v.element_type() as u32).wrapping_sub(VecElementType::B as u32);
4816                let mut opc_s_size = sz;
4817                let mut offset_possibility = 0u32;
4818
4819                if sz > 3 {
4820                    self.last_error = Some(AsmError::InvalidInstruction);
4821                    return;
4822                }
4823
4824                if m.base_type() != RegType::Gp64 {
4825                    self.last_error = Some(AsmError::InvalidOperand);
4826                    return;
4827                }
4828
4829                if rn > 30 && rn != Gp::ID_SP {
4830                    self.last_error = Some(AsmError::InvalidOperand);
4831                    return;
4832                }
4833
4834                rn &= 31;
4835
4836                if op_data.replicate != 0 {
4837                    if n != op_data.n {
4838                        self.last_error = Some(AsmError::InvalidInstruction);
4839                        return;
4840                    }
4841                    if v.has_element_index() {
4842                        self.last_error = Some(AsmError::InvalidInstruction);
4843                        return;
4844                    }
4845                    q = (v.reg_type() as u32).wrapping_sub(RegType::Vec64 as u32);
4846                    if q > 1 {
4847                        self.last_error = Some(AsmError::InvalidInstruction);
4848                        return;
4849                    }
4850                    opcode.reset((op_data.single_op as u32) << 10);
4851                    offset_possibility = (1u32 << sz) * n;
4852                } else if v.has_element_index() {
4853                    if n != op_data.n {
4854                        self.last_error = Some(AsmError::InvalidInstruction);
4855                        return;
4856                    }
4857                    const OPC_S_SIZE_BY_SZ_TABLE: [u32; 4] =
4858                        [0u32 << 3, 2u32 << 3, 4u32 << 3, (4u32 << 3) | 1u32];
4859                    opcode.reset((op_data.single_op as u32) << 10);
4860                    opc_s_size = OPC_S_SIZE_BY_SZ_TABLE[sz as usize];
4861                    offset_possibility = (1u32 << sz) * op_data.n;
4862                    let element_index = v.element_index();
4863                    let max_element_index = 15u32 >> sz;
4864                    if element_index > max_element_index {
4865                        self.last_error = Some(AsmError::InvalidOperand);
4866                        return;
4867                    }
4868                    let element_index_shifted = element_index << sz;
4869                    q = element_index_shifted >> 3;
4870                    opc_s_size |= element_index_shifted & 0x7;
4871                } else {
4872                    const OPC_S_SIZE_BY_N_TABLE: [u32; 5] =
4873                        [0u32, 0x7u32 << 2, 0xAu32 << 2, 0x6u32 << 2, 0x2u32 << 2];
4874                    q = (v.reg_type() as u32).wrapping_sub(RegType::Vec64 as u32);
4875                    if q > 1 {
4876                        self.last_error = Some(AsmError::InvalidInstruction);
4877                        return;
4878                    }
4879                    if op_data.n == 1 {
4880                        opc_s_size |= OPC_S_SIZE_BY_N_TABLE[n as usize];
4881                    }
4882                    opcode.reset((op_data.multiple_op as u32) << 10);
4883                    offset_possibility = (8u32 << q) * n;
4884                }
4885
4886                if m.has_index() {
4887                    if m.has_offset() || !m.is_post_index() {
4888                        self.last_error = Some(AsmError::InvalidOperand);
4889                        return;
4890                    }
4891                    rm = m.index_id();
4892                    if rm > 30 {
4893                        self.last_error = Some(AsmError::InvalidOperand);
4894                        return;
4895                    }
4896                    opcode.0 |= 1 << 23;
4897                } else {
4898                    if m.has_offset() {
4899                        if m.offset() != offset_possibility as i64 || !m.is_post_index() {
4900                            self.last_error = Some(AsmError::InvalidOperand);
4901                            return;
4902                        }
4903                        rm = 31;
4904                        opcode.0 |= 1 << 23;
4905                    }
4906                }
4907
4908                opcode.add_imm(q, 30);
4909                opcode.add_imm(rm, 16);
4910                opcode.add_imm(opc_s_size, 10);
4911                opcode.add_imm(rn, 5);
4912
4913                opcode.add_reg(op0.id(), 0);
4914                return self.buffer.write_u32(opcode.get());
4915            }
4916
4917            Encoding::None | Encoding::Count => (),
4918        }
4919
4920        self.last_error = Some(AsmError::UnsupportedInstruction {
4921            reason: "Unsupported instruction encoding or operand types",
4922        });
4923    }
4924}
4925
impl InstId {
    /// Bit field (bits 27..=30) used to carry a condition code packed next to
    /// the instruction id inside a single `u32`.
    pub const ARM_COND: u32 = 0x78000000;
    /// Mask (low 16 bits) holding the raw instruction id itself.
    pub const REAL_ID: u32 = 65535;
    /// Packs `cond` into the `ARM_COND` field alongside this instruction id.
    pub const fn with_cc(self, cond: CondCode) -> u32 {
        let x = self as u32;
        x | (cond as u32) << Self::ARM_COND.trailing_zeros()
    }

    /// Extracts the condition code previously packed by [`Self::with_cc`].
    ///
    /// NOTE(review): the field is 4 bits wide, so the transmuted value is in
    /// 0..=15 — this assumes every 4-bit value is a valid `CondCode`
    /// discriminant; confirm against the `CondCode` definition.
    pub const fn extract_cc(inst: u32) -> CondCode {
        unsafe {
            core::mem::transmute(((inst & Self::ARM_COND) >> Self::ARM_COND.trailing_zeros()) as u8)
        }
    }

    /// Strips any packed condition code, returning the raw instruction id.
    pub const fn extract_real_id(inst: u32) -> u32 {
        inst & Self::REAL_ID
    }
}
4944
4945pub const BLE: u32 = InstId::B.with_cc(CondCode::GE);
4946
4947impl Into<u32> for InstId {
4948    fn into(self) -> u32 {
4949        self as u32
4950    }
4951}
4952
4953#[derive(Copy, Clone, PartialEq, Eq, Debug)]
4954#[repr(transparent)]
4955struct Opc(u32);
4956
4957impl Opc {
4958    const N: u32 = 1 << 2;
4959    const Q: u32 = 1 << 30;
4960    const X: u32 = 1 << 31;
4961
4962    pub fn reset(&mut self, value: u32) {
4963        self.0 = value;
4964    }
4965
4966    pub fn get(&self) -> u32 {
4967        self.0
4968    }
4969
4970    pub const fn has_q(&self) -> bool {
4971        (self.0 & Self::Q) != 0
4972    }
4973    pub const fn has_x(&self) -> bool {
4974        (self.0 & Self::X) != 0
4975    }
4976
4977    pub fn add_imm(&mut self, value: u32, bit_index: u32) -> &mut Self {
4978        self.0 |= value << bit_index;
4979        self
4980    }
4981
4982    pub fn xor_imm(&mut self, value: u32, bit_index: u32) -> &mut Self {
4983        self.0 ^= value << bit_index;
4984        self
4985    }
4986
4987    pub fn add_if(&mut self, condition: bool, value: u32, bit_index: u32) -> &mut Self {
4988        if condition {
4989            self.0 |= value << bit_index;
4990        }
4991        self
4992    }
4993
4994    pub fn add_logical_imm(&mut self, logical_imm: &LogicalImm) -> &mut Self {
4995        self.add_imm(logical_imm.n, 22)
4996            .add_imm(logical_imm.s, 10)
4997            .add_imm(logical_imm.r, 16);
4998        self
4999    }
5000
5001    pub fn add_reg(&mut self, id: u32, bit_index: u32) -> &mut Self {
5002        self.0 |= (id & 31) << bit_index;
5003        self
5004    }
5005}
5006
// Bitwise and shift operator sugar so an `Opc` can be combined with raw
// `u32` masks exactly like the instruction word it wraps; every impl simply
// forwards to the corresponding `u32` operator on the inner value.
impl core::ops::BitOr<u32> for Opc {
    type Output = Self;

    fn bitor(self, rhs: u32) -> Self::Output {
        Self(self.0 | rhs)
    }
}

impl core::ops::BitOrAssign<u32> for Opc {
    fn bitor_assign(&mut self, rhs: u32) {
        self.0 |= rhs;
    }
}

impl core::ops::BitAnd<u32> for Opc {
    type Output = Self;

    fn bitand(self, rhs: u32) -> Self::Output {
        Self(self.0 & rhs)
    }
}

impl core::ops::BitAndAssign<u32> for Opc {
    fn bitand_assign(&mut self, rhs: u32) {
        self.0 &= rhs;
    }
}

impl core::ops::Not for Opc {
    type Output = Self;

    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

impl core::ops::BitXor<u32> for Opc {
    type Output = Self;

    fn bitxor(self, rhs: u32) -> Self::Output {
        Self(self.0 ^ rhs)
    }
}

impl core::ops::BitXorAssign<u32> for Opc {
    fn bitxor_assign(&mut self, rhs: u32) {
        self.0 ^= rhs;
    }
}

impl core::ops::Shl<u32> for Opc {
    type Output = Self;

    fn shl(self, rhs: u32) -> Self::Output {
        Self(self.0 << rhs)
    }
}

impl core::ops::ShlAssign<u32> for Opc {
    fn shl_assign(&mut self, rhs: u32) {
        self.0 <<= rhs;
    }
}

impl core::ops::Shr<u32> for Opc {
    type Output = Self;

    fn shr(self, rhs: u32) -> Self::Output {
        Self(self.0 >> rhs)
    }
}

impl core::ops::ShrAssign<u32> for Opc {
    fn shr_assign(&mut self, rhs: u32) {
        self.0 >>= rhs;
    }
}
5084
/// The N, S (imms) and R (immr) fields of an AArch64 logical immediate, as
/// produced by [`encode_logical_imm`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct LogicalImm {
    // N bit — set only for full 64-bit patterns.
    pub n: u32,
    // imms field — element size marker plus ones count.
    pub s: u32,
    // immr field — rotation amount.
    pub r: u32,
}
5091
5092fn check_gp_type(op: &Operand, allowed: u32) -> bool {
5093    let typ = op.as_::<Reg>().typ() as u32;
5094    let mask = allowed << RegType::Gp32 as u32;
5095    bit_test(mask, typ)
5096}
5097
5098fn check_gp_typex(op: &Operand, allowed: u32, x: &mut u32) -> bool {
5099    let typ = op.as_::<Reg>().typ() as u32;
5100    *x = (typ - RegType::Gp32 as u32) & allowed;
5101    bit_test(allowed << RegType::Gp32 as u32, typ)
5102}
5103
5104fn check_gp_typex2(o0: &Operand, o1: &Operand, allowed: u32, x: &mut u32) -> bool {
5105    check_gp_typex(o0, allowed, x) && check_signature!(o0, o1)
5106}
5107
5108fn check_gp_typex3(o0: &Operand, o1: &Operand, o2: &Operand, allowed: u32, x: &mut u32) -> bool {
5109    check_gp_typex(o0, allowed, x) && check_signature!(o0, o1, o2)
5110}
5111
5112fn check_gp_id(op: &Operand, hi_id: u32) -> bool {
5113    op.id() < 31 || op.id() == hi_id
5114}
5115
5116fn check_gp_id2(o0: &Operand, o1: &Operand, hi_id: u32) -> bool {
5117    let id0 = o0.id();
5118    let id1 = o1.id();
5119    (id0 < 31 || id0 == hi_id) && (id1 < 31 || id1 == hi_id)
5120}
5121
5122fn check_gp_id3(o0: &Operand, o1: &Operand, o2: &Operand, hi_id: u32) -> bool {
5123    let id0 = o0.id();
5124    let id1 = o1.id();
5125    let id2 = o2.id();
5126    (id0 < 31 || id0 == hi_id) && (id1 < 31 || id1 == hi_id) && (id2 < 31 || id2 == hi_id)
5127}
5128
5129fn check_vec_id(o0: &Operand) -> bool {
5130    let id = o0.id();
5131    id < 31
5132}
5133
5134fn check_vec_id2(o0: &Operand, o1: &Operand) -> bool {
5135    let id0 = o0.id();
5136    let id1 = o1.id();
5137    id0 < 31 && id1 < 31
5138}
5139
5140fn check_vec_id3(o0: &Operand, o1: &Operand, o2: &Operand) -> bool {
5141    let id0 = o0.id();
5142    let id1 = o1.id();
5143    let id2 = o2.id();
5144    id0 < 31 && id1 < 31 && id2 < 31
5145}
5146
/// Returns `true` when bit `n` of `value` is set.
fn bit_test(value: u32, n: u32) -> bool {
    (value >> n) & 1 != 0
}
5154
/// Emits a minimal MOVZ/MOVN/MOVK sequence materializing the 64-bit `imm`
/// into register `rd`, writing up to four instruction words into `out` and
/// returning how many were produced.
///
/// `x` is only forwarded to [`encode_mov_sequence32`] when the value fits in
/// 32 bits; the 64-bit paths below hard-code sf = 1 into the constants.
fn encode_mov_sequence64(out: &mut [u32; 4], mut imm: u64, rd: u32, x: u32) -> usize {
    // 64-bit (sf = 1) base opcodes.
    const MOVZ: u32 = 0b11010010100000000000000000000000;
    const MOVN: u32 = 0b10010010100000000000000000000000;
    const MOVK: u32 = 0b11110010100000000000000000000000;

    // Values fitting in 32 bits are delegated to the 32-bit helper.
    // NOTE(review): if that helper picks one of its MOVN forms while x == 1,
    // the upper 32 bits of the register end up set — confirm callers only
    // reach that combination with 32-bit semantics in mind.
    if imm <= 0xFFFFFFFF {
        return encode_mov_sequence32(out, imm as u32, rd, x);
    }

    // Pick the cheaper strategy: start from all-zeros (MOVZ) when at least as
    // many half-words are zero as are all-ones, else start from all-ones (MOVN).
    let zhw = count_zero_half_words_64(imm);
    let ohw = count_zero_half_words_64(!imm);

    if zhw >= ohw {
        // One MOVZ, then a MOVK per remaining non-zero half-word.
        let mut op = MOVZ;
        let mut count = 0;
        for hw_index in 0..4 {
            let hw_imm = (imm & 0xFFFF) as u32;
            if hw_imm == 0 {
                imm >>= 16;
                continue;
            }
            // hw_index selects the half-word shift (bits 21-22).
            out[count] = op | (hw_index << 21) | (hw_imm << 5) | rd;
            op = MOVK;
            count += 1;

            imm >>= 16;
        }

        return count;
    }

    // One MOVN (payload inverted), then a MOVK per non-0xFFFF half-word.
    let mut op = MOVN;
    let mut count = 0;
    let mut neg_mask = 0xFFFF;

    for hw_index in 0..4 {
        let hw_imm = (imm & 0xFFFF) as u32;
        if hw_imm == 0xFFFF {
            imm >>= 16;
            continue;
        }

        // neg_mask inverts the payload only for the leading MOVN; the
        // following MOVKs take each half-word as-is.
        out[count] = op | (hw_index << 21) | ((hw_imm ^ neg_mask) << 5) | rd;
        count += 1;
        op = MOVK;
        neg_mask = 0;
        imm >>= 16;
    }

    count
}
5206
/// Emits a minimal MOVZ/MOVN/MOVK sequence materializing the 32-bit `imm`
/// into register `rd`, writing 1-2 instruction words into `out` and returning
/// how many were produced.
///
/// `x` selects the destination width: 0 emits the W (32-bit, sf = 0) forms,
/// 1 the X (64-bit, sf = 1) forms.
///
/// Fixed: the base opcodes previously hard-coded sf = 1 and OR'ed `x << 31`
/// only into MOVZ (a no-op there), so a request for a 32-bit move still
/// produced the 64-bit MOVN/MOVK encodings. The bases now carry sf = 0 and
/// all three take the sf bit from `x`.
fn encode_mov_sequence32(out: &mut [u32], imm: u32, rd: u32, x: u32) -> usize {
    let sf = x << 31;
    let movz = 0b01010010100000000000000000000000 | sf;
    let movn = 0b00010010100000000000000000000000 | sf;
    let movk = 0b01110010100000000000000000000000 | sf;

    // NOTE(review): with x == 1 the MOVN forms below set the upper 32 bits of
    // the destination — confirm the 64-bit caller only reaches them when that
    // is intended.

    // High half zero: single MOVZ of the low half-word.
    if (imm & 0xFFFF0000) == 0 {
        out[0] = movz | (0 << 21) | ((imm & 0xffff) << 5) | rd;
        return 1;
    }

    // High half all-ones: single MOVN of the inverted low half-word.
    if (imm & 0xFFFF0000) == 0xFFFF0000 {
        out[0] = movn | (0 << 21) | ((!imm & 0xFFFF) << 5) | rd;
        return 1;
    }

    // Low half zero: single MOVZ of the high half-word (LSL #16).
    if (imm & 0x0000FFFF) == 0x00000000 {
        out[0] = movz | (1 << 21) | ((imm >> 16) << 5) | rd;
        return 1;
    }

    // Low half all-ones: single MOVN of the inverted high half-word.
    if (imm & 0x0000FFFF) == 0x0000FFFF {
        out[0] = movn | (1 << 21) | ((!imm >> 16) << 5) | rd;
        return 1;
    }

    // General case: MOVZ low half, then MOVK high half.
    out[0] = movz | (0 << 21) | ((imm & 0xFFFF) << 5) | rd;
    out[1] = movk | (1 << 21) | ((imm >> 16) << 5) | rd;
    2
}
5235
/// Counts how many of the four 16-bit half-words of `imm` are zero.
pub const fn count_zero_half_words_64(imm: u64) -> u32 {
    let mut count = 0;
    let mut shift = 0;
    while shift < 64 {
        if (imm >> shift) & 0xFFFF == 0 {
            count += 1;
        }
        shift += 16;
    }
    count
}
5252
/// Encodes the given `imm` value of the given `width` to a logical immediate value represented as N, S, and R fields
/// and writes these fields to `out`.
///
/// Returns `None` when the pattern is not representable — all-zeros,
/// all-ones, or anything that is not a repetition of a single rotated run of
/// contiguous ones.
///
/// Encoding Table:
///
/// ```text
/// +---+--------+--------+------+
/// | N |  ImmS  |  ImmR  | Size |
/// +---+--------+--------+------+
/// | 1 | ssssss | rrrrrr |  64  |
/// | 0 | 0sssss | .rrrrr |  32  |
/// | 0 | 10ssss | ..rrrr |  16  |
/// | 0 | 110sss | ...rrr |  8   |
/// | 0 | 1110ss | ....rr |  4   |
/// | 0 | 11110s | .....r |  2   |
/// +---+--------+--------+------+
/// ```
pub const fn encode_logical_imm(mut imm: u64, mut width: u32) -> Option<LogicalImm> {
    // Shrink `width` to the smallest power-of-two element whose pattern
    // repeats across the whole value.
    loop {
        width /= 2;
        let mask = (1u64 << width) - 1;
        if (imm & mask) != (imm >> width) & mask {
            width *= 2;
            break;
        }
        if width <= 2 {
            break;
        }
    }

    // Keep only one element of the repeating pattern.
    let lsb_mask = lsb_mask::<u64>(width);
    imm &= lsb_mask;

    // Patterns of all zeros and all ones are not encodable.
    if imm == 0 || lsb_mask == imm {
        return None;
    }

    // Inspect the pattern and get the most important bit indexes.
    //
    //         o_index <-+      +-> z_index
    //                  |      |
    // |..zeros..|o_count|z_count|..ones..|
    // |000000000|111111|000000|11111111|
    let z_index = (!imm).trailing_zeros();
    let z_imm = imm ^ ((1 << z_index) - 1);
    let z_count = if z_imm != 0 {
        z_imm.trailing_zeros()
    } else {
        width
    } - z_index;

    let o_index = z_index + z_count;
    let o_imm = !(z_imm ^ ((1 << o_index) - 1));
    let o_count = if o_imm != 0 {
        o_imm.trailing_zeros()
    } else {
        width
    } - o_index;

    // Any set bit above the ones run means the element is not a single
    // rotated run of ones and cannot be encoded.
    let must_be_zero = o_imm ^ !((1 << (o_index + o_count)) - 1);
    if must_be_zero != 0 || (z_index > 0 && width - (o_index + o_count) != 0) {
        return None;
    }

    Some(LogicalImm {
        n: if width == 64 { 1 } else { 0 },
        // Note: `&` binds tighter than `|` here — the element-size marker
        // (-2 * width, masked to 6 bits) is OR'ed over the ones count.
        s: (o_count + z_index - 1) | 0u32.wrapping_sub(width * 2) & 0x3f,
        r: width - o_index,
    })
}
5324
/// Classifies how a relative / memory offset is folded into an instruction
/// word (consumed via `OffsetFormat`).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u8)]
pub(super) enum OffsetType {
    // Plain signed immediate offset.
    SignedOffset,
    // Offset layout of ADR-style instructions (by name; exact bit layout is
    // handled where the format is applied).
    Adr,
    // Offset layout of ADRP-style (page-granular) instructions.
    Adrp,
    // Offset layout of literal LDR-style instructions.
    Ldr,
}
5333
5334impl TryFrom<u8> for OffsetType {
5335    type Error = ();
5336
5337    fn try_from(value: u8) -> Result<Self, Self::Error> {
5338        match value {
5339            0 => Ok(Self::SignedOffset),
5340            1 => Ok(Self::Adr),
5341            2 => Ok(Self::Adrp),
5342            3 => Ok(Self::Ldr),
5343            _ => Err(()),
5344        }
5345    }
5346}
5347
/// Describes where and how an immediate offset is stored inside an encoded
/// instruction region.
struct OffsetFormat {
    // Offset kind (see `OffsetType`).
    typ: OffsetType,
    // Extra flags; only ever reset to 0 in the code visible here.
    flags: u8,
    // Total size in bytes of the region containing the value.
    region_size: u8,
    // Size in bytes of the value that holds the immediate.
    value_size: u8,
    // Byte offset of the value within the region.
    value_offset: u8,
    // Number of immediate bits stored.
    imm_bit_count: u8,
    // Bit position of the immediate within the value.
    imm_bit_shift: u8,
    // Low offset bits discarded before encoding — presumably
    // alignment-implied bits; confirm at the use sites.
    imm_discard_lsb: u8,
}
5358
5359impl OffsetFormat {
5360    pub const fn new(
5361        typ: OffsetType,
5362        flags: u8,
5363        region_size: u8,
5364        value_size: u8,
5365        value_offset: u8,
5366        imm_bit_count: u8,
5367        imm_bit_shift: u8,
5368        imm_discard_lsb: u8,
5369    ) -> Self {
5370        Self {
5371            typ,
5372            flags,
5373            region_size,
5374            value_size,
5375            value_offset,
5376            imm_bit_count,
5377            imm_bit_shift,
5378            imm_discard_lsb,
5379        }
5380    }
5381
5382    pub fn reset_to_imm_type(
5383        &mut self,
5384        typ: OffsetType,
5385        value_size: usize,
5386        imm_bit_shift: u32,
5387        imm_bit_count: u32,
5388        imm_discard_lsb: u32,
5389    ) {
5390        self.typ = typ;
5391        self.value_size = value_size as u8;
5392        self.region_size = value_size as u8;
5393        self.imm_bit_shift = imm_bit_shift as u8;
5394        self.imm_bit_count = imm_bit_count as u8;
5395        self.imm_discard_lsb = imm_discard_lsb as u8;
5396        self.flags = 0;
5397        self.value_offset = 0;
5398    }
5399
5400    fn set_region(&mut self, region_size: usize, value_offset: usize) {
5401        self.region_size = region_size as u8;
5402        self.value_offset = value_offset as u8;
5403    }
5404
5405    fn set_leading_and_trailing_size(&mut self, leading_size: usize, trailing_size: usize) {
5406        self.region_size = (leading_size + trailing_size + self.value_size as usize) as u8;
5407        self.value_offset = leading_size as u8;
5408    }
5409
5410    fn typ(&self) -> OffsetType {
5411        self.typ
5412    }
5413
5414    fn flags(&self) -> u8 {
5415        self.flags
5416    }
5417
5418    fn region_size(&self) -> usize {
5419        self.region_size as usize
5420    }
5421
5422    fn value_size(&self) -> usize {
5423        self.value_size as usize
5424    }
5425
5426    fn value_offset(&self) -> usize {
5427        self.value_offset as usize
5428    }
5429
5430    fn imm_bit_count(&self) -> usize {
5431        self.imm_bit_count as usize
5432    }
5433
5434    fn imm_bit_shift(&self) -> usize {
5435        self.imm_bit_shift as usize
5436    }
5437
5438    fn imm_discard_lsb(&self) -> usize {
5439        self.imm_discard_lsb as usize
5440    }
5441}
5442
/// Returns a mask of the `n` least significant bits, computed as a `u64`.
/// `T` only contributes its bit width: for types narrower than 64 bits the
/// plain shift formula is safe; for 64-bit types the shift-down-from-all-ones
/// form avoids the undefined `1 << 64` when `n` equals the width.
const fn lsb_mask<T>(n: u32) -> u64 {
    let t_bits = size_of::<T>() as u32 * 8;
    if t_bits < 64 {
        (1u64 << n) - 1
    } else if n == 0 {
        0
    } else {
        u64::MAX.wrapping_shr(t_bits - n)
    }
}
5454
/// Maps a `CondCode` discriminant to the 4-bit hardware condition field;
/// equivalent to `(cond - 2) mod 16` — the enum values are offset by 2
/// relative to the encoded condition values.
const fn cond_code_to_opcode_field(cond: u32) -> u32 {
    cond.wrapping_add(14) & 0xf
}
5458
// Maps a `ShiftOp` value (used as the index) to the extend/shift 'option'
// selector of load/store register-offset forms; 255 marks shift operations
// that have no load/store counterpart.
// NOTE(review): the exact index-to-`ShiftOp` correspondence depends on the
// `ShiftOp` discriminants declared in the operand module — confirm there.
const SHIFT_OP_TO_LD_ST_OP_MAP: [u8; 16] = [
    3, 255, 255, 255, 255, 255, 255, 255, 2, 255, 255, 255, 6, 7, 255, 255,
];
5462
/// True when every byte of `imm` is either 0x00 or 0xFF: multiplying the
/// per-byte lsb mask by 255 re-expands each low bit to a full byte, which
/// must reproduce the original value.
const fn is_byte_mask_imm(imm: u64) -> bool {
    const BYTE_LSBS: u64 = 0x0101010101010101;
    imm == (imm & BYTE_LSBS) * 255
}
5467
/// Compresses a 64-bit byte mask (each byte 0x00 or 0xFF, see
/// [`is_byte_mask_imm`]) into the 8-bit immediate where bit i of the result
/// reflects byte i of `imm`.
///
/// Each shift extracts TWO result bits at once: because all bits within a
/// mask byte are equal, bit 8k+7 (msb of byte k) feeds the even output bit
/// and bit 8k+8 (lsb of byte k+1) feeds the odd one.
const fn encode_imm64_byte_mask_to_imm8(imm: u64) -> u32 {
    (((imm >> (7  - 0)) & 0b00000011) | // [.......G|H.......]
     ((imm >> (23 - 2)) & 0b00001100) | // [.......E|F.......]
     ((imm >> (39 - 4)) & 0b00110000) | // [.......C|D.......]
     ((imm >> (55 - 6)) & 0b11000000)) as u32
}
5474
// Shared body of the `is_fp{16,32,64}_imm8` checks. A float bit-pattern is an
// FMOV imm8 candidate when its `$num_zero_bits` trailing bits are all zero
// and the `$num_b_bits`-wide field sitting above the `cdefgh` payload equals
// either `b0_pattern` (0b10…0) or its complement `b1_pattern` (0b01…1).
macro_rules! is_fp_imm8_generic {
    ($t: ty: $val: expr, $num_b_bits: expr, $num_cdefgh_bits: expr, $num_zero_bits: expr) => {{
        let all_bs_mask = lsb_mask::<u32>($num_b_bits);
        let b0_pattern = 1u32 << ($num_b_bits - 1);
        let b1_pattern = all_bs_mask as u32 ^ b0_pattern;

        // Trailing mantissa bits must all be zero…
        let imm_z = $val & lsb_mask::<$t>($num_zero_bits as _) as $t;
        // …and the exponent-derived field must match one of the two patterns.
        let imm_b = ($val >> ($num_zero_bits + $num_cdefgh_bits)) as u32 & all_bs_mask as u32;
        imm_z == 0 && (imm_b == b0_pattern || imm_b == b1_pattern)
    }};
}
5486
/// True when the half-precision bit-pattern in `val`'s low 16 bits is
/// representable as an 8-bit FMOV immediate.
pub const fn is_fp16_imm8(val: u32) -> bool {
    is_fp_imm8_generic!(u32: val, 3, 6, 6)
}

/// True when the single-precision bit-pattern `val` is representable as an
/// 8-bit FMOV immediate.
pub const fn is_fp32_imm8(val: u32) -> bool {
    is_fp_imm8_generic!(u32: val, 6, 6, 19)
}

/// True when the double-precision bit-pattern `val` is representable as an
/// 8-bit FMOV immediate.
pub const fn is_fp64_imm8(val: u64) -> bool {
    is_fp_imm8_generic!(u64: val, 9, 6, 48)
}
5498
// Shared body of the `encode_fp*_to_imm8` helpers: drops the trailing
// `$num_zero_bits`, keeps the low 7 retained bits, and moves the sign bit
// down into bit 7 of the result.
macro_rules! encode_fp_to_imm8_generic {
    ($t: ty: $val: expr, $num_b_bits: expr, $num_cdefgh_bits: expr, $num_zero_bits: expr) => {{
        let bits = ($val >> $num_zero_bits) as u32;
        ((bits >> ($num_b_bits + $num_cdefgh_bits - 7)) & 0x80) | (bits & 0x7f)
    }};
}
5505
/// Compresses a representable f64 bit-pattern (see [`is_fp64_imm8`]) into the
/// 8-bit FMOV immediate: the sign bit plus the seven retained exponent /
/// mantissa bits.
pub const fn encode_fp64_to_imm8(val: u64) -> u32 {
    // Same as `encode_fp_to_imm8_generic!(u64: val, 9, 6, 48)`, spelled out:
    // 9 + 6 - 7 == 8.
    let bits = (val >> 48) as u32;
    ((bits >> 8) & 0x80) | (bits & 0x7f)
}
5509
5510fn pick_fp_opcode(
5511    reg: Vec,
5512    s_op: u32,
5513    s_hf: u32,
5514    v_op: u32,
5515    v_hf: u32,
5516    sz_out: &mut u32,
5517) -> Option<Opc> {
5518    const QBIT_INDEX: usize = 30;
5519
5520    struct EncodeFpOpcodeBits {
5521        size_mask: u32,
5522        mask: [u32; 3],
5523    }
5524
5525    static SZ_BITS_TABLE: [EncodeFpOpcodeBits; 6] = [
5526        EncodeFpOpcodeBits {
5527            size_mask: (1 << 2) | (1 << 1),
5528            mask: [0, 0, 1 << 22],
5529        },
5530        EncodeFpOpcodeBits {
5531            size_mask: (1 << 2) | (1 << 1) | (1 << 0),
5532            mask: [0, 0, 0],
5533        },
5534        EncodeFpOpcodeBits {
5535            size_mask: (1 << 2) | (1 << 1) | (1 << 0),
5536            mask: [1 << 23 | 1 << 22, 0, 1 << 22],
5537        },
5538        EncodeFpOpcodeBits {
5539            size_mask: (1 << 2) | (1 << 1) | (1 << 0),
5540            mask: [(1 << 22) | (1 << 20) | (1 << 19), 0, 0],
5541        },
5542        EncodeFpOpcodeBits {
5543            size_mask: (1 << 2) | (1 << 1) | (1 << 0),
5544            mask: [1 << 22 | (1 << 21) | (1 << 15) | (1 << 14), 0, 1 << 22],
5545        },
5546        EncodeFpOpcodeBits {
5547            size_mask: (1 << 2) | (1 << 1) | (1 << 0),
5548            mask: [1 << 23, 0, 1 << 22],
5549        },
5550    ];
5551
5552    let mut op = Opc(0);
5553    if reg.has_element_type() {
5554        let sz = reg.typ() as u32 - RegType::Vec16 as u32;
5555        if sz > 2 || !bit_test32(SZ_BITS_TABLE[s_hf as usize].size_mask, sz) {
5556            return None;
5557        }
5558
5559        op.reset(SZ_BITS_TABLE[s_hf as usize].mask[sz as usize] ^ s_op);
5560        *sz_out = sz;
5561
5562        return (s_op != 0).then_some(op);
5563    } else {
5564        let q = reg.typ() as u32 - RegType::Vec64 as u32;
5565        let sz = (reg.element_type() as u32).saturating_sub(VecElementType::H as u32);
5566
5567        if q > 1 || sz > 2 || !bit_test32(SZ_BITS_TABLE[v_hf as usize].size_mask, sz) {
5568            return None;
5569        }
5570
5571        op.reset(SZ_BITS_TABLE[v_hf as usize].mask[sz as usize] ^ (v_op | (q << QBIT_INDEX)));
5572        *sz_out = sz;
5573        return (v_op != 0).then_some(op);
5574    }
5575}
5576
/// Returns `true` when bit `n` of `value` is set (const-context variant of
/// `bit_test`).
const fn bit_test32(value: u32, n: u32) -> bool {
    (value >> n) & 1 == 1
}
5580
/// Flat compile-time lookup table from a packed (vector register type,
/// element type) key to a `SizeOp`; entries are addressed as
/// `((reg_type - Vec8) << 3) | element_type` and the array is sized
/// generously for that scheme (see `SizeOpTable::len`).
struct SizeOpTable {
    array: [SizeOp; ((RegType::Vec128 as usize - RegType::Vec8 as usize + 1) + 1) * 40],
}
5584
impl SizeOpTable {
    /// Number of slots in one table; must match the array length in the
    /// struct definition.
    const fn len() -> usize {
        ((RegType::Vec128 as usize - RegType::Vec8 as usize + 1) + 1) * 40
    }

    /// Builds the "binary" table: only untyped or byte-element 64/128-bit
    /// vectors are valid, everything else is `SizeOp::K_INVALID`.
    const fn bin() -> Self {
        let mut i = 0;
        let mut array = [SizeOp::new(SizeOp::K_INVALID); Self::len()];
        while i < Self::len() {
            array[i] = Self::bin_at(i);
            i += 1;
        }
        Self { array }
    }

    /// Builds the "any" table covering scalar B/H/S/D forms and all vector
    /// element widths.
    const fn any() -> Self {
        let mut i = 0;
        let mut array = [SizeOp::new(SizeOp::K_INVALID); Self::len()];
        while i < Self::len() {
            array[i] = Self::any_at(i);
            i += 1;
        }
        Self { array }
    }

    /// Value of the "binary" table at slot `x`; the slot key is
    /// `((reg_type - Vec8) << 3) | element_type`. (An if-chain rather than a
    /// `match` because the patterns are computed const expressions.)
    const fn bin_at(x: usize) -> SizeOp {
        if x == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
            | VecElementType::None as usize)
        {
            SizeOp::new(SizeOp::K00)
        } else if x
            == (((RegType::Vec128 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::None as usize)
        {
            SizeOp::new(SizeOp::K00_Q)
        } else if x
            == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::B as usize)
        {
            SizeOp::new(SizeOp::K00)
        } else if x
            == (((RegType::Vec128 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::B as usize)
        {
            SizeOp::new(SizeOp::K00_Q)
        } else {
            SizeOp::new(SizeOp::K_INVALID)
        }
    }

    /// Value of the "any" table at slot `x`: untyped Vec8..Vec64 map to the
    /// scalar (`_S`) entries, element-typed Vec64/Vec128 map to the plain /
    /// `_Q` entries of the matching element size.
    const fn any_at(x: usize) -> SizeOp {
        if x == (((RegType::Vec8 as usize - RegType::Vec8 as usize) << 3)
            | VecElementType::None as usize)
        {
            SizeOp::new(SizeOp::K00_S)
        } else if x
            == (((RegType::Vec16 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::None as usize)
        {
            SizeOp::new(SizeOp::K01_S)
        } else if x
            == (((RegType::Vec32 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::None as usize)
        {
            SizeOp::new(SizeOp::K10_S)
        } else if x
            == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::None as usize)
        {
            SizeOp::new(SizeOp::K11_S)
        } else if x
            == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::B as usize)
        {
            SizeOp::new(SizeOp::K00)
        } else if x
            == (((RegType::Vec128 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::B as usize)
        {
            SizeOp::new(SizeOp::K00_Q)
        } else if x
            == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::H as usize)
        {
            SizeOp::new(SizeOp::K01)
        } else if x
            == (((RegType::Vec128 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::H as usize)
        {
            SizeOp::new(SizeOp::K01_Q)
        } else if x
            == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::S as usize)
        {
            SizeOp::new(SizeOp::K10)
        } else if x
            == (((RegType::Vec128 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::S as usize)
        {
            SizeOp::new(SizeOp::K10_Q)
        } else if x
            == (((RegType::Vec64 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::D as usize)
        {
            SizeOp::new(SizeOp::K11_S)
        } else if x
            == (((RegType::Vec128 as usize - RegType::Vec8 as usize) << 3)
                | VecElementType::D as usize)
        {
            SizeOp::new(SizeOp::K11_Q)
        } else {
            SizeOp::new(SizeOp::K_INVALID)
        }
    }
}
5699
/// The two size-op lookup tables, indexed by the `table_id` stored in each
/// `SizeOpMap` entry.
static SIZE_OP_TABLE: [SizeOpTable; 2] = [SizeOpTable::bin(), SizeOpTable::any()];

/// Index of the "binary" (byte/untyped only) table in `SIZE_OP_TABLE`.
const TABLE_BIN: usize = 0;
/// Index of the "any element type" table in `SIZE_OP_TABLE`.
const TABLE_ANY: usize = 1;
5704
/// Packed size/shape information for one register form: bit 0 = Q (128-bit),
/// bit 1 = scalar flag, bits 2-3 = element size code; 0xFF marks an invalid
/// combination (see the associated constants below).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(transparent)]
struct SizeOp(u8);
5708
impl SizeOp {
    /// Wraps a raw packed value (one of the `K*` constants).
    pub const fn new(val: u8) -> Self {
        Self(val)
    }

    // Bit layout of the packed value.
    const K128_BIT_SHIFT: u8 = 0;
    const K_SCALAR_SHIFT: u8 = 1;
    const K_SIZE_SHIFT: u8 = 2;

    // Q (128-bit) and scalar flags.
    const K_Q: u8 = 1u8 << Self::K128_BIT_SHIFT;
    const K_S: u8 = 1u8 << Self::K_SCALAR_SHIFT;

    // The four element-size codes.
    const K00: u8 = 0 << Self::K_SIZE_SHIFT;
    const K01: u8 = 1 << Self::K_SIZE_SHIFT;
    const K10: u8 = 2 << Self::K_SIZE_SHIFT;
    const K11: u8 = 3 << Self::K_SIZE_SHIFT;

    // Size codes combined with the Q flag (128-bit vector forms).
    const K00_Q: u8 = Self::K00 | Self::K_Q;
    const K01_Q: u8 = Self::K01 | Self::K_Q;
    const K10_Q: u8 = Self::K10 | Self::K_Q;
    const K11_Q: u8 = Self::K11 | Self::K_Q;

    // Size codes combined with the scalar flag.
    const K00_S: u8 = Self::K00 | Self::K_S;
    const K01_S: u8 = Self::K01 | Self::K_S;
    const K10_S: u8 = Self::K10 | Self::K_S;
    const K11_S: u8 = Self::K11 | Self::K_S;

    // Sentinel for combinations that cannot be encoded.
    const K_INVALID: u8 = 0xFF;

    // Masks combining the full size field with the Q and/or scalar flags,
    // used as `size_op_mask` in `SizeOpMap` entries.
    const K_SZ_Q: u8 = (0x3u8 << Self::K_SIZE_SHIFT) | Self::K_Q;
    const K_SZ_S: u8 = (0x3u8 << Self::K_SIZE_SHIFT) | Self::K_S;
    const K_SZ_QS: u8 = (0x3u8 << Self::K_SIZE_SHIFT) | Self::K_Q | Self::K_S;

    /// True unless this is the `K_INVALID` sentinel.
    const fn is_valid(self) -> bool {
        self.0 != Self::K_INVALID
    }

    /// Turns this entry into the invalid sentinel.
    const fn make_invalid(&mut self) {
        self.0 = Self::K_INVALID;
    }

    /// Q flag (128-bit) as 0/1.
    const fn q(&self) -> u32 {
        (self.0 >> Self::K128_BIT_SHIFT) as u32 & 1
    }

    /// Q-or-scalar flag as 0/1.
    const fn qs(&self) -> u32 {
        (((self.0 >> Self::K128_BIT_SHIFT) as u32) | ((self.0 >> Self::K_SCALAR_SHIFT) as u32)) & 1
    }

    /// Scalar flag as 0/1.
    const fn scalar(&self) -> u32 {
        (self.0 >> Self::K_SCALAR_SHIFT) as u32 & 1
    }

    /// Element size code (0..=3).
    const fn size(&self) -> u32 {
        (self.0 >> Self::K_SIZE_SHIFT) as u32 & 0x3
    }

    /// Decreases the size code by one.
    /// NOTE(review): underflows (and panics in debug builds) when the size
    /// code is already 0 and no flag bits are set — callers must guarantee
    /// size > 0.
    const fn decrement_size(&mut self) {
        self.0 = (self.0 as u32 - (1u32 << Self::K_SIZE_SHIFT)) as u8;
    }
}
5770
/// Per operand-shape class: which `SizeOpTable` to consult (`table_id`),
/// which packed `SizeOp` bits may be masked away (`size_op_mask`), and a
/// bitset over packed `SizeOp` values that are accepted (`accept_mask`,
/// built with the `B!` macro).
#[derive(Copy, Clone, Debug)]
struct SizeOpMap {
    table_id: u8,
    size_op_mask: u8,
    accept_mask: u16,
}
5777
5778static SIZE_OP_MAP: [SizeOpMap; 23] = [
5779    // kVO_V_B
5780    SizeOpMap {
5781        table_id: TABLE_BIN as u8,
5782        size_op_mask: SizeOp::K_Q,
5783        accept_mask: (B!(SizeOp::K00) | B!(SizeOp::K00_Q)) as u16,
5784    },
5785    // kVO_V_BH
5786    SizeOpMap {
5787        table_id: TABLE_ANY as u8,
5788        size_op_mask: SizeOp::K_SZ_QS,
5789        accept_mask: (B!(SizeOp::K00) | B!(SizeOp::K00_Q) | B!(SizeOp::K01) | B!(SizeOp::K01_Q))
5790            as u16,
5791    },
5792    // kVO_V_BH_4S
5793    SizeOpMap {
5794        table_id: TABLE_ANY as u8,
5795        size_op_mask: SizeOp::K_SZ_QS,
5796        accept_mask: (B!(SizeOp::K00)
5797            | B!(SizeOp::K00_Q)
5798            | B!(SizeOp::K01)
5799            | B!(SizeOp::K01_Q)
5800            | B!(SizeOp::K10_Q)) as u16,
5801    },
5802    // kVO_V_BHS
5803    SizeOpMap {
5804        table_id: TABLE_ANY as u8,
5805        size_op_mask: SizeOp::K_SZ_QS,
5806        accept_mask: (B!(SizeOp::K00)
5807            | B!(SizeOp::K00_Q)
5808            | B!(SizeOp::K01)
5809            | B!(SizeOp::K01_Q)
5810            | B!(SizeOp::K10)
5811            | B!(SizeOp::K10_Q)) as u16,
5812    },
5813    // kVO_V_BHS_D2
5814    SizeOpMap {
5815        table_id: TABLE_ANY as u8,
5816        size_op_mask: SizeOp::K_SZ_QS,
5817        accept_mask: (B!(SizeOp::K00)
5818            | B!(SizeOp::K00_Q)
5819            | B!(SizeOp::K01)
5820            | B!(SizeOp::K01_Q)
5821            | B!(SizeOp::K10)
5822            | B!(SizeOp::K10_Q)
5823            | B!(SizeOp::K11_Q)) as u16,
5824    },
5825    // kVO_V_HS
5826    SizeOpMap {
5827        table_id: TABLE_ANY as u8,
5828        size_op_mask: SizeOp::K_SZ_QS,
5829        accept_mask: (B!(SizeOp::K01) | B!(SizeOp::K01_Q) | B!(SizeOp::K10) | B!(SizeOp::K10_Q))
5830            as u16,
5831    },
5832    // kVO_V_S
5833    SizeOpMap {
5834        table_id: TABLE_ANY as u8,
5835        size_op_mask: SizeOp::K_Q,
5836        accept_mask: (B!(SizeOp::K10) | B!(SizeOp::K10_Q)) as u16,
5837    },
5838    // kVO_V_B8H4
5839    SizeOpMap {
5840        table_id: TABLE_ANY as u8,
5841        size_op_mask: SizeOp::K_SZ_QS,
5842        accept_mask: (B!(SizeOp::K00) | B!(SizeOp::K01)) as u16,
5843    },
5844    // kVO_V_B8H4S2
5845    SizeOpMap {
5846        table_id: TABLE_ANY as u8,
5847        size_op_mask: SizeOp::K_SZ_QS,
5848        accept_mask: (B!(SizeOp::K00) | B!(SizeOp::K01) | B!(SizeOp::K10)) as u16,
5849    },
5850    // kVO_V_B8D1
5851    SizeOpMap {
5852        table_id: TABLE_ANY as u8,
5853        size_op_mask: SizeOp::K_SZ_Q,
5854        accept_mask: (B!(SizeOp::K00) | B!(SizeOp::K11_S)) as u16,
5855    },
5856    // kVO_V_H4S2
5857    SizeOpMap {
5858        table_id: TABLE_ANY as u8,
5859        size_op_mask: SizeOp::K_SZ_QS,
5860        accept_mask: (B!(SizeOp::K01) | B!(SizeOp::K10)) as u16,
5861    },
5862    // kVO_V_B16
5863    SizeOpMap {
5864        table_id: TABLE_BIN as u8,
5865        size_op_mask: SizeOp::K_Q,
5866        accept_mask: (B!(SizeOp::K00_Q)) as u16,
5867    },
5868    // kVO_V_B16H8
5869    SizeOpMap {
5870        table_id: TABLE_ANY as u8,
5871        size_op_mask: SizeOp::K_SZ_QS,
5872        accept_mask: (B!(SizeOp::K00_Q) | B!(SizeOp::K01_Q)) as u16,
5873    },
5874    // kVO_V_B16H8S4
5875    SizeOpMap {
5876        table_id: TABLE_ANY as u8,
5877        size_op_mask: SizeOp::K_SZ_QS,
5878        accept_mask: (B!(SizeOp::K00_Q) | B!(SizeOp::K01_Q) | B!(SizeOp::K10_Q)) as u16,
5879    },
5880    // kVO_V_B16D2
5881    SizeOpMap {
5882        table_id: TABLE_ANY as u8,
5883        size_op_mask: SizeOp::K_SZ_QS,
5884        accept_mask: (B!(SizeOp::K00_Q) | B!(SizeOp::K11_Q)) as u16,
5885    },
5886    // kVO_V_H8S4
5887    SizeOpMap {
5888        table_id: TABLE_ANY as u8,
5889        size_op_mask: SizeOp::K_SZ_QS,
5890        accept_mask: (B!(SizeOp::K01_Q) | B!(SizeOp::K10_Q)) as u16,
5891    },
5892    // kVO_V_S4
5893    SizeOpMap {
5894        table_id: TABLE_ANY as u8,
5895        size_op_mask: 0,
5896        accept_mask: (B!(SizeOp::K10_Q)) as u16,
5897    },
5898    // kVO_V_D2
5899    SizeOpMap {
5900        table_id: TABLE_ANY as u8,
5901        size_op_mask: 0,
5902        accept_mask: (B!(SizeOp::K11_Q)) as u16,
5903    },
5904    // kVO_SV_BHS
5905    SizeOpMap {
5906        table_id: TABLE_ANY as u8,
5907        size_op_mask: SizeOp::K_SZ_QS,
5908        accept_mask: (B!(SizeOp::K00)
5909            | B!(SizeOp::K00_Q)
5910            | B!(SizeOp::K00_S)
5911            | B!(SizeOp::K01)
5912            | B!(SizeOp::K01_Q)
5913            | B!(SizeOp::K01_S)
5914            | B!(SizeOp::K10)
5915            | B!(SizeOp::K10_Q)
5916            | B!(SizeOp::K10_S)) as u16,
5917    },
5918    // kVO_SV_B8H4S2
5919    SizeOpMap {
5920        table_id: TABLE_ANY as u8,
5921        size_op_mask: SizeOp::K_SZ_QS,
5922        accept_mask: (B!(SizeOp::K00)
5923            | B!(SizeOp::K00_S)
5924            | B!(SizeOp::K01)
5925            | B!(SizeOp::K01_S)
5926            | B!(SizeOp::K10)
5927            | B!(SizeOp::K10_S)) as u16,
5928    },
5929    // kVO_SV_HS
5930    SizeOpMap {
5931        table_id: TABLE_ANY as u8,
5932        size_op_mask: SizeOp::K_SZ_QS,
5933        accept_mask: (B!(SizeOp::K01)
5934            | B!(SizeOp::K01_Q)
5935            | B!(SizeOp::K01_S)
5936            | B!(SizeOp::K10)
5937            | B!(SizeOp::K10_Q)
5938            | B!(SizeOp::K10_S)) as u16,
5939    },
5940    // kVO_V_Any
5941    SizeOpMap {
5942        table_id: TABLE_ANY as u8,
5943        size_op_mask: SizeOp::K_SZ_QS,
5944        accept_mask: (B!(SizeOp::K00)
5945            | B!(SizeOp::K00_Q)
5946            | B!(SizeOp::K01)
5947            | B!(SizeOp::K01_Q)
5948            | B!(SizeOp::K10)
5949            | B!(SizeOp::K10_Q)
5950            | B!(SizeOp::K11_S)
5951            | B!(SizeOp::K11_Q)) as u16,
5952    },
5953    // kVO_SV_Any
5954    SizeOpMap {
5955        table_id: TABLE_ANY as u8,
5956        size_op_mask: SizeOp::K_SZ_QS,
5957        accept_mask: (B!(SizeOp::K00)
5958            | B!(SizeOp::K00_Q)
5959            | B!(SizeOp::K00_S)
5960            | B!(SizeOp::K01)
5961            | B!(SizeOp::K01_Q)
5962            | B!(SizeOp::K01_S)
5963            | B!(SizeOp::K10)
5964            | B!(SizeOp::K10_Q)
5965            | B!(SizeOp::K10_S)
5966            | B!(SizeOp::K11)
5967            | B!(SizeOp::K11_Q)
5968            | B!(SizeOp::K11_S)) as u16,
5969    },
5970];
5971
5972const fn significant_simd_op<'a>(o0: &'a Operand, o1: &'a Operand, inst_flags: u32) -> &'a Operand {
5973    if (inst_flags & InstFlag::Long as u32) == 0 {
5974        o0
5975    } else {
5976        o1
5977    }
5978}
5979
5980const fn element_type_to_size_op(
5981    vec_op_type: u32,
5982    reg_type: RegType,
5983    element_type: VecElementType,
5984) -> SizeOp {
5985    let map = &SIZE_OP_MAP[vec_op_type as usize];
5986    let table = &SIZE_OP_TABLE[map.table_id as usize];
5987
5988    let a = reg_type as usize - RegType::Vec8 as usize;
5989    let b = RegType::Vec128 as usize - RegType::Vec8 as usize;
5990
5991    let index = /*(reg_type as usize - RegType::Vec8 as usize)
5992        .min(RegType::Vec128 as usize - RegType::Vec8 as usize)
5993        << 3
5994        | (element_type as usize);*/
5995        if a < b {
5996            (a << 3) | (element_type as usize)
5997        } else {
5998            (b << 3) | (element_type as usize)
5999        };
6000    let op = table.array[index];
6001    let mut modified_op = SizeOp::new(op.0 & map.size_op_mask);
6002
6003    if !bit_test32(map.accept_mask as u32, op.0 as u32) {
6004        modified_op.make_invalid();
6005    }
6006
6007    modified_op
6008}
6009
/// Decoded H:L:M immediate fields for by-element SIMD instructions.
struct LMHImm {
    // Low two index bits (the L:M field).
    lm: u32,
    // High index bit (the H field).
    h: u32,
    // Largest register id usable as Rm for this element size.
    max_rm_id: u32,
}

/// Splits `element_index` into the H and L:M fields for the given element
/// `size_field`, writing the pieces (and the Rm register-id ceiling) into
/// `out`.
///
/// Returns `true` when the index fits the element size; only H (1) and
/// S (2) size fields are supported — anything else returns `false` without
/// touching `out`.
fn encode_lmh(size_field: u32, element_index: u32, out: &mut LMHImm) -> bool {
    // Per-size shift amounts, index ceiling, and Rm ceiling.
    let (h_shift, lm_shift, max_element_index, max_rm_id) = match size_field {
        1 => (2u32, 0u32, 7u32, 15u32),
        2 => (1u32, 1u32, 3u32, 31u32),
        _ => return false,
    };

    out.h = element_index >> h_shift;
    out.lm = (element_index << lm_shift) & 0b11;
    out.max_rm_id = max_rm_id;

    element_index <= max_element_index
}