pub use emit_state::EmitState;

use crate::binemit::{Addend, CodeOffset, Reloc, StackMap};
use crate::ir::{types, ExternalName, LibCall, TrapCode, Type};
use crate::isa::x64::abi::X64ABIMachineSpec;
use crate::isa::x64::inst::regs::{pretty_print_reg, show_ireg_sized};
use crate::isa::x64::settings as x64_settings;
use crate::isa::{CallConv, FunctionAlignment};
use crate::{machinst::*, trace};
use crate::{settings, CodegenError, CodegenResult};
use alloc::boxed::Box;
use regalloc2::PRegSet;
use smallvec::{smallvec, SmallVec};
use std::fmt::{self, Write};
use std::string::{String, ToString};

pub mod args;
mod emit;
mod emit_state;
#[cfg(test)]
mod emit_tests;
pub mod regs;
pub mod unwind;

use args::*;

pub use super::lower::isle::generated_code::MInst as Inst;

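/// Out-of-line data for call instructions, boxed to keep the size of `Inst` small.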
#[derive(Clone, Debug)]
pub struct CallInfo {
    /// Register arguments to the call.
    pub uses: CallArgList,
    /// Return-value registers defined by the call.
    pub defs: CallRetList,
    /// Registers clobbered by the call, per its calling convention.
    pub clobbers: PRegSet,
    /// Number of stack bytes popped by the callee on return (for
    /// callee-cleanup calling conventions).
    pub callee_pop_size: u32,
    /// The calling convention of the callee.
    pub callee_conv: CallConv,
}

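/// Out-of-line data for return-call (tail-call) instructions.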
#[derive(Clone, Debug)]
pub struct ReturnCallInfo {
    /// The size of the new stack-argument area, in bytes.
    pub new_stack_arg_size: u32,

    /// Register arguments to the tail call.
    pub uses: CallArgList,

    /// A temporary register used by the return-call sequence.
    pub tmp: WritableGpr,
}

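// Keep the size of `Inst` in check: instructions are created and copied
// frequently during compilation, so any growth here should be deliberate.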
#[test]
#[cfg(target_pointer_width = "64")]
fn inst_size_test() {
    assert_eq!(40, std::mem::size_of::<Inst>());
}

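/// Returns true if the given 64-bit value can be reproduced by sign-extending
/// its low 32 bits to 64 bits.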
pub(crate) fn low32_will_sign_extend_to_64(x: u64) -> bool {
    let xs = x as i64;
    xs == ((xs << 32) >> 32)
}

impl Inst {
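    /// Returns the set of ISA feature extensions (beyond the x86_64 baseline)
    /// in which this instruction is available; an empty set means the
    /// instruction is always available.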
    fn available_in_any_isa(&self) -> SmallVec<[InstructionSet; 2]> {
        match self {
            Inst::AluRmiR { .. }
            | Inst::AluRM { .. }
            | Inst::AtomicRmwSeq { .. }
            | Inst::Bswap { .. }
            | Inst::CallKnown { .. }
            | Inst::CallUnknown { .. }
            | Inst::ReturnCallKnown { .. }
            | Inst::ReturnCallUnknown { .. }
            | Inst::CheckedSRemSeq { .. }
            | Inst::CheckedSRemSeq8 { .. }
            | Inst::Cmove { .. }
            | Inst::CmpRmiR { .. }
            | Inst::CvtFloatToSintSeq { .. }
            | Inst::CvtFloatToUintSeq { .. }
            | Inst::CvtUint64ToFloatSeq { .. }
            | Inst::Div { .. }
            | Inst::Div8 { .. }
            | Inst::Fence { .. }
            | Inst::Hlt
            | Inst::Imm { .. }
            | Inst::JmpCond { .. }
            | Inst::JmpIf { .. }
            | Inst::JmpKnown { .. }
            | Inst::JmpTableSeq { .. }
            | Inst::JmpUnknown { .. }
            | Inst::LoadEffectiveAddress { .. }
            | Inst::LoadExtName { .. }
            | Inst::LockCmpxchg { .. }
            | Inst::Mov64MR { .. }
            | Inst::MovImmM { .. }
            | Inst::MovRM { .. }
            | Inst::MovRR { .. }
            | Inst::MovFromPReg { .. }
            | Inst::MovToPReg { .. }
            | Inst::MovsxRmR { .. }
            | Inst::MovzxRmR { .. }
            | Inst::Mul { .. }
            | Inst::Mul8 { .. }
            | Inst::IMul { .. }
            | Inst::IMulImm { .. }
            | Inst::Neg { .. }
            | Inst::Not { .. }
            | Inst::Nop { .. }
            | Inst::Pop64 { .. }
            | Inst::Push64 { .. }
            | Inst::StackProbeLoop { .. }
            | Inst::Args { .. }
            | Inst::Rets { .. }
            | Inst::Ret { .. }
            | Inst::Setcc { .. }
            | Inst::ShiftR { .. }
            | Inst::SignExtendData { .. }
            | Inst::TrapIf { .. }
            | Inst::TrapIfAnd { .. }
            | Inst::TrapIfOr { .. }
            | Inst::Ud2 { .. }
            | Inst::XmmCmove { .. }
            | Inst::XmmCmpRmR { .. }
            | Inst::XmmMinMaxSeq { .. }
            | Inst::XmmUninitializedValue { .. }
            | Inst::ElfTlsGetAddr { .. }
            | Inst::MachOTlsGetAddr { .. }
            | Inst::CoffTlsGetAddr { .. }
            | Inst::Unwind { .. }
            | Inst::DummyUse { .. }
            | Inst::AluConstOp { .. } => smallvec![],

            Inst::AluRmRVex { op, .. } => op.available_from(),
            Inst::UnaryRmR { op, .. } => op.available_from(),
            Inst::UnaryRmRVex { op, .. } => op.available_from(),
            Inst::UnaryRmRImmVex { op, .. } => op.available_from(),

            Inst::GprToXmm { op, .. }
            | Inst::XmmMovRM { op, .. }
            | Inst::XmmMovRMImm { op, .. }
            | Inst::XmmRmiReg { opcode: op, .. }
            | Inst::XmmRmR { op, .. }
            | Inst::XmmRmRUnaligned { op, .. }
            | Inst::XmmRmRBlend { op, .. }
            | Inst::XmmRmRImm { op, .. }
            | Inst::XmmToGpr { op, .. }
            | Inst::XmmToGprImm { op, .. }
            | Inst::XmmUnaryRmRImm { op, .. }
            | Inst::XmmUnaryRmRUnaligned { op, .. }
            | Inst::XmmUnaryRmR { op, .. }
            | Inst::CvtIntToFloat { op, .. } => smallvec![op.available_from()],

            Inst::XmmUnaryRmREvex { op, .. }
            | Inst::XmmRmREvex { op, .. }
            | Inst::XmmRmREvex3 { op, .. }
            | Inst::XmmUnaryRmRImmEvex { op, .. } => op.available_from(),

            Inst::XmmRmiRVex { op, .. }
            | Inst::XmmRmRVex3 { op, .. }
            | Inst::XmmRmRImmVex { op, .. }
            | Inst::XmmRmRBlendVex { op, .. }
            | Inst::XmmVexPinsr { op, .. }
            | Inst::XmmUnaryRmRVex { op, .. }
            | Inst::XmmUnaryRmRImmVex { op, .. }
            | Inst::XmmMovRMVex { op, .. }
            | Inst::XmmMovRMImmVex { op, .. }
            | Inst::XmmToGprImmVex { op, .. }
            | Inst::XmmToGprVex { op, .. }
            | Inst::GprToXmmVex { op, .. }
            | Inst::CvtIntToFloatVex { op, .. }
            | Inst::XmmCmpRmRVex { op, .. } => op.available_from(),
        }
    }
}

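// Handy constructors for Insts.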
impl Inst {
    pub(crate) fn nop(len: u8) -> Self {
        debug_assert!(len <= 15);
        Self::Nop { len }
    }

    pub(crate) fn alu_rmi_r(
        size: OperandSize,
        op: AluRmiROpcode,
        src: RegMemImm,
        dst: Writable<Reg>,
    ) -> Self {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Self::AluRmiR {
            size,
            op,
            src1: Gpr::unwrap_new(dst.to_reg()),
            src2: GprMemImm::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    #[allow(dead_code)]
    pub(crate) fn unary_rm_r(
        size: OperandSize,
        op: UnaryRmROpcode,
        src: RegMem,
        dst: Writable<Reg>,
    ) -> Self {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        debug_assert!(size.is_one_of(&[
            OperandSize::Size16,
            OperandSize::Size32,
            OperandSize::Size64
        ]));
        Self::UnaryRmR {
            size,
            op,
            src: GprMem::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn not(size: OperandSize, src: Writable<Reg>) -> Inst {
        debug_assert_eq!(src.to_reg().class(), RegClass::Int);
        Inst::Not {
            size,
            src: Gpr::unwrap_new(src.to_reg()),
            dst: WritableGpr::from_writable_reg(src).unwrap(),
        }
    }

    pub(crate) fn div(
        size: OperandSize,
        sign: DivSignedness,
        trap: TrapCode,
        divisor: RegMem,
        dividend_lo: Gpr,
        dividend_hi: Gpr,
        dst_quotient: WritableGpr,
        dst_remainder: WritableGpr,
    ) -> Inst {
        divisor.assert_regclass_is(RegClass::Int);
        Inst::Div {
            size,
            sign,
            trap,
            divisor: GprMem::unwrap_new(divisor),
            dividend_lo,
            dividend_hi,
            dst_quotient,
            dst_remainder,
        }
    }

    pub(crate) fn div8(
        sign: DivSignedness,
        trap: TrapCode,
        divisor: RegMem,
        dividend: Gpr,
        dst: WritableGpr,
    ) -> Inst {
        divisor.assert_regclass_is(RegClass::Int);
        Inst::Div8 {
            sign,
            trap,
            divisor: GprMem::unwrap_new(divisor),
            dividend,
            dst,
        }
    }

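    /// Materialize a constant into a register, preferring the shorter 32-bit
    /// move when the immediate fits in 32 bits (the result is zero-extended).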
    pub(crate) fn imm(dst_size: OperandSize, simm64: u64, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let dst_size = match dst_size {
            OperandSize::Size64 if simm64 > u32::max_value() as u64 => OperandSize::Size64,
            _ => OperandSize::Size32,
        };
        Inst::Imm {
            dst_size,
            simm64,
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn mov_r_r(size: OperandSize, src: Reg, dst: Writable<Reg>) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(src.class() == RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = Gpr::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovRR { size, src, dst }
    }

    pub(crate) fn xmm_unary_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmUnaryRmR {
            op,
            src: XmmMemAligned::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn xmm_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Self {
        src.assert_regclass_is(RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmRmR {
            op,
            src1: Xmm::unwrap_new(dst.to_reg()),
            src2: XmmMemAligned::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    #[cfg(test)]
    pub(crate) fn xmm_rmr_vex3(op: AvxOpcode, src3: RegMem, src2: Reg, dst: Writable<Reg>) -> Self {
        src3.assert_regclass_is(RegClass::Float);
        debug_assert!(src2.class() == RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmRmRVex3 {
            op,
            src3: XmmMem::unwrap_new(src3),
            src2: Xmm::unwrap_new(src2),
            src1: Xmm::unwrap_new(dst.to_reg()),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn xmm_mov_r_m(op: SseOpcode, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
        debug_assert!(src.class() == RegClass::Float);
        Inst::XmmMovRM {
            op,
            src: Xmm::unwrap_new(src),
            dst: dst.into(),
        }
    }

    pub(crate) fn xmm_to_gpr(
        op: SseOpcode,
        src: Reg,
        dst: Writable<Reg>,
        dst_size: OperandSize,
    ) -> Inst {
        debug_assert!(src.class() == RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        Inst::XmmToGpr {
            op,
            src: Xmm::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
            dst_size,
        }
    }

    pub(crate) fn gpr_to_xmm(
        op: SseOpcode,
        src: RegMem,
        src_size: OperandSize,
        dst: Writable<Reg>,
    ) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::GprToXmm {
            op,
            src: GprMem::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
            src_size,
        }
    }

    pub(crate) fn xmm_cmp_rm_r(op: SseOpcode, src1: Reg, src2: RegMem) -> Inst {
        src2.assert_regclass_is(RegClass::Float);
        debug_assert!(src1.class() == RegClass::Float);
        let src2 = XmmMemAligned::unwrap_new(src2);
        let src1 = Xmm::unwrap_new(src1);
        Inst::XmmCmpRmR { op, src1, src2 }
    }

    #[allow(dead_code)]
    pub(crate) fn xmm_min_max_seq(
        size: OperandSize,
        is_min: bool,
        lhs: Reg,
        rhs: Reg,
        dst: Writable<Reg>,
    ) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert_eq!(lhs.class(), RegClass::Float);
        debug_assert_eq!(rhs.class(), RegClass::Float);
        debug_assert_eq!(dst.to_reg().class(), RegClass::Float);
        Inst::XmmMinMaxSeq {
            size,
            is_min,
            lhs: Xmm::unwrap_new(lhs),
            rhs: Xmm::unwrap_new(rhs),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn movzx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = GprMem::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovzxRmR { ext_mode, src, dst }
    }

    pub(crate) fn movsx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = GprMem::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovsxRmR { ext_mode, src, dst }
    }

    pub(crate) fn mov64_m_r(src: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::Mov64MR {
            src: src.into(),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
        debug_assert!(src.class() == RegClass::Int);
        Inst::MovRM {
            size,
            src: Gpr::unwrap_new(src),
            dst: dst.into(),
        }
    }

    pub(crate) fn lea(addr: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::LoadEffectiveAddress {
            addr: addr.into(),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
            size: OperandSize::Size64,
        }
    }

    pub(crate) fn shift_r(
        size: OperandSize,
        kind: ShiftKind,
        num_bits: Imm8Gpr,
        src: Reg,
        dst: Writable<Reg>,
    ) -> Inst {
        if let &Imm8Reg::Imm8 { imm: num_bits } = num_bits.as_imm8_reg() {
            debug_assert!(num_bits < size.to_bits());
        }
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::ShiftR {
            size,
            kind,
            src: Gpr::unwrap_new(src),
            num_bits,
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn cmp_rmi_r(size: OperandSize, src1: Reg, src2: RegMemImm) -> Inst {
        src2.assert_regclass_is(RegClass::Int);
        debug_assert_eq!(src1.class(), RegClass::Int);
        Inst::CmpRmiR {
            size,
            src1: Gpr::unwrap_new(src1),
            src2: GprMemImm::unwrap_new(src2),
            opcode: CmpOpcode::Cmp,
        }
    }

    pub(crate) fn trap(trap_code: TrapCode) -> Inst {
        Inst::Ud2 { trap_code }
    }

    pub(crate) fn trap_if(cc: CC, trap_code: TrapCode) -> Inst {
        Inst::TrapIf { cc, trap_code }
    }

    pub(crate) fn cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
        debug_assert!(size.is_one_of(&[
            OperandSize::Size16,
            OperandSize::Size32,
            OperandSize::Size64
        ]));
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::Cmove {
            size,
            cc,
            consequent: GprMem::unwrap_new(src),
            alternative: Gpr::unwrap_new(dst.to_reg()),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    pub(crate) fn push64(src: RegMemImm) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        let src = GprMemImm::unwrap_new(src);
        Inst::Push64 { src }
    }

    pub(crate) fn pop64(dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::Pop64 { dst }
    }

    pub(crate) fn call_known(
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        clobbers: PRegSet,
        callee_pop_size: u32,
        callee_conv: CallConv,
    ) -> Inst {
        Inst::CallKnown {
            dest,
            info: Some(Box::new(CallInfo {
                uses,
                defs,
                clobbers,
                callee_pop_size,
                callee_conv,
            })),
        }
    }

    pub(crate) fn call_unknown(
        dest: RegMem,
        uses: CallArgList,
        defs: CallRetList,
        clobbers: PRegSet,
        callee_pop_size: u32,
        callee_conv: CallConv,
    ) -> Inst {
        dest.assert_regclass_is(RegClass::Int);
        Inst::CallUnknown {
            dest,
            info: Some(Box::new(CallInfo {
                uses,
                defs,
                clobbers,
                callee_pop_size,
                callee_conv,
            })),
        }
    }

    pub(crate) fn ret(stack_bytes_to_pop: u32) -> Inst {
        Inst::Ret { stack_bytes_to_pop }
    }

    pub(crate) fn jmp_known(dst: MachLabel) -> Inst {
        Inst::JmpKnown { dst }
    }

    pub(crate) fn jmp_unknown(target: RegMem) -> Inst {
        target.assert_regclass_is(RegClass::Int);
        Inst::JmpUnknown { target }
    }

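    /// Choose which instruction to use for loading a value of type `ty` from
    /// memory, based on the destination register class.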
    pub(crate) fn load(
        ty: Type,
        from_addr: impl Into<SyntheticAmode>,
        to_reg: Writable<Reg>,
        ext_kind: ExtKind,
    ) -> Inst {
        let rc = to_reg.to_reg().class();
        match rc {
            RegClass::Int => {
                let ext_mode = match ty.bytes() {
                    1 => Some(ExtMode::BQ),
                    2 => Some(ExtMode::WQ),
                    4 => Some(ExtMode::LQ),
                    8 => None,
                    _ => unreachable!("the type should never use a scalar load: {}", ty),
                };
                if let Some(ext_mode) = ext_mode {
                    match ext_kind {
                        ExtKind::SignExtend => {
                            Inst::movsx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
                        }
                        ExtKind::ZeroExtend => {
                            Inst::movzx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
                        }
                        ExtKind::None => panic!(
                            "expected an extension kind for extension mode: {:?}",
                            ext_mode
                        ),
                    }
                } else {
                    Inst::mov64_m_r(from_addr, to_reg)
                }
            }
            RegClass::Float => {
                let opcode = match ty {
                    types::F16 => panic!("loading a f16 requires multiple instructions"),
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to load type: {}", ty),
                };
                Inst::xmm_unary_rm_r(opcode, RegMem::mem(from_addr), to_reg)
            }
            RegClass::Vector => unreachable!(),
        }
    }

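    /// Choose which instruction to use for storing a value of type `ty` from a
    /// register to memory, based on the register class.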
    pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
        let rc = from_reg.class();
        match rc {
            RegClass::Int => Inst::mov_r_m(OperandSize::from_ty(ty), from_reg, to_addr),
            RegClass::Float => {
                let opcode = match ty {
                    types::F16 => panic!("storing a f16 requires multiple instructions"),
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to store type: {}", ty),
                };
                Inst::xmm_mov_r_m(opcode, from_reg, to_addr)
            }
            RegClass::Vector => unreachable!(),
        }
    }
}

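// Pretty-printing uses a roughly AT&T-style syntax: source operands are
// listed before the destination.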
impl PrettyPrint for Inst {
    fn pretty_print(&self, _size: u8) -> String {
        fn ljustify(s: String) -> String {
            let w = 7;
            if s.len() >= w {
                s
            } else {
                let need = usize::min(w, w - s.len());
                s + &format!("{nil: <width$}", nil = "", width = need)
            }
        }

        fn ljustify2(s1: String, s2: String) -> String {
            ljustify(s1 + &s2)
        }

        fn suffix_lq(size: OperandSize) -> String {
            match size {
                OperandSize::Size32 => "l",
                OperandSize::Size64 => "q",
                _ => unreachable!(),
            }
            .to_string()
        }

        #[allow(dead_code)]
        fn suffix_lqb(size: OperandSize) -> String {
            match size {
                OperandSize::Size32 => "l",
                OperandSize::Size64 => "q",
                _ => unreachable!(),
            }
            .to_string()
        }

        fn suffix_bwlq(size: OperandSize) -> String {
            match size {
                OperandSize::Size8 => "b".to_string(),
                OperandSize::Size16 => "w".to_string(),
                OperandSize::Size32 => "l".to_string(),
                OperandSize::Size64 => "q".to_string(),
            }
        }

        match self {
            Inst::Nop { len } => format!("{} len={}", ljustify("nop".to_string()), len),

            Inst::AluRmiR {
                size,
                op,
                src1,
                src2,
                dst,
            } => {
                let size_bytes = size.to_bytes();
                let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
                let src2 = src2.pretty_print(size_bytes);
                let op = ljustify2(op.to_string(), suffix_bwlq(*size));
                format!("{op} {src1}, {src2}, {dst}")
            }
            Inst::AluConstOp { op, dst, size } => {
                let size_bytes = size.to_bytes();
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
                let op = ljustify2(op.to_string(), suffix_lqb(*size));
                format!("{op} {dst}, {dst}, {dst}")
            }
            Inst::AluRM {
                size,
                op,
                src1_dst,
                src2,
            } => {
                let size_bytes = size.to_bytes();
                let src2 = pretty_print_reg(src2.to_reg(), size_bytes);
                let src1_dst = src1_dst.pretty_print(size_bytes);
                let op = ljustify2(op.to_string(), suffix_bwlq(*size));
                format!("{op} {src2}, {src1_dst}")
            }
            Inst::AluRmRVex {
                size,
                op,
                src1,
                src2,
                dst,
            } => {
                let size_bytes = size.to_bytes();
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
                let src2 = src2.pretty_print(size_bytes);
                let op = ljustify2(op.to_string(), String::new());
                format!("{op} {src2}, {src1}, {dst}")
            }
            Inst::UnaryRmR { src, dst, op, size } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let src = src.pretty_print(size.to_bytes());
                let op = ljustify2(op.to_string(), suffix_bwlq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::UnaryRmRVex { src, dst, op, size } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let src = src.pretty_print(size.to_bytes());
                let op = ljustify2(op.to_string(), suffix_bwlq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::UnaryRmRImmVex {
                src,
                dst,
                op,
                size,
                imm,
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let src = src.pretty_print(size.to_bytes());
                format!(
                    "{} ${imm}, {src}, {dst}",
                    ljustify2(op.to_string(), suffix_bwlq(*size))
                )
            }

            Inst::Not { size, src, dst } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let op = ljustify2("not".to_string(), suffix_bwlq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::Neg { size, src, dst } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let op = ljustify2("neg".to_string(), suffix_bwlq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::Div {
                size,
                sign,
                trap,
                divisor,
                dividend_lo,
                dividend_hi,
                dst_quotient,
                dst_remainder,
            } => {
                let divisor = divisor.pretty_print(size.to_bytes());
                let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
                let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
                let dst_quotient =
                    pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
                let dst_remainder =
                    pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
                let op = ljustify(match sign {
                    DivSignedness::Signed => "idiv".to_string(),
                    DivSignedness::Unsigned => "div".to_string(),
                });
                format!(
                    "{op} {dividend_lo}, {dividend_hi}, {divisor}, {dst_quotient}, {dst_remainder} ; trap={trap}"
                )
            }

            Inst::Div8 {
                sign,
                trap,
                divisor,
                dividend,
                dst,
            } => {
                let divisor = divisor.pretty_print(1);
                let dividend = pretty_print_reg(dividend.to_reg(), 1);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
                let op = ljustify(match sign {
                    DivSignedness::Signed => "idiv".to_string(),
                    DivSignedness::Unsigned => "div".to_string(),
                });
                format!("{op} {dividend}, {divisor}, {dst} ; trap={trap}")
            }

            Inst::Mul {
                size,
                signed,
                src1,
                src2,
                dst_lo,
                dst_hi,
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
                let dst_lo = pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes());
                let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
                let src2 = src2.pretty_print(size.to_bytes());
                let suffix = suffix_bwlq(*size);
                let op = ljustify(if *signed {
                    format!("imul{suffix}")
                } else {
                    format!("mul{suffix}")
                });
                format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
            }

            Inst::Mul8 {
                signed,
                src1,
                src2,
                dst,
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 1);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
                let src2 = src2.pretty_print(1);
                let op = ljustify(if *signed {
                    "imulb".to_string()
                } else {
                    "mulb".to_string()
                });
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::IMul {
                size,
                src1,
                src2,
                dst,
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let src2 = src2.pretty_print(size.to_bytes());
                let suffix = suffix_bwlq(*size);
                let op = ljustify(format!("imul{suffix}"));
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::IMulImm {
                size,
                src1,
                src2,
                dst,
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let src1 = src1.pretty_print(size.to_bytes());
                let suffix = suffix_bwlq(*size);
                let op = ljustify(format!("imul{suffix}"));
                format!("{op} {src1}, {src2:#x}, {dst}")
            }

            Inst::CheckedSRemSeq {
                size,
                divisor,
                dividend_lo,
                dividend_hi,
                dst_quotient,
                dst_remainder,
            } => {
                let divisor = pretty_print_reg(divisor.to_reg(), size.to_bytes());
                let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
                let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
                let dst_quotient =
                    pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
                let dst_remainder =
                    pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
                format!(
                    "checked_srem_seq {dividend_lo}, {dividend_hi}, \
                     {divisor}, {dst_quotient}, {dst_remainder}",
                )
            }

            Inst::CheckedSRemSeq8 {
                divisor,
                dividend,
                dst,
            } => {
                let divisor = pretty_print_reg(divisor.to_reg(), 1);
                let dividend = pretty_print_reg(dividend.to_reg(), 1);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
                format!("checked_srem_seq {dividend}, {divisor}, {dst}")
            }

            Inst::SignExtendData { size, src, dst } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let op = match size {
                    OperandSize::Size8 => "cbw",
                    OperandSize::Size16 => "cwd",
                    OperandSize::Size32 => "cdq",
                    OperandSize::Size64 => "cqo",
                };
                format!("{op} {src}, {dst}")
            }

            Inst::XmmUnaryRmR { op, src, dst, .. } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
                let src = src.pretty_print(op.src_size());
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmUnaryRmRUnaligned { op, src, dst, .. } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
                let src = src.pretty_print(op.src_size());
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmUnaryRmRImm {
                op, src, dst, imm, ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
                let src = src.pretty_print(op.src_size());
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::XmmUnaryRmRVex { op, src, dst, .. } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmUnaryRmRImmVex {
                op, src, dst, imm, ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::XmmUnaryRmREvex { op, src, dst, .. } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmUnaryRmRImmEvex {
                op, src, dst, imm, ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::XmmMovRM { op, src, dst, .. } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = dst.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmMovRMVex { op, src, dst, .. } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = dst.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmMovRMImm {
                op, src, dst, imm, ..
            } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = dst.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::XmmMovRMImmVex {
                op, src, dst, imm, ..
            } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = dst.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::XmmRmR {
                op,
                src1,
                src2,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::XmmRmRUnaligned {
                op,
                src1,
                src2,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::XmmRmRBlend {
                op,
                src1,
                src2,
                mask,
                dst,
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let mask = mask.to_reg();
                let mask = if mask.is_virtual() {
                    format!(" <{}>", show_ireg_sized(mask, 8))
                } else {
                    debug_assert_eq!(mask, regs::xmm0());
                    String::new()
                };
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {dst}{mask}")
            }

            Inst::XmmRmiRVex {
                op,
                src1,
                src2,
                dst,
                ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::XmmRmRImmVex {
                op,
                src1,
                src2,
                dst,
                imm,
                ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src1}, {src2}, {dst}")
            }

            Inst::XmmVexPinsr {
                op,
                src1,
                src2,
                dst,
                imm,
                ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src1}, {src2}, {dst}")
            }

            Inst::XmmRmRVex3 {
                op,
                src1,
                src2,
                src3,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src2 = pretty_print_reg(src2.to_reg(), 8);
                let src3 = src3.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {src3}, {dst}")
            }

            Inst::XmmRmRBlendVex {
                op,
                src1,
                src2,
                mask,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let mask = pretty_print_reg(mask.to_reg(), 8);
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {mask}, {dst}")
            }

            Inst::XmmRmREvex {
                op,
                src1,
                src2,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify(op.to_string());
                format!("{op} {src2}, {src1}, {dst}")
            }

            Inst::XmmRmREvex3 {
                op,
                src1,
                src2,
                src3,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = pretty_print_reg(src2.to_reg(), 8);
                let src3 = src3.pretty_print(8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify(op.to_string());
                format!("{op} {src3}, {src2}, {src1}, {dst}")
            }

            Inst::XmmMinMaxSeq {
                lhs,
                rhs,
                dst,
                is_min,
                size,
            } => {
                let rhs = pretty_print_reg(rhs.to_reg(), 8);
                let lhs = pretty_print_reg(lhs.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify2(
                    if *is_min {
                        "xmm min seq ".to_string()
                    } else {
                        "xmm max seq ".to_string()
                    },
                    format!("f{}", size.to_bits()),
                );
                format!("{op} {lhs}, {rhs}, {dst}")
            }

            Inst::XmmRmRImm {
                op,
                src1,
                src2,
                dst,
                imm,
                size,
                ..
            } => {
                let src1 = pretty_print_reg(*src1, 8);
                let dst = pretty_print_reg(dst.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(format!(
                    "{}{}",
                    op.to_string(),
                    if *size == OperandSize::Size64 {
                        ".w"
                    } else {
                        ""
                    }
                ));
                format!("{op} ${imm}, {src1}, {src2}, {dst}")
            }

            Inst::XmmUninitializedValue { dst } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify("uninit".into());
                format!("{op} {dst}")
            }

            Inst::XmmToGpr {
                op,
                src,
                dst,
                dst_size,
            } => {
                let dst_size = dst_size.to_bytes();
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmToGprVex {
                op,
                src,
                dst,
                dst_size,
            } => {
                let dst_size = dst_size.to_bytes();
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmToGprImm { op, src, dst, imm } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::XmmToGprImmVex { op, src, dst, imm } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify(op.to_string());
                format!("{op} ${imm}, {src}, {dst}")
            }

            Inst::GprToXmm {
                op,
                src,
                src_size,
                dst,
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(src_size.to_bytes());
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::GprToXmmVex {
                op,
                src,
                src_size,
                dst,
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(src_size.to_bytes());
                let op = ljustify(op.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::XmmCmpRmR { op, src1, src2 } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(op.to_string());
                format!("{op} {src2}, {src1}")
            }

            Inst::CvtIntToFloat {
                op,
                src1,
                src2,
                dst,
                src2_size,
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let dst = pretty_print_reg(*dst.to_reg(), 8);
                let src2 = src2.pretty_print(src2_size.to_bytes());
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::CvtIntToFloatVex {
                op,
                src1,
                src2,
                dst,
                src2_size,
            } => {
                let dst = pretty_print_reg(*dst.to_reg(), 8);
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(src2_size.to_bytes());
                let op = ljustify(op.to_string());
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::XmmCmpRmRVex { op, src1, src2 } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let src2 = src2.pretty_print(8);
                format!("{} {src2}, {src1}", ljustify(op.to_string()))
            }

            Inst::CvtUint64ToFloatSeq {
                src,
                dst,
                dst_size,
                tmp_gpr1,
                tmp_gpr2,
                ..
            } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
                let tmp_gpr1 = pretty_print_reg(tmp_gpr1.to_reg().to_reg(), 8);
                let tmp_gpr2 = pretty_print_reg(tmp_gpr2.to_reg().to_reg(), 8);
                let op = ljustify(format!(
                    "u64_to_{}_seq",
                    if *dst_size == OperandSize::Size64 {
                        "f64"
                    } else {
                        "f32"
                    }
                ));
                format!("{op} {src}, {dst}, {tmp_gpr1}, {tmp_gpr2}")
            }

            Inst::CvtFloatToSintSeq {
                src,
                dst,
                src_size,
                dst_size,
                tmp_xmm,
                tmp_gpr,
                is_saturating,
            } => {
                let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
                let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
                let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
                let op = ljustify(format!(
                    "cvt_float{}_to_sint{}{}_seq",
                    src_size.to_bits(),
                    dst_size.to_bits(),
                    if *is_saturating { "_sat" } else { "" },
                ));
                format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}")
            }

            Inst::CvtFloatToUintSeq {
                src,
                dst,
                src_size,
                dst_size,
                tmp_gpr,
                tmp_xmm,
                tmp_xmm2,
                is_saturating,
            } => {
                let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
                let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
                let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
                let tmp_xmm2 = pretty_print_reg(tmp_xmm2.to_reg().to_reg(), 8);
                let op = ljustify(format!(
                    "cvt_float{}_to_uint{}{}_seq",
                    src_size.to_bits(),
                    dst_size.to_bits(),
                    if *is_saturating { "_sat" } else { "" },
                ));
                format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}, {tmp_xmm2}")
            }

            Inst::Imm {
                dst_size,
                simm64,
                dst,
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
                if *dst_size == OperandSize::Size64 {
                    let op = ljustify("movabsq".to_string());
                    let imm = *simm64 as i64;
                    format!("{op} ${imm}, {dst}")
                } else {
                    let op = ljustify("movl".to_string());
                    let imm = (*simm64 as u32) as i32;
                    format!("{op} ${imm}, {dst}")
                }
            }

            Inst::MovImmM { size, simm32, dst } => {
                let dst = dst.pretty_print(size.to_bytes());
                let suffix = suffix_bwlq(*size);
                let imm = match *size {
                    OperandSize::Size8 => ((*simm32 as u8) as i8).to_string(),
                    OperandSize::Size16 => ((*simm32 as u16) as i16).to_string(),
                    OperandSize::Size32 => simm32.to_string(),
                    OperandSize::Size64 => (*simm32 as i64).to_string(),
                };
                let op = ljustify2("mov".to_string(), suffix);
                format!("{op} ${imm}, {dst}")
            }

            Inst::MovRR { size, src, dst } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let op = ljustify2("mov".to_string(), suffix_lq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::MovFromPReg { src, dst } => {
                let src: Reg = (*src).into();
                let src = regs::show_ireg_sized(src, 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify("movq".to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::MovToPReg { src, dst } => {
                let src = pretty_print_reg(src.to_reg(), 8);
                let dst: Reg = (*dst).into();
                let dst = regs::show_ireg_sized(dst, 8);
                let op = ljustify("movq".to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::MovzxRmR {
                ext_mode, src, dst, ..
            } => {
                let dst_size = if *ext_mode == ExtMode::LQ {
                    4
                } else {
                    ext_mode.dst_size()
                };
                let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
                let src = src.pretty_print(ext_mode.src_size());

                if *ext_mode == ExtMode::LQ {
                    let op = ljustify("movl".to_string());
                    format!("{op} {src}, {dst}")
                } else {
                    let op = ljustify2("movz".to_string(), ext_mode.to_string());
                    format!("{op} {src}, {dst}")
                }
            }

            Inst::Mov64MR { src, dst, .. } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src = src.pretty_print(8);
                let op = ljustify("movq".to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::LoadEffectiveAddress { addr, dst, size } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let addr = addr.pretty_print(8);
                let op = ljustify("lea".to_string());
                format!("{op} {addr}, {dst}")
            }

            Inst::MovsxRmR {
                ext_mode, src, dst, ..
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), ext_mode.dst_size());
                let src = src.pretty_print(ext_mode.src_size());
                let op = ljustify2("movs".to_string(), ext_mode.to_string());
                format!("{op} {src}, {dst}")
            }

            Inst::MovRM { size, src, dst, .. } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = dst.pretty_print(size.to_bytes());
                let op = ljustify2("mov".to_string(), suffix_bwlq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::ShiftR {
                size,
                kind,
                num_bits,
                src,
                dst,
                ..
            } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                match num_bits.as_imm8_reg() {
                    &Imm8Reg::Reg { reg } => {
                        let reg = pretty_print_reg(reg, 1);
                        let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
                        format!("{op} {reg}, {src}, {dst}")
                    }

                    &Imm8Reg::Imm8 { imm: num_bits } => {
                        let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
                        format!("{op} ${num_bits}, {src}, {dst}")
                    }
                }
            }

            Inst::XmmRmiReg {
                opcode,
                src1,
                src2,
                dst,
                ..
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), 8);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let src2 = src2.pretty_print(8);
                let op = ljustify(opcode.to_string());
                format!("{op} {src1}, {src2}, {dst}")
            }

            Inst::CmpRmiR {
                size,
                src1,
                src2,
                opcode,
            } => {
                let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
                let src2 = src2.pretty_print(size.to_bytes());
                let op = match opcode {
                    CmpOpcode::Cmp => "cmp",
                    CmpOpcode::Test => "test",
                };
                let op = ljustify2(op.to_string(), suffix_bwlq(*size));
                format!("{op} {src2}, {src1}")
            }

            Inst::Setcc { cc, dst } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
                let op = ljustify2("set".to_string(), cc.to_string());
                format!("{op} {dst}")
            }

            Inst::Bswap { size, src, dst } => {
                let src = pretty_print_reg(src.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let op = ljustify2("bswap".to_string(), suffix_bwlq(*size));
                format!("{op} {src}, {dst}")
            }

            Inst::Cmove {
                size,
                cc,
                consequent,
                alternative,
                dst,
            } => {
                let alternative = pretty_print_reg(alternative.to_reg(), size.to_bytes());
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
                let consequent = consequent.pretty_print(size.to_bytes());
                let op = ljustify(format!("cmov{}{}", cc.to_string(), suffix_bwlq(*size)));
                format!("{op} {consequent}, {alternative}, {dst}")
            }

            Inst::XmmCmove {
                ty,
                cc,
                consequent,
                alternative,
                dst,
                ..
            } => {
                let size = u8::try_from(ty.bytes()).unwrap();
                let alternative = pretty_print_reg(alternative.to_reg(), size);
                let dst = pretty_print_reg(dst.to_reg().to_reg(), size);
                let consequent = pretty_print_reg(consequent.to_reg(), size);
                let suffix = match *ty {
                    types::F64 => "sd",
                    types::F32 => "ss",
                    types::F16 => "ss",
                    types::F32X4 => "aps",
                    types::F64X2 => "apd",
                    _ => "dqa",
                };
                let cc = cc.invert();
                format!(
                    "mov{suffix} {alternative}, {dst}; \
                    j{cc} $next; \
                    mov{suffix} {consequent}, {dst}; \
                    $next:"
                )
            }

            Inst::Push64 { src } => {
                let src = src.pretty_print(8);
                let op = ljustify("pushq".to_string());
                format!("{op} {src}")
            }

            Inst::StackProbeLoop {
                tmp,
                frame_size,
                guard_size,
            } => {
                let tmp = pretty_print_reg(tmp.to_reg(), 8);
                let op = ljustify("stack_probe_loop".to_string());
                format!("{op} {tmp}, frame_size={frame_size}, guard_size={guard_size}")
            }

            Inst::Pop64 { dst } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let op = ljustify("popq".to_string());
                format!("{op} {dst}")
            }

            Inst::CallKnown { dest, .. } => {
                let op = ljustify("call".to_string());
                format!("{op} {dest:?}")
            }

            Inst::CallUnknown { dest, .. } => {
                let dest = dest.pretty_print(8);
                let op = ljustify("call".to_string());
                format!("{op} *{dest}")
            }

            Inst::ReturnCallKnown { callee, info } => {
                let ReturnCallInfo {
                    uses,
                    new_stack_arg_size,
                    tmp,
                } = &**info;
                let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
                let mut s =
                    format!("return_call_known {callee:?} ({new_stack_arg_size}) tmp={tmp}");
                for ret in uses {
                    let preg = regs::show_reg(ret.preg);
                    let vreg = pretty_print_reg(ret.vreg, 8);
                    write!(&mut s, " {vreg}={preg}").unwrap();
                }
                s
            }

            Inst::ReturnCallUnknown { callee, info } => {
                let ReturnCallInfo {
                    uses,
                    new_stack_arg_size,
                    tmp,
                } = &**info;
                let callee = pretty_print_reg(*callee, 8);
                let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
                let mut s =
                    format!("return_call_unknown {callee} ({new_stack_arg_size}) tmp={tmp}");
                for ret in uses {
                    let preg = regs::show_reg(ret.preg);
                    let vreg = pretty_print_reg(ret.vreg, 8);
                    write!(&mut s, " {vreg}={preg}").unwrap();
                }
                s
            }

            Inst::Args { args } => {
                let mut s = "args".to_string();
                for arg in args {
                    let preg = regs::show_reg(arg.preg);
                    let def = pretty_print_reg(arg.vreg.to_reg(), 8);
                    write!(&mut s, " {def}={preg}").unwrap();
                }
                s
            }

            Inst::Rets { rets } => {
                let mut s = "rets".to_string();
                for ret in rets {
                    let preg = regs::show_reg(ret.preg);
                    let vreg = pretty_print_reg(ret.vreg, 8);
                    write!(&mut s, " {vreg}={preg}").unwrap();
                }
                s
            }

            Inst::Ret { stack_bytes_to_pop } => {
                let mut s = "ret".to_string();
                if *stack_bytes_to_pop != 0 {
                    write!(&mut s, " {stack_bytes_to_pop}").unwrap();
                }
                s
            }

            Inst::JmpKnown { dst } => {
                let op = ljustify("jmp".to_string());
                let dst = dst.to_string();
                format!("{op} {dst}")
            }

            Inst::JmpIf { cc, taken } => {
                let taken = taken.to_string();
                let op = ljustify2("j".to_string(), cc.to_string());
                format!("{op} {taken}")
            }

            Inst::JmpCond {
                cc,
                taken,
                not_taken,
            } => {
                let taken = taken.to_string();
                let not_taken = not_taken.to_string();
                let op = ljustify2("j".to_string(), cc.to_string());
                format!("{op} {taken}; j {not_taken}")
            }

            Inst::JmpTableSeq {
                idx, tmp1, tmp2, ..
            } => {
                let idx = pretty_print_reg(*idx, 8);
                let tmp1 = pretty_print_reg(tmp1.to_reg(), 8);
                let tmp2 = pretty_print_reg(tmp2.to_reg(), 8);
                let op = ljustify("br_table".into());
                format!("{op} {idx}, {tmp1}, {tmp2}")
            }

            Inst::JmpUnknown { target } => {
                let target = target.pretty_print(8);
                let op = ljustify("jmp".to_string());
                format!("{op} *{target}")
            }

            Inst::TrapIf { cc, trap_code, .. } => {
                format!("j{cc} #trap={trap_code}")
            }

            Inst::TrapIfAnd {
                cc1,
                cc2,
                trap_code,
                ..
            } => {
                let cc1 = cc1.invert();
                let cc2 = cc2.invert();
                format!("trap_if_and {cc1}, {cc2}, {trap_code}")
            }

            Inst::TrapIfOr {
                cc1,
                cc2,
                trap_code,
                ..
            } => {
                let cc2 = cc2.invert();
                format!("trap_if_or {cc1}, {cc2}, {trap_code}")
            }

            Inst::LoadExtName {
                dst, name, offset, ..
            } => {
                let dst = pretty_print_reg(dst.to_reg(), 8);
                let name = name.display(None);
                let op = ljustify("load_ext_name".into());
                format!("{op} {name}+{offset}, {dst}")
            }

            Inst::LockCmpxchg {
                ty,
                replacement,
                expected,
                mem,
                dst_old,
                ..
            } => {
                let size = ty.bytes() as u8;
                let replacement = pretty_print_reg(*replacement, size);
                let expected = pretty_print_reg(*expected, size);
                let dst_old = pretty_print_reg(dst_old.to_reg(), size);
                let mem = mem.pretty_print(size);
                let suffix = suffix_bwlq(OperandSize::from_bytes(size as u32));
                format!(
                    "lock cmpxchg{suffix} {replacement}, {mem}, expected={expected}, dst_old={dst_old}"
                )
            }

            Inst::AtomicRmwSeq { ty, op, .. } => {
                let ty = ty.bits();
                format!(
                    "atomically {{ {ty}_bits_at_[%r9]) {op:?}= %r10; %rax = old_value_at_[%r9]; %r11, %rflags = trash }}"
                )
            }

            Inst::Fence { kind } => match kind {
                FenceKind::MFence => "mfence".to_string(),
                FenceKind::LFence => "lfence".to_string(),
                FenceKind::SFence => "sfence".to_string(),
            },

            Inst::Hlt => "hlt".into(),

            Inst::Ud2 { trap_code } => format!("ud2 {trap_code}"),

            Inst::ElfTlsGetAddr { ref symbol, dst } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                format!("{dst} = elf_tls_get_addr {symbol:?}")
            }

            Inst::MachOTlsGetAddr { ref symbol, dst } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                format!("{dst} = macho_tls_get_addr {symbol:?}")
            }

            Inst::CoffTlsGetAddr {
                ref symbol,
                dst,
                tmp,
            } => {
                let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
                let tmp = tmp.to_reg().to_reg();

                let mut s = format!("{dst} = coff_tls_get_addr {symbol:?}");
                if tmp.is_virtual() {
                    let tmp = show_ireg_sized(tmp, 8);
                    write!(&mut s, ", {tmp}").unwrap();
                };

                s
            }

            Inst::Unwind { inst } => format!("unwind {inst:?}"),

            Inst::DummyUse { reg } => {
                let reg = pretty_print_reg(*reg, 8);
                format!("dummy_use {reg}")
            }
        }
    }
}

impl fmt::Debug for Inst {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}", self.pretty_print_inst(&mut Default::default()))
    }
}

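/// Report the register operands of `inst` (uses, defs, fixed-register
/// constraints, and reuse constraints) to the operand visitor, for use by
/// register allocation.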
fn x64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
    match inst {
        Inst::AluRmiR {
            src1, src2, dst, ..
        } => {
            collector.reg_use(src1);
            collector.reg_reuse_def(dst, 0);
            src2.get_operands(collector);
        }
        Inst::AluConstOp { dst, .. } => collector.reg_def(dst),
        Inst::AluRM { src1_dst, src2, .. } => {
            collector.reg_use(src2);
            src1_dst.get_operands(collector);
        }
        Inst::AluRmRVex {
            src1, src2, dst, ..
        } => {
            collector.reg_def(dst);
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::Not { src, dst, .. } => {
            collector.reg_use(src);
            collector.reg_reuse_def(dst, 0);
        }
        Inst::Neg { src, dst, .. } => {
            collector.reg_use(src);
            collector.reg_reuse_def(dst, 0);
        }
        Inst::Div {
            divisor,
            dividend_lo,
            dividend_hi,
            dst_quotient,
            dst_remainder,
            ..
        } => {
            divisor.get_operands(collector);
            collector.reg_fixed_use(dividend_lo, regs::rax());
            collector.reg_fixed_use(dividend_hi, regs::rdx());
            collector.reg_fixed_def(dst_quotient, regs::rax());
            collector.reg_fixed_def(dst_remainder, regs::rdx());
        }
        Inst::CheckedSRemSeq {
            divisor,
            dividend_lo,
            dividend_hi,
            dst_quotient,
            dst_remainder,
            ..
        } => {
            collector.reg_use(divisor);
            collector.reg_fixed_use(dividend_lo, regs::rax());
            collector.reg_fixed_use(dividend_hi, regs::rdx());
            collector.reg_fixed_def(dst_quotient, regs::rax());
            collector.reg_fixed_def(dst_remainder, regs::rdx());
        }
        Inst::Div8 {
            divisor,
            dividend,
            dst,
            ..
        } => {
            divisor.get_operands(collector);
            collector.reg_fixed_use(dividend, regs::rax());
            collector.reg_fixed_def(dst, regs::rax());
        }
        Inst::CheckedSRemSeq8 {
            divisor,
            dividend,
            dst,
            ..
        } => {
            collector.reg_use(divisor);
            collector.reg_fixed_use(dividend, regs::rax());
            collector.reg_fixed_def(dst, regs::rax());
        }
        Inst::Mul {
            src1,
            src2,
            dst_lo,
            dst_hi,
            ..
        } => {
            collector.reg_fixed_use(src1, regs::rax());
            collector.reg_fixed_def(dst_lo, regs::rax());
            collector.reg_fixed_def(dst_hi, regs::rdx());
            src2.get_operands(collector);
        }
        Inst::Mul8 {
            src1, src2, dst, ..
        } => {
            collector.reg_fixed_use(src1, regs::rax());
            collector.reg_fixed_def(dst, regs::rax());
            src2.get_operands(collector);
        }
        Inst::IMul {
            src1, src2, dst, ..
        } => {
            collector.reg_use(src1);
            collector.reg_reuse_def(dst, 0);
            src2.get_operands(collector);
        }
        Inst::IMulImm { src1, dst, .. } => {
            collector.reg_def(dst);
            src1.get_operands(collector);
        }
        Inst::SignExtendData { size, src, dst } => {
            match size {
                OperandSize::Size8 => {
                    // cbw reads AL and writes AX, so both operands live in rax.
                    collector.reg_fixed_use(src, regs::rax());
                    collector.reg_fixed_def(dst, regs::rax());
                }
                _ => {
                    // cwd/cdq/cqo read rax and write the sign bits into rdx.
                    collector.reg_fixed_use(src, regs::rax());
                    collector.reg_fixed_def(dst, regs::rdx());
                }
            }
        }
2020 Inst::UnaryRmR { src, dst, .. }
2021 | Inst::UnaryRmRVex { src, dst, .. }
2022 | Inst::UnaryRmRImmVex { src, dst, .. } => {
2023 collector.reg_def(dst);
2024 src.get_operands(collector);
2025 }
2026 Inst::XmmUnaryRmR { src, dst, .. } | Inst::XmmUnaryRmRImm { src, dst, .. } => {
2027 collector.reg_def(dst);
2028 src.get_operands(collector);
2029 }
2030 Inst::XmmUnaryRmREvex { src, dst, .. }
2031 | Inst::XmmUnaryRmRImmEvex { src, dst, .. }
2032 | Inst::XmmUnaryRmRUnaligned { src, dst, .. }
2033 | Inst::XmmUnaryRmRVex { src, dst, .. }
2034 | Inst::XmmUnaryRmRImmVex { src, dst, .. } => {
2035 collector.reg_def(dst);
2036 src.get_operands(collector);
2037 }
2038 Inst::XmmRmR {
2039 src1, src2, dst, ..
2040 } => {
2041 collector.reg_use(src1);
2042 collector.reg_reuse_def(dst, 0);
2043 src2.get_operands(collector);
2044 }
2045 Inst::XmmRmRUnaligned {
2046 src1, src2, dst, ..
2047 } => {
2048 collector.reg_use(src1);
2049 collector.reg_reuse_def(dst, 0);
2050 src2.get_operands(collector);
2051 }
2052 Inst::XmmRmRBlend {
2053 src1,
2054 src2,
2055 mask,
2056 dst,
2057 op,
2058 } => {
2059 assert!(matches!(
2060 op,
2061 SseOpcode::Blendvpd | SseOpcode::Blendvps | SseOpcode::Pblendvb
2062 ));
2063 collector.reg_use(src1);
2064 collector.reg_fixed_use(mask, regs::xmm0());
2065 collector.reg_reuse_def(dst, 0);
2066 src2.get_operands(collector);
2067 }
        Inst::XmmRmiRVex {
            src1, src2, dst, ..
        } => {
            collector.reg_def(dst);
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::XmmRmRImmVex {
            src1, src2, dst, ..
        } => {
            collector.reg_def(dst);
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::XmmVexPinsr {
            src1, src2, dst, ..
        } => {
            collector.reg_def(dst);
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::XmmRmRVex3 {
            src1,
            src2,
            src3,
            dst,
            ..
        } => {
            collector.reg_use(src1);
            collector.reg_reuse_def(dst, 0);
            collector.reg_use(src2);
            src3.get_operands(collector);
        }
        Inst::XmmRmRBlendVex {
            src1,
            src2,
            mask,
            dst,
            ..
        } => {
            collector.reg_def(dst);
            collector.reg_use(src1);
            src2.get_operands(collector);
            collector.reg_use(mask);
        }
        Inst::XmmRmREvex {
            op,
            src1,
            src2,
            dst,
            ..
        } => {
            assert_ne!(*op, Avx512Opcode::Vpermi2b);
            collector.reg_use(src1);
            src2.get_operands(collector);
            collector.reg_def(dst);
        }
        Inst::XmmRmREvex3 {
            op,
            src1,
            src2,
            src3,
            dst,
            ..
        } => {
            assert_eq!(*op, Avx512Opcode::Vpermi2b);
            collector.reg_use(src1);
            collector.reg_use(src2);
            src3.get_operands(collector);
            collector.reg_reuse_def(dst, 0);
        }
        Inst::XmmRmRImm {
            src1, src2, dst, ..
        } => {
            collector.reg_use(src1);
            collector.reg_reuse_def(dst, 0);
            src2.get_operands(collector);
        }
        Inst::XmmUninitializedValue { dst } => collector.reg_def(dst),
        Inst::XmmMinMaxSeq { lhs, rhs, dst, .. } => {
            collector.reg_use(rhs);
            collector.reg_use(lhs);
            collector.reg_reuse_def(dst, 0);
        }
        Inst::XmmRmiReg {
            src1, src2, dst, ..
        } => {
            collector.reg_use(src1);
            collector.reg_reuse_def(dst, 0);
            src2.get_operands(collector);
        }
        Inst::XmmMovRM { src, dst, .. }
        | Inst::XmmMovRMVex { src, dst, .. }
        | Inst::XmmMovRMImm { src, dst, .. }
        | Inst::XmmMovRMImmVex { src, dst, .. } => {
            collector.reg_use(src);
            dst.get_operands(collector);
        }
        Inst::XmmCmpRmR { src1, src2, .. } => {
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::XmmCmpRmRVex { src1, src2, .. } => {
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::Imm { dst, .. } => {
            collector.reg_def(dst);
        }
        Inst::MovRR { src, dst, .. } => {
            collector.reg_use(src);
            collector.reg_def(dst);
        }
        Inst::MovFromPReg { dst, src } => {
            debug_assert!(dst.to_reg().to_reg().is_virtual());
            collector.reg_fixed_nonallocatable(*src);
            collector.reg_def(dst);
        }
        Inst::MovToPReg { dst, src } => {
            debug_assert!(src.to_reg().is_virtual());
            collector.reg_use(src);
            collector.reg_fixed_nonallocatable(*dst);
        }
        Inst::XmmToGpr { src, dst, .. }
        | Inst::XmmToGprVex { src, dst, .. }
        | Inst::XmmToGprImm { src, dst, .. }
        | Inst::XmmToGprImmVex { src, dst, .. } => {
            collector.reg_use(src);
            collector.reg_def(dst);
        }
        Inst::GprToXmm { src, dst, .. } | Inst::GprToXmmVex { src, dst, .. } => {
            collector.reg_def(dst);
            src.get_operands(collector);
        }
        Inst::CvtIntToFloat {
            src1, src2, dst, ..
        } => {
            collector.reg_use(src1);
            collector.reg_reuse_def(dst, 0);
            src2.get_operands(collector);
        }
        Inst::CvtIntToFloatVex {
            src1, src2, dst, ..
        } => {
            collector.reg_def(dst);
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::CvtUint64ToFloatSeq {
            src,
            dst,
            tmp_gpr1,
            tmp_gpr2,
            ..
        } => {
            collector.reg_use(src);
            collector.reg_early_def(dst);
            collector.reg_early_def(tmp_gpr1);
            collector.reg_early_def(tmp_gpr2);
        }
        Inst::CvtFloatToSintSeq {
            src,
            dst,
            tmp_xmm,
            tmp_gpr,
            ..
        } => {
            collector.reg_use(src);
            collector.reg_early_def(dst);
            collector.reg_early_def(tmp_gpr);
            collector.reg_early_def(tmp_xmm);
        }
        Inst::CvtFloatToUintSeq {
            src,
            dst,
            tmp_gpr,
            tmp_xmm,
            tmp_xmm2,
            ..
        } => {
            collector.reg_use(src);
            collector.reg_early_def(dst);
            collector.reg_early_def(tmp_gpr);
            collector.reg_early_def(tmp_xmm);
            collector.reg_early_def(tmp_xmm2);
        }

        Inst::MovImmM { dst, .. } => {
            dst.get_operands(collector);
        }

        Inst::MovzxRmR { src, dst, .. } => {
            collector.reg_def(dst);
            src.get_operands(collector);
        }
        Inst::Mov64MR { src, dst, .. } => {
            collector.reg_def(dst);
            src.get_operands(collector);
        }
        Inst::LoadEffectiveAddress { addr: src, dst, .. } => {
            collector.reg_def(dst);
            src.get_operands(collector);
        }
        Inst::MovsxRmR { src, dst, .. } => {
            collector.reg_def(dst);
            src.get_operands(collector);
        }
        Inst::MovRM { src, dst, .. } => {
            collector.reg_use(src);
            dst.get_operands(collector);
        }
        Inst::ShiftR {
            num_bits, src, dst, ..
        } => {
            collector.reg_use(src);
            collector.reg_reuse_def(dst, 0);
            if let Imm8Reg::Reg { reg } = num_bits.as_imm8_reg_mut() {
                collector.reg_fixed_use(reg, regs::rcx());
            }
        }
        Inst::CmpRmiR { src1, src2, .. } => {
            collector.reg_use(src1);
            src2.get_operands(collector);
        }
        Inst::Setcc { dst, .. } => {
            collector.reg_def(dst);
        }
        Inst::Bswap { src, dst, .. } => {
            collector.reg_use(src);
            collector.reg_reuse_def(dst, 0);
        }
        Inst::Cmove {
            consequent,
            alternative,
            dst,
            ..
        } => {
            collector.reg_use(alternative);
            collector.reg_reuse_def(dst, 0);
            consequent.get_operands(collector);
        }
        Inst::XmmCmove {
            consequent,
            alternative,
            dst,
            ..
        } => {
            collector.reg_use(alternative);
            collector.reg_reuse_def(dst, 0);
            collector.reg_use(consequent);
        }
        Inst::Push64 { src } => {
            src.get_operands(collector);
        }
        Inst::Pop64 { dst } => {
            collector.reg_def(dst);
        }
        Inst::StackProbeLoop { tmp, .. } => {
            collector.reg_early_def(tmp);
        }

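        // The call-like instructions below communicate their ABI constraints
        // directly to the register allocator: argument vregs are pinned to
        // their fixed ABI registers, return-value vregs to theirs, and the
        // remaining caller-saved registers are reported as a clobber set.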
        Inst::CallKnown { dest, info, .. } => {
            let CallInfo {
                uses,
                defs,
                clobbers,
                ..
            } = &mut **info.as_mut().expect("CallInfo is expected in this path");
            // Probestack calls are never emitted through this path.
            debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
            for CallRetPair { vreg, preg } in defs {
                collector.reg_fixed_def(vreg, *preg);
            }
            collector.reg_clobbers(*clobbers);
        }

        Inst::CallUnknown { info, dest, .. } => {
            let CallInfo {
                uses,
                defs,
                clobbers,
                callee_conv,
                ..
            } = &mut **info.as_mut().expect("CallInfo is expected in this path");
            match dest {
                RegMem::Reg { reg } if *callee_conv == CallConv::Winch => {
                    // For Winch callees the indirect call target is constrained
                    // to %r10.
                    collector.reg_fixed_use(reg, regs::r10())
                }
                _ => dest.get_operands(collector),
            }
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
            for CallRetPair { vreg, preg } in defs {
                collector.reg_fixed_def(vreg, *preg);
            }
            collector.reg_clobbers(*clobbers);
        }

        Inst::ReturnCallKnown { callee, info } => {
            let ReturnCallInfo { uses, tmp, .. } = &mut **info;
            collector.reg_fixed_def(tmp, regs::r11());
            // As with `CallKnown`, probestack never comes through this path.
            debug_assert_ne!(*callee, ExternalName::LibCall(LibCall::Probestack));
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
        }

        Inst::ReturnCallUnknown { callee, info } => {
            let ReturnCallInfo { uses, tmp, .. } = &mut **info;

            // The callee pointer is pinned to %r10 and the scratch temporary
            // to %r11 for the emitted return-call sequence.
            collector.reg_fixed_use(callee, regs::r10());

            collector.reg_fixed_def(tmp, regs::r11());
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
        }

        Inst::JmpTableSeq {
            idx, tmp1, tmp2, ..
        } => {
            collector.reg_use(idx);
            // `tmp1` is written while `idx` is still live in the emitted
            // sequence, so it must be an early def; `tmp2` need not be.
            collector.reg_early_def(tmp1);
            collector.reg_def(tmp2);
        }

        Inst::JmpUnknown { target } => {
            target.get_operands(collector);
        }

        Inst::LoadExtName { dst, .. } => {
            collector.reg_def(dst);
        }

        Inst::LockCmpxchg {
            replacement,
            expected,
            mem,
            dst_old,
            ..
        } => {
            collector.reg_use(replacement);
            collector.reg_fixed_use(expected, regs::rax());
            collector.reg_fixed_def(dst_old, regs::rax());
            mem.get_operands(collector);
        }

        Inst::AtomicRmwSeq {
            operand,
            temp,
            dst_old,
            mem,
            ..
        } => {
            collector.reg_late_use(operand);
            collector.reg_early_def(temp);
            collector.reg_fixed_def(dst_old, regs::rax());
            mem.get_operands_late(collector)
        }

        Inst::Args { args } => {
            for ArgPair { vreg, preg } in args {
                collector.reg_fixed_def(vreg, *preg);
            }
        }

        Inst::Rets { rets } => {
            for RetPair { vreg, preg } in rets {
                collector.reg_fixed_use(vreg, *preg);
            }
        }

        Inst::JmpKnown { .. }
        | Inst::JmpIf { .. }
        | Inst::JmpCond { .. }
        | Inst::Ret { .. }
        | Inst::Nop { .. }
        | Inst::TrapIf { .. }
        | Inst::TrapIfAnd { .. }
        | Inst::TrapIfOr { .. }
        | Inst::Hlt
        | Inst::Ud2 { .. }
        | Inst::Fence { .. } => {
            // No register operands.
        }

        Inst::ElfTlsGetAddr { dst, .. } | Inst::MachOTlsGetAddr { dst, .. } => {
            collector.reg_fixed_def(dst, regs::rax());
            // These expand to a call to the platform TLS helper, which follows
            // the System V calling convention: every caller-saved register
            // except %rax (which carries the result and is the fixed def
            // above) is clobbered.
            let mut clobbers = X64ABIMachineSpec::get_regs_clobbered_by_call(CallConv::SystemV);
            clobbers.remove(regs::gpr_preg(regs::ENC_RAX));
            collector.reg_clobbers(clobbers);
        }

        Inst::CoffTlsGetAddr { dst, tmp, .. } => {
            // The result is returned in %rax; the emitted sequence also
            // writes %rcx as a temporary.
            collector.reg_fixed_def(dst, regs::rax());

            collector.reg_fixed_def(tmp, regs::rcx());
        }

        Inst::Unwind { .. } => {}

        Inst::DummyUse { reg } => {
            collector.reg_use(reg);
        }
    }
}

impl MachInst for Inst {
    type ABIMachineSpec = X64ABIMachineSpec;

    fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
        x64_get_operands(self, collector)
    }

    fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
        match self {
            // Only a full 64-bit GPR-to-GPR `mov` is reported as a copy; the
            // narrower forms do not copy the full 64-bit register.
            Self::MovRR { size, src, dst, .. } if *size == OperandSize::Size64 => {
                Some((dst.to_writable_reg(), src.to_reg()))
            }
            // XMM-to-XMM moves count only when the source is a register, not
            // memory.
            Self::XmmUnaryRmR { op, src, dst, .. }
                if *op == SseOpcode::Movss
                    || *op == SseOpcode::Movsd
                    || *op == SseOpcode::Movaps
                    || *op == SseOpcode::Movapd
                    || *op == SseOpcode::Movups
                    || *op == SseOpcode::Movupd
                    || *op == SseOpcode::Movdqa
                    || *op == SseOpcode::Movdqu =>
            {
                if let RegMem::Reg { reg } = src.clone().to_reg_mem() {
                    Some((dst.to_writable_reg(), reg))
                } else {
                    None
                }
            }
            _ => None,
        }
    }

    fn is_included_in_clobbers(&self) -> bool {
        match self {
            &Inst::Args { .. } => false,
            _ => true,
        }
    }

    fn is_trap(&self) -> bool {
        match self {
            Self::Ud2 { .. } => true,
            _ => false,
        }
    }

    fn is_args(&self) -> bool {
        match self {
            Self::Args { .. } => true,
            _ => false,
        }
    }

    fn is_term(&self) -> MachTerminator {
        match self {
            &Self::Rets { .. } => MachTerminator::Ret,
            &Self::ReturnCallKnown { .. } | &Self::ReturnCallUnknown { .. } => {
                MachTerminator::RetCall
            }
            &Self::JmpKnown { .. } => MachTerminator::Uncond,
            &Self::JmpCond { .. } => MachTerminator::Cond,
            &Self::JmpTableSeq { .. } => MachTerminator::Indirect,
            _ => MachTerminator::None,
        }
    }

    fn is_mem_access(&self) -> bool {
        panic!("TODO FILL ME OUT")
    }

    fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst {
        trace!(
            "Inst::gen_move {:?} -> {:?} (type: {:?})",
            src_reg,
            dst_reg.to_reg(),
            ty
        );
        let rc_dst = dst_reg.to_reg().class();
        let rc_src = src_reg.class();
        debug_assert!(rc_dst == rc_src);
        match rc_dst {
            RegClass::Int => Inst::mov_r_r(OperandSize::Size64, src_reg, dst_reg),
            RegClass::Float => {
                // Whole-register moves are used even for scalar floats: they
                // copy the entire XMM register and avoid the partial-update
                // behavior of the scalar MOVSS/MOVSD forms.
                let opcode = match ty {
                    types::F16 | types::F32 | types::F64 | types::F32X4 => SseOpcode::Movaps,
                    types::F64X2 => SseOpcode::Movapd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqa,
                    _ => unimplemented!("unable to move type: {}", ty),
                };
                Inst::xmm_unary_rm_r(opcode, RegMem::reg(src_reg), dst_reg)
            }
            RegClass::Vector => unreachable!(),
        }
    }

    fn gen_nop(preferred_size: usize) -> Inst {
        // The longest single x86 NOP encoding is 15 bytes.
        Inst::nop(std::cmp::min(preferred_size, 15) as u8)
    }

    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
        match ty {
            types::I8 => Ok((&[RegClass::Int], &[types::I8])),
            types::I16 => Ok((&[RegClass::Int], &[types::I16])),
            types::I32 => Ok((&[RegClass::Int], &[types::I32])),
            types::I64 => Ok((&[RegClass::Int], &[types::I64])),
            types::R32 => panic!("32-bit reftype pointer should never be seen on x86-64"),
            types::R64 => Ok((&[RegClass::Int], &[types::R64])),
            types::F16 => Ok((&[RegClass::Float], &[types::F16])),
            types::F32 => Ok((&[RegClass::Float], &[types::F32])),
            types::F64 => Ok((&[RegClass::Float], &[types::F64])),
            types::F128 => Ok((&[RegClass::Float], &[types::F128])),
            types::I128 => Ok((&[RegClass::Int, RegClass::Int], &[types::I64, types::I64])),
            _ if ty.is_vector() => {
                assert!(ty.bits() <= 128);
                Ok((&[RegClass::Float], &[types::I8X16]))
            }
            _ => Err(CodegenError::Unsupported(format!(
                "Unexpected SSA-value type: {}",
                ty
            ))),
        }
    }

    fn canonical_type_for_rc(rc: RegClass) -> Type {
        match rc {
            RegClass::Float => types::I8X16,
            RegClass::Int => types::I64,
            RegClass::Vector => unreachable!(),
        }
    }

    fn gen_jump(label: MachLabel) -> Inst {
        Inst::jmp_known(label)
    }

    fn gen_imm_u64(value: u64, dst: Writable<Reg>) -> Option<Self> {
        Some(Inst::imm(OperandSize::Size64, value, dst))
    }

    fn gen_imm_f64(value: f64, tmp: Writable<Reg>, dst: Writable<Reg>) -> SmallVec<[Self; 2]> {
        let imm_to_gpr = Inst::imm(OperandSize::Size64, value.to_bits(), tmp);
        let gpr_to_xmm = Self::gpr_to_xmm(
            SseOpcode::Movd,
            tmp.to_reg().into(),
            OperandSize::Size64,
            dst,
        );
        smallvec![imm_to_gpr, gpr_to_xmm]
    }

    fn gen_dummy_use(reg: Reg) -> Self {
        Inst::DummyUse { reg }
    }

    fn worst_case_size() -> CodeOffset {
        // An x86-64 instruction is at most 15 bytes long.
        15
    }

    fn ref_type_regclass(_: &settings::Flags) -> RegClass {
        RegClass::Int
    }

    fn is_safepoint(&self) -> bool {
        match self {
            Inst::CallKnown { .. } | Inst::CallUnknown { .. } => true,
            _ => false,
        }
    }

    fn function_alignment() -> FunctionAlignment {
        FunctionAlignment {
            minimum: 1,
            // x64 has no hard alignment requirement; 32 bytes is preferred for
            // performance.
            preferred: 32,
        }
    }

    type LabelUse = LabelUse;

    const TRAP_OPCODE: &'static [u8] = &[0x0f, 0x0b]; // ud2

}

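/// Constant information used while emitting x64 instructions: the shared
/// codegen flags plus the x64-specific ISA flags.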
pub struct EmitInfo {
    pub(super) flags: settings::Flags,
    isa_flags: x64_settings::Flags,
}

impl EmitInfo {
    /// Create a new [`EmitInfo`] from the shared and x64-specific flags.
    pub fn new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
        Self { flags, isa_flags }
    }
}

impl MachInstEmit for Inst {
    type State = EmitState;
    type Info = EmitInfo;

    fn emit(&self, sink: &mut MachBuffer<Inst>, info: &Self::Info, state: &mut Self::State) {
        emit::emit(self, sink, info, state);
    }

    fn pretty_print_inst(&self, _: &mut Self::State) -> String {
        PrettyPrint::pretty_print(self, 0)
    }
}

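/// A label reference (relocation) kind in emitted x64 code. Both variants are
/// 32-bit PC-relative fields; see `patch` below for how each is resolved.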
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// A 32-bit offset for a jump or call: the stored value is relative to the
    /// end of the 4-byte field (i.e. the start of the next instruction).
    JmpRel32,

    /// A 32-bit offset relative to the start of the field itself.
    PCRel32,
}

impl MachInstLabelUse for LabelUse {
    const ALIGN: CodeOffset = 1;

    fn max_pos_range(self) -> CodeOffset {
        match self {
            LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x7fff_ffff,
        }
    }

    fn max_neg_range(self) -> CodeOffset {
        match self {
            LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x8000_0000,
        }
    }

    fn patch_size(self) -> CodeOffset {
        match self {
            LabelUse::JmpRel32 | LabelUse::PCRel32 => 4,
        }
    }

    fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
        let pc_rel = (label_offset as i64) - (use_offset as i64);
        debug_assert!(pc_rel <= self.max_pos_range() as i64);
        debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
        let pc_rel = pc_rel as u32;
        match self {
            LabelUse::JmpRel32 => {
                let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
                let value = pc_rel.wrapping_add(addend).wrapping_sub(4);
                buffer.copy_from_slice(&value.to_le_bytes()[..]);
            }
            LabelUse::PCRel32 => {
                let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
                let value = pc_rel.wrapping_add(addend);
                buffer.copy_from_slice(&value.to_le_bytes()[..]);
            }
        }
    }

    fn supports_veneer(self) -> bool {
        match self {
            LabelUse::JmpRel32 | LabelUse::PCRel32 => false,
        }
    }

    fn veneer_size(self) -> CodeOffset {
        match self {
            LabelUse::JmpRel32 | LabelUse::PCRel32 => 0,
        }
    }

    fn worst_case_veneer_size() -> CodeOffset {
        0
    }

    fn generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse) {
        match self {
            LabelUse::JmpRel32 | LabelUse::PCRel32 => {
                panic!("Veneer not supported for JmpRel32/PCRel32 label-use.");
            }
        }
    }

    fn from_reloc(reloc: Reloc, addend: Addend) -> Option<Self> {
        match (reloc, addend) {
            (Reloc::X86CallPCRel4, -4) => Some(LabelUse::JmpRel32),
            _ => None,
        }
    }
}
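
// A minimal illustrative sketch (a hypothetical test module, separate from the
// existing emit tests) of the `patch` arithmetic above: `JmpRel32` stores the
// target relative to the end of the 4-byte field, while `PCRel32` stores it
// relative to the start of the field.
#[cfg(test)]
mod label_use_patch_sketch {
    use super::*;
    use crate::machinst::*;

    #[test]
    fn patch_offsets() {
        // Use site at offset 8, label at offset 32, field initially zero.
        let mut field = [0u8; 4];
        LabelUse::JmpRel32.patch(&mut field, 8, 32);
        assert_eq!(i32::from_le_bytes(field), 32 - (8 + 4));

        let mut field = [0u8; 4];
        LabelUse::PCRel32.patch(&mut field, 8, 32);
        assert_eq!(i32::from_le_bytes(field), 32 - 8);
    }
}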