1pub use emit_state::EmitState;
4
5use crate::binemit::{Addend, CodeOffset, Reloc};
6use crate::ir::{types, ExternalName, LibCall, TrapCode, Type};
7use crate::isa::x64::abi::X64ABIMachineSpec;
8use crate::isa::x64::inst::regs::{pretty_print_reg, show_ireg_sized};
9use crate::isa::x64::settings as x64_settings;
10use crate::isa::{CallConv, FunctionAlignment};
11use crate::{machinst::*, trace};
12use crate::{settings, CodegenError, CodegenResult};
13use alloc::boxed::Box;
14use smallvec::{smallvec, SmallVec};
15use std::fmt::{self, Write};
16use std::string::{String, ToString};
17
18pub mod args;
19mod emit;
20mod emit_state;
21#[cfg(test)]
22mod emit_tests;
23pub mod external;
24pub mod regs;
25mod stack_switch;
26pub mod unwind;
27
28use args::*;
29
30pub use super::lower::isle::generated_code::AtomicRmwSeqOp;
35pub use super::lower::isle::generated_code::MInst as Inst;
36
/// Out-of-line information carried by the return-call (tail-call)
/// instructions, generic over the call destination type `T`.
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    /// Where the call is going (e.g. a symbol or a register/memory operand,
    /// depending on `T`).
    pub dest: T,

    /// The size of the stack argument area for the new call.
    // NOTE(review): presumably in bytes, matching other stack sizes in this
    // backend — confirm against the emission code.
    pub new_stack_arg_size: u32,

    /// The argument operands (and constraints) used by the call.
    pub uses: CallArgList,

    /// A temporary general-purpose register for use during emission of the
    /// return-call sequence.
    pub tmp: WritableGpr,
}
53
/// Pins the size of `Inst` on 64-bit hosts so accidental growth of the
/// instruction enum is caught at test time rather than silently bloating
/// every compiled function's instruction buffer.
#[test]
#[cfg(target_pointer_width = "64")]
fn inst_size_test() {
    // If this fails, an `Inst` variant likely grew; consider boxing the
    // enlarged payload before bumping the expected size here.
    assert_eq!(56, std::mem::size_of::<Inst>());
}
61
/// Returns true if `x` is equal to the sign-extension of its own low 32
/// bits — i.e. it can be represented as a 32-bit immediate that the CPU
/// sign-extends to 64 bits.
pub(crate) fn low32_will_sign_extend_to_64(x: u64) -> bool {
    let signed = x as i64;
    // Truncate to the low 32 bits, then widen back with sign-extension; the
    // value is representable iff the round trip is lossless.
    i64::from(signed as i32) == signed
}
66
impl Inst {
    /// Returns the set of ISA feature extensions (beyond the baseline this
    /// backend assumes) that this instruction requires; an empty vector
    /// means the instruction is always available.
    fn available_in_any_isa(&self) -> SmallVec<[InstructionSet; 2]> {
        match self {
            // These variants (real instructions and pseudo-instruction
            // sequences) need no feature set beyond the baseline.
            Inst::AluRmiR { .. }
            | Inst::AtomicRmwSeq { .. }
            | Inst::Bswap { .. }
            | Inst::CallKnown { .. }
            | Inst::CallUnknown { .. }
            | Inst::ReturnCallKnown { .. }
            | Inst::ReturnCallUnknown { .. }
            | Inst::CheckedSRemSeq { .. }
            | Inst::CheckedSRemSeq8 { .. }
            | Inst::Cmove { .. }
            | Inst::CmpRmiR { .. }
            | Inst::CvtFloatToSintSeq { .. }
            | Inst::CvtFloatToUintSeq { .. }
            | Inst::CvtUint64ToFloatSeq { .. }
            | Inst::Div { .. }
            | Inst::Div8 { .. }
            | Inst::Fence { .. }
            | Inst::Hlt
            | Inst::Imm { .. }
            | Inst::JmpCond { .. }
            | Inst::JmpCondOr { .. }
            | Inst::WinchJmpIf { .. }
            | Inst::JmpKnown { .. }
            | Inst::JmpTableSeq { .. }
            | Inst::JmpUnknown { .. }
            | Inst::LoadEffectiveAddress { .. }
            | Inst::LoadExtName { .. }
            | Inst::LockCmpxchg { .. }
            | Inst::LockXadd { .. }
            | Inst::Xchg { .. }
            | Inst::Mov64MR { .. }
            | Inst::MovImmM { .. }
            | Inst::MovRM { .. }
            | Inst::MovRR { .. }
            | Inst::MovFromPReg { .. }
            | Inst::MovToPReg { .. }
            | Inst::MovsxRmR { .. }
            | Inst::MovzxRmR { .. }
            | Inst::Mul { .. }
            | Inst::Mul8 { .. }
            | Inst::IMul { .. }
            | Inst::IMulImm { .. }
            | Inst::Neg { .. }
            | Inst::Not { .. }
            | Inst::Nop { .. }
            | Inst::Pop64 { .. }
            | Inst::Push64 { .. }
            | Inst::StackProbeLoop { .. }
            | Inst::Args { .. }
            | Inst::Rets { .. }
            | Inst::Ret { .. }
            | Inst::Setcc { .. }
            | Inst::ShiftR { .. }
            | Inst::SignExtendData { .. }
            | Inst::StackSwitchBasic { .. }
            | Inst::TrapIf { .. }
            | Inst::TrapIfAnd { .. }
            | Inst::TrapIfOr { .. }
            | Inst::Ud2 { .. }
            | Inst::XmmCmove { .. }
            | Inst::XmmCmpRmR { .. }
            | Inst::XmmMinMaxSeq { .. }
            | Inst::XmmUninitializedValue { .. }
            | Inst::ElfTlsGetAddr { .. }
            | Inst::MachOTlsGetAddr { .. }
            | Inst::CoffTlsGetAddr { .. }
            | Inst::Unwind { .. }
            | Inst::DummyUse { .. }
            | Inst::AluConstOp { .. } => smallvec![],

            // 128-bit atomics require the CMPXCHG16B extension.
            Inst::LockCmpxchg16b { .. }
            | Inst::Atomic128RmwSeq { .. }
            | Inst::Atomic128XchgSeq { .. } => smallvec![InstructionSet::CMPXCHG16b],

            // For the remaining variants, the required feature set depends
            // on the specific opcode, so defer to the opcode itself.
            Inst::AluRmRVex { op, .. } => op.available_from(),
            Inst::UnaryRmR { op, .. } => op.available_from(),
            Inst::UnaryRmRVex { op, .. } => op.available_from(),
            Inst::UnaryRmRImmVex { op, .. } => op.available_from(),

            // SSE-family opcodes report a single feature set, which we wrap
            // in a one-element vector here.
            Inst::GprToXmm { op, .. }
            | Inst::XmmMovRM { op, .. }
            | Inst::XmmMovRMImm { op, .. }
            | Inst::XmmRmiReg { opcode: op, .. }
            | Inst::XmmRmR { op, .. }
            | Inst::XmmRmRUnaligned { op, .. }
            | Inst::XmmRmRBlend { op, .. }
            | Inst::XmmRmRImm { op, .. }
            | Inst::XmmToGpr { op, .. }
            | Inst::XmmToGprImm { op, .. }
            | Inst::XmmUnaryRmRImm { op, .. }
            | Inst::XmmUnaryRmRUnaligned { op, .. }
            | Inst::XmmUnaryRmR { op, .. }
            | Inst::CvtIntToFloat { op, .. } => smallvec![op.available_from()],

            // EVEX-encoded (AVX-512 family) opcodes.
            Inst::XmmUnaryRmREvex { op, .. }
            | Inst::XmmRmREvex { op, .. }
            | Inst::XmmRmREvex3 { op, .. }
            | Inst::XmmUnaryRmRImmEvex { op, .. } => op.available_from(),

            // VEX-encoded (AVX family) opcodes.
            Inst::XmmRmiRVex { op, .. }
            | Inst::XmmRmRVex3 { op, .. }
            | Inst::XmmRmRImmVex { op, .. }
            | Inst::XmmRmRBlendVex { op, .. }
            | Inst::XmmVexPinsr { op, .. }
            | Inst::XmmUnaryRmRVex { op, .. }
            | Inst::XmmUnaryRmRImmVex { op, .. }
            | Inst::XmmMovRMVex { op, .. }
            | Inst::XmmMovRMImmVex { op, .. }
            | Inst::XmmToGprImmVex { op, .. }
            | Inst::XmmToGprVex { op, .. }
            | Inst::GprToXmmVex { op, .. }
            | Inst::CvtIntToFloatVex { op, .. }
            | Inst::XmmCmpRmRVex { op, .. } => op.available_from(),

            // MULX is part of the BMI2 extension.
            Inst::MulX { .. } => smallvec![InstructionSet::BMI2],

            // Externally-defined (assembler-crate) instructions declare
            // their own feature requirements; translate them into this
            // module's `InstructionSet` values.
            Inst::External { inst } => {
                use cranelift_assembler_x64::Feature::*;
                let mut features = smallvec![];
                for f in inst.features() {
                    match f {
                        // Baseline features: always available here.
                        _64b | compat => {}
                        sse => features.push(InstructionSet::SSE),
                    }
                }
                features
            }
        }
    }
}
207
impl Inst {
    /// Creates a `Nop` pseudo-instruction of exactly `len` bytes; `len` must
    /// be at most 15 (enforced by debug assertion).
    pub(crate) fn nop(len: u8) -> Self {
        debug_assert!(len <= 15);
        Self::Nop { len }
    }

    /// Integer ALU operation in two-operand form: `dst := dst <op> src`.
    pub(crate) fn alu_rmi_r(
        size: OperandSize,
        op: AluRmiROpcode,
        src: RegMemImm,
        dst: Writable<Reg>,
    ) -> Self {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Self::AluRmiR {
            size,
            op,
            // The first source operand is tied to the destination register:
            // this is a destructive (read-modify-write) operation.
            src1: Gpr::unwrap_new(dst.to_reg()),
            src2: GprMemImm::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Unary GPR operation; only 16/32/64-bit operand sizes are valid.
    #[allow(dead_code)]
    pub(crate) fn unary_rm_r(
        size: OperandSize,
        op: UnaryRmROpcode,
        src: RegMem,
        dst: Writable<Reg>,
    ) -> Self {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        debug_assert!(size.is_one_of(&[
            OperandSize::Size16,
            OperandSize::Size32,
            OperandSize::Size64
        ]));
        Self::UnaryRmR {
            size,
            op,
            src: GprMem::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Bitwise NOT; the source register doubles as the destination.
    pub(crate) fn not(size: OperandSize, src: Writable<Reg>) -> Inst {
        debug_assert_eq!(src.to_reg().class(), RegClass::Int);
        Inst::Not {
            size,
            src: Gpr::unwrap_new(src.to_reg()),
            dst: WritableGpr::from_writable_reg(src).unwrap(),
        }
    }

    /// Integer division producing both quotient and remainder; `trap` is the
    /// trap code raised for the faulting cases (e.g. divide-by-zero).
    pub(crate) fn div(
        size: OperandSize,
        sign: DivSignedness,
        trap: TrapCode,
        divisor: RegMem,
        dividend_lo: Gpr,
        dividend_hi: Gpr,
        dst_quotient: WritableGpr,
        dst_remainder: WritableGpr,
    ) -> Inst {
        divisor.assert_regclass_is(RegClass::Int);
        Inst::Div {
            size,
            sign,
            trap,
            divisor: GprMem::unwrap_new(divisor),
            dividend_lo,
            dividend_hi,
            dst_quotient,
            dst_remainder,
        }
    }

    /// 8-bit integer division; unlike wider `div`, a single destination
    /// register receives the result.
    pub(crate) fn div8(
        sign: DivSignedness,
        trap: TrapCode,
        divisor: RegMem,
        dividend: Gpr,
        dst: WritableGpr,
    ) -> Inst {
        divisor.assert_regclass_is(RegClass::Int);
        Inst::Div8 {
            sign,
            trap,
            divisor: GprMem::unwrap_new(divisor),
            dividend,
            dst,
        }
    }

    /// Loads the immediate `simm64` into `dst`.
    pub(crate) fn imm(dst_size: OperandSize, simm64: u64, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        // Narrow to a 32-bit move whenever the value fits in 32 bits: keep
        // the 64-bit form only when the immediate actually needs the upper
        // bits.
        let dst_size = match dst_size {
            OperandSize::Size64 if simm64 > u32::max_value() as u64 => OperandSize::Size64,
            _ => OperandSize::Size32,
        };
        Inst::Imm {
            dst_size,
            simm64,
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Register-to-register GPR move; 32- or 64-bit only.
    pub(crate) fn mov_r_r(size: OperandSize, src: Reg, dst: Writable<Reg>) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(src.class() == RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = Gpr::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovRR { size, src, dst }
    }

    /// Unary XMM operation with an aligned memory source.
    pub(crate) fn xmm_unary_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmUnaryRmR {
            op,
            src: XmmMemAligned::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Binary XMM operation in two-operand form: `dst := dst <op> src`.
    pub(crate) fn xmm_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Self {
        src.assert_regclass_is(RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmRmR {
            op,
            // As with the integer ALU form, the first source is tied to the
            // destination register.
            src1: Xmm::unwrap_new(dst.to_reg()),
            src2: XmmMemAligned::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Three-operand VEX XMM operation (test-only helper); `src1` is tied to
    /// `dst`.
    #[cfg(test)]
    pub(crate) fn xmm_rmr_vex3(op: AvxOpcode, src3: RegMem, src2: Reg, dst: Writable<Reg>) -> Self {
        src3.assert_regclass_is(RegClass::Float);
        debug_assert!(src2.class() == RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmRmRVex3 {
            op,
            src3: XmmMem::unwrap_new(src3),
            src2: Xmm::unwrap_new(src2),
            src1: Xmm::unwrap_new(dst.to_reg()),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Stores an XMM register to memory.
    pub(crate) fn xmm_mov_r_m(op: SseOpcode, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
        debug_assert!(src.class() == RegClass::Float);
        Inst::XmmMovRM {
            op,
            src: Xmm::unwrap_new(src),
            dst: dst.into(),
        }
    }

    /// Moves from an XMM register into a 32- or 64-bit GPR.
    pub(crate) fn xmm_to_gpr(
        op: SseOpcode,
        src: Reg,
        dst: Writable<Reg>,
        dst_size: OperandSize,
    ) -> Inst {
        debug_assert!(src.class() == RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        Inst::XmmToGpr {
            op,
            src: Xmm::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
            dst_size,
        }
    }

    /// Moves a 32- or 64-bit GPR (or memory) value into an XMM register.
    pub(crate) fn gpr_to_xmm(
        op: SseOpcode,
        src: RegMem,
        src_size: OperandSize,
        dst: Writable<Reg>,
    ) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::GprToXmm {
            op,
            src: GprMem::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
            src_size,
        }
    }

    /// XMM comparison; sets flags only, so there is no destination operand.
    pub(crate) fn xmm_cmp_rm_r(op: SseOpcode, src1: Reg, src2: RegMem) -> Inst {
        src2.assert_regclass_is(RegClass::Float);
        debug_assert!(src1.class() == RegClass::Float);
        let src2 = XmmMemAligned::unwrap_new(src2);
        let src1 = Xmm::unwrap_new(src1);
        Inst::XmmCmpRmR { op, src1, src2 }
    }

    /// Pseudo-instruction sequence computing a float min or max (selected by
    /// `is_min`) of `lhs` and `rhs` into `dst`; 32- or 64-bit floats only.
    #[allow(dead_code)]
    pub(crate) fn xmm_min_max_seq(
        size: OperandSize,
        is_min: bool,
        lhs: Reg,
        rhs: Reg,
        dst: Writable<Reg>,
    ) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert_eq!(lhs.class(), RegClass::Float);
        debug_assert_eq!(rhs.class(), RegClass::Float);
        debug_assert_eq!(dst.to_reg().class(), RegClass::Float);
        Inst::XmmMinMaxSeq {
            size,
            is_min,
            lhs: Xmm::unwrap_new(lhs),
            rhs: Xmm::unwrap_new(rhs),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Zero-extending load/move into a GPR (MOVZX family).
    pub(crate) fn movzx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = GprMem::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovzxRmR { ext_mode, src, dst }
    }

    /// Sign-extending load/move into a GPR (MOVSX family).
    pub(crate) fn movsx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = GprMem::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovsxRmR { ext_mode, src, dst }
    }

    /// Plain 64-bit load from memory into a GPR.
    pub(crate) fn mov64_m_r(src: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::Mov64MR {
            src: src.into(),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Store of a GPR to memory, at the given operand size.
    pub(crate) fn mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
        debug_assert!(src.class() == RegClass::Int);
        Inst::MovRM {
            size,
            src: Gpr::unwrap_new(src),
            dst: dst.into(),
        }
    }

    /// LEA: computes the effective address of `addr` into `dst` (always
    /// full 64-bit width here).
    pub(crate) fn lea(addr: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::LoadEffectiveAddress {
            addr: addr.into(),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
            size: OperandSize::Size64,
        }
    }

    /// Shift operation; `num_bits` is either an immediate (which must be
    /// smaller than the operand width) or a register shift amount.
    pub(crate) fn shift_r(
        size: OperandSize,
        kind: ShiftKind,
        num_bits: Imm8Gpr,
        src: Reg,
        dst: Writable<Reg>,
    ) -> Inst {
        // Only validate the range when the amount is a compile-time
        // immediate; register amounts are masked by the hardware.
        if let &Imm8Reg::Imm8 { imm: num_bits } = num_bits.as_imm8_reg() {
            debug_assert!(num_bits < size.to_bits());
        }
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::ShiftR {
            size,
            kind,
            src: Gpr::unwrap_new(src),
            num_bits,
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Integer compare (CMP): sets flags from `src1` and `src2`, no
    /// destination register.
    pub(crate) fn cmp_rmi_r(size: OperandSize, src1: Reg, src2: RegMemImm) -> Inst {
        src2.assert_regclass_is(RegClass::Int);
        debug_assert_eq!(src1.class(), RegClass::Int);
        Inst::CmpRmiR {
            size,
            src1: Gpr::unwrap_new(src1),
            src2: GprMemImm::unwrap_new(src2),
            opcode: CmpOpcode::Cmp,
        }
    }

    /// Unconditional trap (UD2) with the given trap code.
    pub(crate) fn trap(trap_code: TrapCode) -> Inst {
        Inst::Ud2 { trap_code }
    }

    /// Conditional trap: traps with `trap_code` when condition `cc` holds.
    pub(crate) fn trap_if(cc: CC, trap_code: TrapCode) -> Inst {
        Inst::TrapIf { cc, trap_code }
    }

    /// Conditional move: `dst := src` if `cc` holds, else `dst` keeps its
    /// prior value (modeled explicitly as the `alternative` operand).
    /// 16/32/64-bit only — CMOV has no 8-bit form.
    pub(crate) fn cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
        debug_assert!(size.is_one_of(&[
            OperandSize::Size16,
            OperandSize::Size32,
            OperandSize::Size64
        ]));
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::Cmove {
            size,
            cc,
            consequent: GprMem::unwrap_new(src),
            alternative: Gpr::unwrap_new(dst.to_reg()),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Pushes a 64-bit GPR, memory, or immediate operand onto the stack.
    pub(crate) fn push64(src: RegMemImm) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        let src = GprMemImm::unwrap_new(src);
        Inst::Push64 { src }
    }

    /// Pops a 64-bit value from the stack into `dst`.
    pub(crate) fn pop64(dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::Pop64 { dst }
    }

    /// Direct call to a known external symbol.
    pub(crate) fn call_known(info: Box<CallInfo<ExternalName>>) -> Inst {
        Inst::CallKnown { info }
    }

    /// Indirect call through a register or memory operand.
    pub(crate) fn call_unknown(info: Box<CallInfo<RegMem>>) -> Inst {
        info.dest.assert_regclass_is(RegClass::Int);
        Inst::CallUnknown { info }
    }

    /// Return, additionally popping `stack_bytes_to_pop` bytes of stack
    /// arguments.
    pub(crate) fn ret(stack_bytes_to_pop: u32) -> Inst {
        Inst::Ret { stack_bytes_to_pop }
    }

    /// Unconditional jump to a known label.
    pub(crate) fn jmp_known(dst: MachLabel) -> Inst {
        Inst::JmpKnown { dst }
    }

    /// Indirect jump through a register or memory operand.
    pub(crate) fn jmp_unknown(target: RegMem) -> Inst {
        target.assert_regclass_is(RegClass::Int);
        Inst::JmpUnknown { target }
    }

    /// Loads a scalar value of type `ty` from memory into `to_reg`,
    /// choosing the appropriate instruction for the register class and,
    /// for narrow integer loads, the extension kind.
    pub(crate) fn load(
        ty: Type,
        from_addr: impl Into<SyntheticAmode>,
        to_reg: Writable<Reg>,
        ext_kind: ExtKind,
    ) -> Inst {
        let rc = to_reg.to_reg().class();
        match rc {
            RegClass::Int => {
                // Narrow loads must be extended to the full 64-bit register;
                // an 8-byte load needs no extension.
                let ext_mode = match ty.bytes() {
                    1 => Some(ExtMode::BQ),
                    2 => Some(ExtMode::WQ),
                    4 => Some(ExtMode::LQ),
                    8 => None,
                    _ => unreachable!("the type should never use a scalar load: {}", ty),
                };
                if let Some(ext_mode) = ext_mode {
                    match ext_kind {
                        ExtKind::SignExtend => {
                            Inst::movsx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
                        }
                        ExtKind::ZeroExtend => {
                            Inst::movzx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
                        }
                        ExtKind::None => {
                            panic!("expected an extension kind for extension mode: {ext_mode:?}")
                        }
                    }
                } else {
                    Inst::mov64_m_r(from_addr, to_reg)
                }
            }
            RegClass::Float => {
                let opcode = match ty {
                    types::F16 => panic!("loading a f16 requires multiple instructions"),
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to load type: {}", ty),
                };
                Inst::xmm_unary_rm_r(opcode, RegMem::mem(from_addr), to_reg)
            }
            RegClass::Vector => unreachable!(),
        }
    }

    /// Stores a scalar value of type `ty` from `from_reg` to memory,
    /// choosing the appropriate instruction for the register class.
    pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
        let rc = from_reg.class();
        match rc {
            RegClass::Int => Inst::mov_r_m(OperandSize::from_ty(ty), from_reg, to_addr),
            RegClass::Float => {
                let opcode = match ty {
                    types::F16 => panic!("storing a f16 requires multiple instructions"),
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to store type: {}", ty),
                };
                Inst::xmm_mov_r_m(opcode, from_reg, to_addr)
            }
            RegClass::Vector => unreachable!(),
        }
    }
}
644
645impl PrettyPrint for Inst {
649 fn pretty_print(&self, _size: u8) -> String {
650 fn ljustify(s: String) -> String {
651 let w = 7;
652 if s.len() >= w {
653 s
654 } else {
655 let need = usize::min(w, w - s.len());
656 s + &format!("{nil: <width$}", nil = "", width = need)
657 }
658 }
659
660 fn ljustify2(s1: String, s2: String) -> String {
661 ljustify(s1 + &s2)
662 }
663
664 fn suffix_lq(size: OperandSize) -> String {
665 match size {
666 OperandSize::Size32 => "l",
667 OperandSize::Size64 => "q",
668 _ => unreachable!(),
669 }
670 .to_string()
671 }
672
673 #[allow(dead_code)]
674 fn suffix_lqb(size: OperandSize) -> String {
675 match size {
676 OperandSize::Size32 => "l",
677 OperandSize::Size64 => "q",
678 _ => unreachable!(),
679 }
680 .to_string()
681 }
682
683 fn suffix_bwlq(size: OperandSize) -> String {
684 match size {
685 OperandSize::Size8 => "b".to_string(),
686 OperandSize::Size16 => "w".to_string(),
687 OperandSize::Size32 => "l".to_string(),
688 OperandSize::Size64 => "q".to_string(),
689 }
690 }
691
692 match self {
693 Inst::Nop { len } => format!("{} len={}", ljustify("nop".to_string()), len),
694
695 Inst::AluRmiR {
696 size,
697 op,
698 src1,
699 src2,
700 dst,
701 } => {
702 let size_bytes = size.to_bytes();
703 let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
704 let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
705 let src2 = src2.pretty_print(size_bytes);
706 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
707 format!("{op} {src1}, {src2}, {dst}")
708 }
709 Inst::AluConstOp { op, dst, size } => {
710 let size_bytes = size.to_bytes();
711 let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
712 let op = ljustify2(op.to_string(), suffix_lqb(*size));
713 format!("{op} {dst}, {dst}, {dst}")
714 }
715 Inst::AluRmRVex {
716 size,
717 op,
718 src1,
719 src2,
720 dst,
721 } => {
722 let size_bytes = size.to_bytes();
723 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
724 let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
725 let src2 = src2.pretty_print(size_bytes);
726 let op = ljustify2(op.to_string(), String::new());
727 format!("{op} {src2}, {src1}, {dst}")
728 }
729 Inst::UnaryRmR { src, dst, op, size } => {
730 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
731 let src = src.pretty_print(size.to_bytes());
732 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
733 format!("{op} {src}, {dst}")
734 }
735
736 Inst::UnaryRmRVex { src, dst, op, size } => {
737 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
738 let src = src.pretty_print(size.to_bytes());
739 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
740 format!("{op} {src}, {dst}")
741 }
742
743 Inst::UnaryRmRImmVex {
744 src,
745 dst,
746 op,
747 size,
748 imm,
749 } => {
750 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
751 let src = src.pretty_print(size.to_bytes());
752 format!(
753 "{} ${imm}, {src}, {dst}",
754 ljustify2(op.to_string(), suffix_bwlq(*size))
755 )
756 }
757
758 Inst::Not { size, src, dst } => {
759 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
760 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
761 let op = ljustify2("not".to_string(), suffix_bwlq(*size));
762 format!("{op} {src}, {dst}")
763 }
764
765 Inst::Neg { size, src, dst } => {
766 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
767 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
768 let op = ljustify2("neg".to_string(), suffix_bwlq(*size));
769 format!("{op} {src}, {dst}")
770 }
771
772 Inst::Div {
773 size,
774 sign,
775 trap,
776 divisor,
777 dividend_lo,
778 dividend_hi,
779 dst_quotient,
780 dst_remainder,
781 } => {
782 let divisor = divisor.pretty_print(size.to_bytes());
783 let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
784 let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
785 let dst_quotient =
786 pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
787 let dst_remainder =
788 pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
789 let op = ljustify(match sign {
790 DivSignedness::Signed => "idiv".to_string(),
791 DivSignedness::Unsigned => "div".to_string(),
792 });
793 format!(
794 "{op} {dividend_lo}, {dividend_hi}, {divisor}, {dst_quotient}, {dst_remainder} ; trap={trap}"
795 )
796 }
797
798 Inst::Div8 {
799 sign,
800 trap,
801 divisor,
802 dividend,
803 dst,
804 } => {
805 let divisor = divisor.pretty_print(1);
806 let dividend = pretty_print_reg(dividend.to_reg(), 1);
807 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
808 let op = ljustify(match sign {
809 DivSignedness::Signed => "idiv".to_string(),
810 DivSignedness::Unsigned => "div".to_string(),
811 });
812 format!("{op} {dividend}, {divisor}, {dst} ; trap={trap}")
813 }
814
815 Inst::Mul {
816 size,
817 signed,
818 src1,
819 src2,
820 dst_lo,
821 dst_hi,
822 } => {
823 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
824 let dst_lo = pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes());
825 let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
826 let src2 = src2.pretty_print(size.to_bytes());
827 let suffix = suffix_bwlq(*size);
828 let op = ljustify(if *signed {
829 format!("imul{suffix}")
830 } else {
831 format!("mul{suffix}")
832 });
833 format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
834 }
835
836 Inst::MulX {
837 size,
838 src1,
839 src2,
840 dst_lo,
841 dst_hi,
842 } => {
843 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
844 let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
845 let dst_lo = if dst_lo.to_reg().is_invalid_sentinel() {
846 dst_hi.clone()
847 } else {
848 pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes())
849 };
850 let src2 = src2.pretty_print(size.to_bytes());
851 let suffix = suffix_bwlq(*size);
852 let op = ljustify(format!("mulx{suffix}"));
853 format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
854 }
855
856 Inst::Mul8 {
857 signed,
858 src1,
859 src2,
860 dst,
861 } => {
862 let src1 = pretty_print_reg(src1.to_reg(), 1);
863 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
864 let src2 = src2.pretty_print(1);
865 let op = ljustify(if *signed {
866 "imulb".to_string()
867 } else {
868 "mulb".to_string()
869 });
870 format!("{op} {src1}, {src2}, {dst}")
871 }
872
873 Inst::IMul {
874 size,
875 src1,
876 src2,
877 dst,
878 } => {
879 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
880 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
881 let src2 = src2.pretty_print(size.to_bytes());
882 let suffix = suffix_bwlq(*size);
883 let op = ljustify(format!("imul{suffix}"));
884 format!("{op} {src1}, {src2}, {dst}")
885 }
886
887 Inst::IMulImm {
888 size,
889 src1,
890 src2,
891 dst,
892 } => {
893 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
894 let src1 = src1.pretty_print(size.to_bytes());
895 let suffix = suffix_bwlq(*size);
896 let op = ljustify(format!("imul{suffix}"));
897 format!("{op} {src1}, {src2:#x}, {dst}")
898 }
899
900 Inst::CheckedSRemSeq {
901 size,
902 divisor,
903 dividend_lo,
904 dividend_hi,
905 dst_quotient,
906 dst_remainder,
907 } => {
908 let divisor = pretty_print_reg(divisor.to_reg(), size.to_bytes());
909 let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
910 let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
911 let dst_quotient =
912 pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
913 let dst_remainder =
914 pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
915 format!(
916 "checked_srem_seq {dividend_lo}, {dividend_hi}, \
917 {divisor}, {dst_quotient}, {dst_remainder}",
918 )
919 }
920
921 Inst::CheckedSRemSeq8 {
922 divisor,
923 dividend,
924 dst,
925 } => {
926 let divisor = pretty_print_reg(divisor.to_reg(), 1);
927 let dividend = pretty_print_reg(dividend.to_reg(), 1);
928 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
929 format!("checked_srem_seq {dividend}, {divisor}, {dst}")
930 }
931
932 Inst::SignExtendData { size, src, dst } => {
933 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
934 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
935 let op = match size {
936 OperandSize::Size8 => "cbw",
937 OperandSize::Size16 => "cwd",
938 OperandSize::Size32 => "cdq",
939 OperandSize::Size64 => "cqo",
940 };
941 format!("{op} {src}, {dst}")
942 }
943
944 Inst::XmmUnaryRmR { op, src, dst, .. } => {
945 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
946 let src = src.pretty_print(op.src_size());
947 let op = ljustify(op.to_string());
948 format!("{op} {src}, {dst}")
949 }
950
951 Inst::XmmUnaryRmRUnaligned { op, src, dst, .. } => {
952 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
953 let src = src.pretty_print(op.src_size());
954 let op = ljustify(op.to_string());
955 format!("{op} {src}, {dst}")
956 }
957
958 Inst::XmmUnaryRmRImm {
959 op, src, dst, imm, ..
960 } => {
961 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
962 let src = src.pretty_print(op.src_size());
963 let op = ljustify(op.to_string());
964 format!("{op} ${imm}, {src}, {dst}")
965 }
966
967 Inst::XmmUnaryRmRVex { op, src, dst, .. } => {
968 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
969 let src = src.pretty_print(8);
970 let op = ljustify(op.to_string());
971 format!("{op} {src}, {dst}")
972 }
973
974 Inst::XmmUnaryRmRImmVex {
975 op, src, dst, imm, ..
976 } => {
977 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
978 let src = src.pretty_print(8);
979 let op = ljustify(op.to_string());
980 format!("{op} ${imm}, {src}, {dst}")
981 }
982
983 Inst::XmmUnaryRmREvex { op, src, dst, .. } => {
984 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
985 let src = src.pretty_print(8);
986 let op = ljustify(op.to_string());
987 format!("{op} {src}, {dst}")
988 }
989
990 Inst::XmmUnaryRmRImmEvex {
991 op, src, dst, imm, ..
992 } => {
993 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
994 let src = src.pretty_print(8);
995 let op = ljustify(op.to_string());
996 format!("{op} ${imm}, {src}, {dst}")
997 }
998
999 Inst::XmmMovRM { op, src, dst, .. } => {
1000 let src = pretty_print_reg(src.to_reg(), 8);
1001 let dst = dst.pretty_print(8);
1002 let op = ljustify(op.to_string());
1003 format!("{op} {src}, {dst}")
1004 }
1005
1006 Inst::XmmMovRMVex { op, src, dst, .. } => {
1007 let src = pretty_print_reg(src.to_reg(), 8);
1008 let dst = dst.pretty_print(8);
1009 let op = ljustify(op.to_string());
1010 format!("{op} {src}, {dst}")
1011 }
1012
1013 Inst::XmmMovRMImm {
1014 op, src, dst, imm, ..
1015 } => {
1016 let src = pretty_print_reg(src.to_reg(), 8);
1017 let dst = dst.pretty_print(8);
1018 let op = ljustify(op.to_string());
1019 format!("{op} ${imm}, {src}, {dst}")
1020 }
1021
1022 Inst::XmmMovRMImmVex {
1023 op, src, dst, imm, ..
1024 } => {
1025 let src = pretty_print_reg(src.to_reg(), 8);
1026 let dst = dst.pretty_print(8);
1027 let op = ljustify(op.to_string());
1028 format!("{op} ${imm}, {src}, {dst}")
1029 }
1030
1031 Inst::XmmRmR {
1032 op,
1033 src1,
1034 src2,
1035 dst,
1036 ..
1037 } => {
1038 let src1 = pretty_print_reg(src1.to_reg(), 8);
1039 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1040 let src2 = src2.pretty_print(8);
1041 let op = ljustify(op.to_string());
1042 format!("{op} {src1}, {src2}, {dst}")
1043 }
1044
1045 Inst::XmmRmRUnaligned {
1046 op,
1047 src1,
1048 src2,
1049 dst,
1050 ..
1051 } => {
1052 let src1 = pretty_print_reg(src1.to_reg(), 8);
1053 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1054 let src2 = src2.pretty_print(8);
1055 let op = ljustify(op.to_string());
1056 format!("{op} {src1}, {src2}, {dst}")
1057 }
1058
1059 Inst::XmmRmRBlend {
1060 op,
1061 src1,
1062 src2,
1063 mask,
1064 dst,
1065 } => {
1066 let src1 = pretty_print_reg(src1.to_reg(), 8);
1067 let mask = mask.to_reg();
1068 let mask = if mask.is_virtual() {
1069 format!(" <{}>", show_ireg_sized(mask, 8))
1070 } else {
1071 debug_assert_eq!(mask, regs::xmm0());
1072 String::new()
1073 };
1074 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1075 let src2 = src2.pretty_print(8);
1076 let op = ljustify(op.to_string());
1077 format!("{op} {src1}, {src2}, {dst}{mask}")
1078 }
1079
1080 Inst::XmmRmiRVex {
1081 op,
1082 src1,
1083 src2,
1084 dst,
1085 ..
1086 } => {
1087 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1088 let src1 = pretty_print_reg(src1.to_reg(), 8);
1089 let src2 = src2.pretty_print(8);
1090 let op = ljustify(op.to_string());
1091 format!("{op} {src1}, {src2}, {dst}")
1092 }
1093
1094 Inst::XmmRmRImmVex {
1095 op,
1096 src1,
1097 src2,
1098 dst,
1099 imm,
1100 ..
1101 } => {
1102 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1103 let src1 = pretty_print_reg(src1.to_reg(), 8);
1104 let src2 = src2.pretty_print(8);
1105 let op = ljustify(op.to_string());
1106 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1107 }
1108
1109 Inst::XmmVexPinsr {
1110 op,
1111 src1,
1112 src2,
1113 dst,
1114 imm,
1115 ..
1116 } => {
1117 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1118 let src1 = pretty_print_reg(src1.to_reg(), 8);
1119 let src2 = src2.pretty_print(8);
1120 let op = ljustify(op.to_string());
1121 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1122 }
1123
1124 Inst::XmmRmRVex3 {
1125 op,
1126 src1,
1127 src2,
1128 src3,
1129 dst,
1130 ..
1131 } => {
1132 let src1 = pretty_print_reg(src1.to_reg(), 8);
1133 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1134 let src2 = pretty_print_reg(src2.to_reg(), 8);
1135 let src3 = src3.pretty_print(8);
1136 let op = ljustify(op.to_string());
1137 format!("{op} {src1}, {src2}, {src3}, {dst}")
1138 }
1139
1140 Inst::XmmRmRBlendVex {
1141 op,
1142 src1,
1143 src2,
1144 mask,
1145 dst,
1146 ..
1147 } => {
1148 let src1 = pretty_print_reg(src1.to_reg(), 8);
1149 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1150 let src2 = src2.pretty_print(8);
1151 let mask = pretty_print_reg(mask.to_reg(), 8);
1152 let op = ljustify(op.to_string());
1153 format!("{op} {src1}, {src2}, {mask}, {dst}")
1154 }
1155
1156 Inst::XmmRmREvex {
1157 op,
1158 src1,
1159 src2,
1160 dst,
1161 ..
1162 } => {
1163 let src1 = pretty_print_reg(src1.to_reg(), 8);
1164 let src2 = src2.pretty_print(8);
1165 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1166 let op = ljustify(op.to_string());
1167 format!("{op} {src2}, {src1}, {dst}")
1168 }
1169
1170 Inst::XmmRmREvex3 {
1171 op,
1172 src1,
1173 src2,
1174 src3,
1175 dst,
1176 ..
1177 } => {
1178 let src1 = pretty_print_reg(src1.to_reg(), 8);
1179 let src2 = pretty_print_reg(src2.to_reg(), 8);
1180 let src3 = src3.pretty_print(8);
1181 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1182 let op = ljustify(op.to_string());
1183 format!("{op} {src3}, {src2}, {src1}, {dst}")
1184 }
1185
1186 Inst::XmmMinMaxSeq {
1187 lhs,
1188 rhs,
1189 dst,
1190 is_min,
1191 size,
1192 } => {
1193 let rhs = pretty_print_reg(rhs.to_reg(), 8);
1194 let lhs = pretty_print_reg(lhs.to_reg(), 8);
1195 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1196 let op = ljustify2(
1197 if *is_min {
1198 "xmm min seq ".to_string()
1199 } else {
1200 "xmm max seq ".to_string()
1201 },
1202 format!("f{}", size.to_bits()),
1203 );
1204 format!("{op} {lhs}, {rhs}, {dst}")
1205 }
1206
1207 Inst::XmmRmRImm {
1208 op,
1209 src1,
1210 src2,
1211 dst,
1212 imm,
1213 size,
1214 ..
1215 } => {
1216 let src1 = pretty_print_reg(*src1, 8);
1217 let dst = pretty_print_reg(dst.to_reg(), 8);
1218 let src2 = src2.pretty_print(8);
1219 let op = ljustify(format!(
1220 "{}{}",
1221 op.to_string(),
1222 if *size == OperandSize::Size64 {
1223 ".w"
1224 } else {
1225 ""
1226 }
1227 ));
1228 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1229 }
1230
1231 Inst::XmmUninitializedValue { dst } => {
1232 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1233 let op = ljustify("uninit".into());
1234 format!("{op} {dst}")
1235 }
1236
1237 Inst::XmmToGpr {
1238 op,
1239 src,
1240 dst,
1241 dst_size,
1242 } => {
1243 let dst_size = dst_size.to_bytes();
1244 let src = pretty_print_reg(src.to_reg(), 8);
1245 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1246 let op = ljustify(op.to_string());
1247 format!("{op} {src}, {dst}")
1248 }
1249
1250 Inst::XmmToGprVex {
1251 op,
1252 src,
1253 dst,
1254 dst_size,
1255 } => {
1256 let dst_size = dst_size.to_bytes();
1257 let src = pretty_print_reg(src.to_reg(), 8);
1258 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1259 let op = ljustify(op.to_string());
1260 format!("{op} {src}, {dst}")
1261 }
1262
1263 Inst::XmmToGprImm { op, src, dst, imm } => {
1264 let src = pretty_print_reg(src.to_reg(), 8);
1265 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1266 let op = ljustify(op.to_string());
1267 format!("{op} ${imm}, {src}, {dst}")
1268 }
1269
1270 Inst::XmmToGprImmVex { op, src, dst, imm } => {
1271 let src = pretty_print_reg(src.to_reg(), 8);
1272 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1273 let op = ljustify(op.to_string());
1274 format!("{op} ${imm}, {src}, {dst}")
1275 }
1276
1277 Inst::GprToXmm {
1278 op,
1279 src,
1280 src_size,
1281 dst,
1282 } => {
1283 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1284 let src = src.pretty_print(src_size.to_bytes());
1285 let op = ljustify(op.to_string());
1286 format!("{op} {src}, {dst}")
1287 }
1288
1289 Inst::GprToXmmVex {
1290 op,
1291 src,
1292 src_size,
1293 dst,
1294 } => {
1295 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1296 let src = src.pretty_print(src_size.to_bytes());
1297 let op = ljustify(op.to_string());
1298 format!("{op} {src}, {dst}")
1299 }
1300
1301 Inst::XmmCmpRmR { op, src1, src2 } => {
1302 let src1 = pretty_print_reg(src1.to_reg(), 8);
1303 let src2 = src2.pretty_print(8);
1304 let op = ljustify(op.to_string());
1305 format!("{op} {src2}, {src1}")
1306 }
1307
1308 Inst::CvtIntToFloat {
1309 op,
1310 src1,
1311 src2,
1312 dst,
1313 src2_size,
1314 } => {
1315 let src1 = pretty_print_reg(src1.to_reg(), 8);
1316 let dst = pretty_print_reg(*dst.to_reg(), 8);
1317 let src2 = src2.pretty_print(src2_size.to_bytes());
1318 let op = ljustify(op.to_string());
1319 format!("{op} {src1}, {src2}, {dst}")
1320 }
1321
1322 Inst::CvtIntToFloatVex {
1323 op,
1324 src1,
1325 src2,
1326 dst,
1327 src2_size,
1328 } => {
1329 let dst = pretty_print_reg(*dst.to_reg(), 8);
1330 let src1 = pretty_print_reg(src1.to_reg(), 8);
1331 let src2 = src2.pretty_print(src2_size.to_bytes());
1332 let op = ljustify(op.to_string());
1333 format!("{op} {src1}, {src2}, {dst}")
1334 }
1335
1336 Inst::XmmCmpRmRVex { op, src1, src2 } => {
1337 let src1 = pretty_print_reg(src1.to_reg(), 8);
1338 let src2 = src2.pretty_print(8);
1339 format!("{} {src2}, {src1}", ljustify(op.to_string()))
1340 }
1341
1342 Inst::CvtUint64ToFloatSeq {
1343 src,
1344 dst,
1345 dst_size,
1346 tmp_gpr1,
1347 tmp_gpr2,
1348 ..
1349 } => {
1350 let src = pretty_print_reg(src.to_reg(), 8);
1351 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1352 let tmp_gpr1 = pretty_print_reg(tmp_gpr1.to_reg().to_reg(), 8);
1353 let tmp_gpr2 = pretty_print_reg(tmp_gpr2.to_reg().to_reg(), 8);
1354 let op = ljustify(format!(
1355 "u64_to_{}_seq",
1356 if *dst_size == OperandSize::Size64 {
1357 "f64"
1358 } else {
1359 "f32"
1360 }
1361 ));
1362 format!("{op} {src}, {dst}, {tmp_gpr1}, {tmp_gpr2}")
1363 }
1364
1365 Inst::CvtFloatToSintSeq {
1366 src,
1367 dst,
1368 src_size,
1369 dst_size,
1370 tmp_xmm,
1371 tmp_gpr,
1372 is_saturating,
1373 } => {
1374 let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
1375 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1376 let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
1377 let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
1378 let op = ljustify(format!(
1379 "cvt_float{}_to_sint{}{}_seq",
1380 src_size.to_bits(),
1381 dst_size.to_bits(),
1382 if *is_saturating { "_sat" } else { "" },
1383 ));
1384 format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}")
1385 }
1386
1387 Inst::CvtFloatToUintSeq {
1388 src,
1389 dst,
1390 src_size,
1391 dst_size,
1392 tmp_gpr,
1393 tmp_xmm,
1394 tmp_xmm2,
1395 is_saturating,
1396 } => {
1397 let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
1398 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1399 let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
1400 let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
1401 let tmp_xmm2 = pretty_print_reg(tmp_xmm2.to_reg().to_reg(), 8);
1402 let op = ljustify(format!(
1403 "cvt_float{}_to_uint{}{}_seq",
1404 src_size.to_bits(),
1405 dst_size.to_bits(),
1406 if *is_saturating { "_sat" } else { "" },
1407 ));
1408 format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}, {tmp_xmm2}")
1409 }
1410
1411 Inst::Imm {
1412 dst_size,
1413 simm64,
1414 dst,
1415 } => {
1416 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1417 if *dst_size == OperandSize::Size64 {
1418 let op = ljustify("movabsq".to_string());
1419 let imm = *simm64 as i64;
1420 format!("{op} ${imm}, {dst}")
1421 } else {
1422 let op = ljustify("movl".to_string());
1423 let imm = (*simm64 as u32) as i32;
1424 format!("{op} ${imm}, {dst}")
1425 }
1426 }
1427
1428 Inst::MovImmM { size, simm32, dst } => {
1429 let dst = dst.pretty_print(size.to_bytes());
1430 let suffix = suffix_bwlq(*size);
1431 let imm = match *size {
1432 OperandSize::Size8 => ((*simm32 as u8) as i8).to_string(),
1433 OperandSize::Size16 => ((*simm32 as u16) as i16).to_string(),
1434 OperandSize::Size32 => simm32.to_string(),
1435 OperandSize::Size64 => (*simm32 as i64).to_string(),
1436 };
1437 let op = ljustify2("mov".to_string(), suffix);
1438 format!("{op} ${imm}, {dst}")
1439 }
1440
1441 Inst::MovRR { size, src, dst } => {
1442 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1443 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1444 let op = ljustify2("mov".to_string(), suffix_lq(*size));
1445 format!("{op} {src}, {dst}")
1446 }
1447
1448 Inst::MovFromPReg { src, dst } => {
1449 let src: Reg = (*src).into();
1450 let src = regs::show_ireg_sized(src, 8);
1451 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1452 let op = ljustify("movq".to_string());
1453 format!("{op} {src}, {dst}")
1454 }
1455
1456 Inst::MovToPReg { src, dst } => {
1457 let src = pretty_print_reg(src.to_reg(), 8);
1458 let dst: Reg = (*dst).into();
1459 let dst = regs::show_ireg_sized(dst, 8);
1460 let op = ljustify("movq".to_string());
1461 format!("{op} {src}, {dst}")
1462 }
1463
1464 Inst::MovzxRmR {
1465 ext_mode, src, dst, ..
1466 } => {
1467 let dst_size = if *ext_mode == ExtMode::LQ {
1468 4
1469 } else {
1470 ext_mode.dst_size()
1471 };
1472 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1473 let src = src.pretty_print(ext_mode.src_size());
1474
1475 if *ext_mode == ExtMode::LQ {
1476 let op = ljustify("movl".to_string());
1477 format!("{op} {src}, {dst}")
1478 } else {
1479 let op = ljustify2("movz".to_string(), ext_mode.to_string());
1480 format!("{op} {src}, {dst}")
1481 }
1482 }
1483
1484 Inst::Mov64MR { src, dst, .. } => {
1485 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1486 let src = src.pretty_print(8);
1487 let op = ljustify("movq".to_string());
1488 format!("{op} {src}, {dst}")
1489 }
1490
1491 Inst::LoadEffectiveAddress { addr, dst, size } => {
1492 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1493 let addr = addr.pretty_print(8);
1494 let op = ljustify("lea".to_string());
1495 format!("{op} {addr}, {dst}")
1496 }
1497
1498 Inst::MovsxRmR {
1499 ext_mode, src, dst, ..
1500 } => {
1501 let dst = pretty_print_reg(dst.to_reg().to_reg(), ext_mode.dst_size());
1502 let src = src.pretty_print(ext_mode.src_size());
1503 let op = ljustify2("movs".to_string(), ext_mode.to_string());
1504 format!("{op} {src}, {dst}")
1505 }
1506
1507 Inst::MovRM { size, src, dst, .. } => {
1508 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1509 let dst = dst.pretty_print(size.to_bytes());
1510 let op = ljustify2("mov".to_string(), suffix_bwlq(*size));
1511 format!("{op} {src}, {dst}")
1512 }
1513
1514 Inst::ShiftR {
1515 size,
1516 kind,
1517 num_bits,
1518 src,
1519 dst,
1520 ..
1521 } => {
1522 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1523 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1524 match num_bits.as_imm8_reg() {
1525 &Imm8Reg::Reg { reg } => {
1526 let reg = pretty_print_reg(reg, 1);
1527 let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
1528 format!("{op} {reg}, {src}, {dst}")
1529 }
1530
1531 &Imm8Reg::Imm8 { imm: num_bits } => {
1532 let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
1533 format!("{op} ${num_bits}, {src}, {dst}")
1534 }
1535 }
1536 }
1537
1538 Inst::XmmRmiReg {
1539 opcode,
1540 src1,
1541 src2,
1542 dst,
1543 ..
1544 } => {
1545 let src1 = pretty_print_reg(src1.to_reg(), 8);
1546 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1547 let src2 = src2.pretty_print(8);
1548 let op = ljustify(opcode.to_string());
1549 format!("{op} {src1}, {src2}, {dst}")
1550 }
1551
1552 Inst::CmpRmiR {
1553 size,
1554 src1,
1555 src2,
1556 opcode,
1557 } => {
1558 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
1559 let src2 = src2.pretty_print(size.to_bytes());
1560 let op = match opcode {
1561 CmpOpcode::Cmp => "cmp",
1562 CmpOpcode::Test => "test",
1563 };
1564 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
1565 format!("{op} {src2}, {src1}")
1566 }
1567
1568 Inst::Setcc { cc, dst } => {
1569 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
1570 let op = ljustify2("set".to_string(), cc.to_string());
1571 format!("{op} {dst}")
1572 }
1573
1574 Inst::Bswap { size, src, dst } => {
1575 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1576 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1577 let op = ljustify2("bswap".to_string(), suffix_bwlq(*size));
1578 format!("{op} {src}, {dst}")
1579 }
1580
1581 Inst::Cmove {
1582 size,
1583 cc,
1584 consequent,
1585 alternative,
1586 dst,
1587 } => {
1588 let alternative = pretty_print_reg(alternative.to_reg(), size.to_bytes());
1589 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1590 let consequent = consequent.pretty_print(size.to_bytes());
1591 let op = ljustify(format!("cmov{}{}", cc.to_string(), suffix_bwlq(*size)));
1592 format!("{op} {consequent}, {alternative}, {dst}")
1593 }
1594
1595 Inst::XmmCmove {
1596 ty,
1597 cc,
1598 consequent,
1599 alternative,
1600 dst,
1601 ..
1602 } => {
1603 let size = u8::try_from(ty.bytes()).unwrap();
1604 let alternative = pretty_print_reg(alternative.to_reg(), size);
1605 let dst = pretty_print_reg(dst.to_reg().to_reg(), size);
1606 let consequent = pretty_print_reg(consequent.to_reg(), size);
1607 let suffix = match *ty {
1608 types::F64 => "sd",
1609 types::F32 => "ss",
1610 types::F16 => "ss",
1611 types::F32X4 => "aps",
1612 types::F64X2 => "apd",
1613 _ => "dqa",
1614 };
1615 let cc = cc.invert();
1616 format!(
1617 "mov{suffix} {alternative}, {dst}; \
1618 j{cc} $next; \
1619 mov{suffix} {consequent}, {dst}; \
1620 $next:"
1621 )
1622 }
1623
1624 Inst::Push64 { src } => {
1625 let src = src.pretty_print(8);
1626 let op = ljustify("pushq".to_string());
1627 format!("{op} {src}")
1628 }
1629
1630 Inst::StackProbeLoop {
1631 tmp,
1632 frame_size,
1633 guard_size,
1634 } => {
1635 let tmp = pretty_print_reg(tmp.to_reg(), 8);
1636 let op = ljustify("stack_probe_loop".to_string());
1637 format!("{op} {tmp}, frame_size={frame_size}, guard_size={guard_size}")
1638 }
1639
1640 Inst::Pop64 { dst } => {
1641 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1642 let op = ljustify("popq".to_string());
1643 format!("{op} {dst}")
1644 }
1645
1646 Inst::CallKnown { info } => {
1647 let op = ljustify("call".to_string());
1648 format!("{op} {:?}", info.dest)
1649 }
1650
1651 Inst::CallUnknown { info } => {
1652 let dest = info.dest.pretty_print(8);
1653 let op = ljustify("call".to_string());
1654 format!("{op} *{dest}")
1655 }
1656
1657 Inst::ReturnCallKnown { info } => {
1658 let ReturnCallInfo {
1659 uses,
1660 new_stack_arg_size,
1661 tmp,
1662 dest,
1663 } = &**info;
1664 let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
1665 let mut s = format!("return_call_known {dest:?} ({new_stack_arg_size}) tmp={tmp}");
1666 for ret in uses {
1667 let preg = regs::show_reg(ret.preg);
1668 let vreg = pretty_print_reg(ret.vreg, 8);
1669 write!(&mut s, " {vreg}={preg}").unwrap();
1670 }
1671 s
1672 }
1673
1674 Inst::ReturnCallUnknown { info } => {
1675 let ReturnCallInfo {
1676 uses,
1677 new_stack_arg_size,
1678 tmp,
1679 dest,
1680 } = &**info;
1681 let callee = pretty_print_reg(*dest, 8);
1682 let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
1683 let mut s =
1684 format!("return_call_unknown {callee} ({new_stack_arg_size}) tmp={tmp}");
1685 for ret in uses {
1686 let preg = regs::show_reg(ret.preg);
1687 let vreg = pretty_print_reg(ret.vreg, 8);
1688 write!(&mut s, " {vreg}={preg}").unwrap();
1689 }
1690 s
1691 }
1692
1693 Inst::Args { args } => {
1694 let mut s = "args".to_string();
1695 for arg in args {
1696 let preg = regs::show_reg(arg.preg);
1697 let def = pretty_print_reg(arg.vreg.to_reg(), 8);
1698 write!(&mut s, " {def}={preg}").unwrap();
1699 }
1700 s
1701 }
1702
1703 Inst::Rets { rets } => {
1704 let mut s = "rets".to_string();
1705 for ret in rets {
1706 let preg = regs::show_reg(ret.preg);
1707 let vreg = pretty_print_reg(ret.vreg, 8);
1708 write!(&mut s, " {vreg}={preg}").unwrap();
1709 }
1710 s
1711 }
1712
1713 Inst::Ret { stack_bytes_to_pop } => {
1714 let mut s = "ret".to_string();
1715 if *stack_bytes_to_pop != 0 {
1716 write!(&mut s, " {stack_bytes_to_pop}").unwrap();
1717 }
1718 s
1719 }
1720
1721 Inst::StackSwitchBasic {
1722 store_context_ptr,
1723 load_context_ptr,
1724 in_payload0,
1725 out_payload0,
1726 } => {
1727 let store_context_ptr = pretty_print_reg(**store_context_ptr, 8);
1728 let load_context_ptr = pretty_print_reg(**load_context_ptr, 8);
1729 let in_payload0 = pretty_print_reg(**in_payload0, 8);
1730 let out_payload0 = pretty_print_reg(*out_payload0.to_reg(), 8);
1731 format!("{out_payload0} = stack_switch_basic {store_context_ptr}, {load_context_ptr}, {in_payload0}")
1732 }
1733
1734 Inst::JmpKnown { dst } => {
1735 let op = ljustify("jmp".to_string());
1736 let dst = dst.to_string();
1737 format!("{op} {dst}")
1738 }
1739
1740 Inst::WinchJmpIf { cc, taken } => {
1741 let taken = taken.to_string();
1742 let op = ljustify2("j".to_string(), cc.to_string());
1743 format!("{op} {taken}")
1744 }
1745
1746 Inst::JmpCondOr {
1747 cc1,
1748 cc2,
1749 taken,
1750 not_taken,
1751 } => {
1752 let taken = taken.to_string();
1753 let not_taken = not_taken.to_string();
1754 let op = ljustify(format!("j{cc1},{cc2}"));
1755 format!("{op} {taken}; j {not_taken}")
1756 }
1757
1758 Inst::JmpCond {
1759 cc,
1760 taken,
1761 not_taken,
1762 } => {
1763 let taken = taken.to_string();
1764 let not_taken = not_taken.to_string();
1765 let op = ljustify2("j".to_string(), cc.to_string());
1766 format!("{op} {taken}; j {not_taken}")
1767 }
1768
1769 Inst::JmpTableSeq {
1770 idx, tmp1, tmp2, ..
1771 } => {
1772 let idx = pretty_print_reg(*idx, 8);
1773 let tmp1 = pretty_print_reg(tmp1.to_reg(), 8);
1774 let tmp2 = pretty_print_reg(tmp2.to_reg(), 8);
1775 let op = ljustify("br_table".into());
1776 format!("{op} {idx}, {tmp1}, {tmp2}")
1777 }
1778
1779 Inst::JmpUnknown { target } => {
1780 let target = target.pretty_print(8);
1781 let op = ljustify("jmp".to_string());
1782 format!("{op} *{target}")
1783 }
1784
1785 Inst::TrapIf { cc, trap_code, .. } => {
1786 format!("j{cc} #trap={trap_code}")
1787 }
1788
1789 Inst::TrapIfAnd {
1790 cc1,
1791 cc2,
1792 trap_code,
1793 ..
1794 } => {
1795 let cc1 = cc1.invert();
1796 let cc2 = cc2.invert();
1797 format!("trap_if_and {cc1}, {cc2}, {trap_code}")
1798 }
1799
1800 Inst::TrapIfOr {
1801 cc1,
1802 cc2,
1803 trap_code,
1804 ..
1805 } => {
1806 let cc2 = cc2.invert();
1807 format!("trap_if_or {cc1}, {cc2}, {trap_code}")
1808 }
1809
1810 Inst::LoadExtName {
1811 dst, name, offset, ..
1812 } => {
1813 let dst = pretty_print_reg(dst.to_reg(), 8);
1814 let name = name.display(None);
1815 let op = ljustify("load_ext_name".into());
1816 format!("{op} {name}+{offset}, {dst}")
1817 }
1818
1819 Inst::LockCmpxchg {
1820 ty,
1821 replacement,
1822 expected,
1823 mem,
1824 dst_old,
1825 ..
1826 } => {
1827 let size = ty.bytes() as u8;
1828 let replacement = pretty_print_reg(*replacement, size);
1829 let expected = pretty_print_reg(*expected, size);
1830 let dst_old = pretty_print_reg(dst_old.to_reg(), size);
1831 let mem = mem.pretty_print(size);
1832 let suffix = suffix_bwlq(OperandSize::from_bytes(size as u32));
1833 format!(
1834 "lock cmpxchg{suffix} {replacement}, {mem}, expected={expected}, dst_old={dst_old}"
1835 )
1836 }
1837
1838 Inst::LockCmpxchg16b {
1839 replacement_low,
1840 replacement_high,
1841 expected_low,
1842 expected_high,
1843 mem,
1844 dst_old_low,
1845 dst_old_high,
1846 ..
1847 } => {
1848 let replacement_low = pretty_print_reg(*replacement_low, 8);
1849 let replacement_high = pretty_print_reg(*replacement_high, 8);
1850 let expected_low = pretty_print_reg(*expected_low, 8);
1851 let expected_high = pretty_print_reg(*expected_high, 8);
1852 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1853 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1854 let mem = mem.pretty_print(16);
1855 format!(
1856 "lock cmpxchg16b {mem}, replacement={replacement_high}:{replacement_low}, expected={expected_high}:{expected_low}, dst_old={dst_old_high}:{dst_old_low}"
1857 )
1858 }
1859
1860 Inst::LockXadd {
1861 size,
1862 operand,
1863 mem,
1864 dst_old,
1865 } => {
1866 let operand = pretty_print_reg(*operand, size.to_bytes());
1867 let dst_old = pretty_print_reg(dst_old.to_reg(), size.to_bytes());
1868 let mem = mem.pretty_print(size.to_bytes());
1869 let suffix = suffix_bwlq(*size);
1870 format!("lock xadd{suffix} {operand}, {mem}, dst_old={dst_old}")
1871 }
1872
1873 Inst::Xchg {
1874 size,
1875 operand,
1876 mem,
1877 dst_old,
1878 } => {
1879 let operand = pretty_print_reg(*operand, size.to_bytes());
1880 let dst_old = pretty_print_reg(dst_old.to_reg(), size.to_bytes());
1881 let mem = mem.pretty_print(size.to_bytes());
1882 let suffix = suffix_bwlq(*size);
1883 format!("xchg{suffix} {operand}, {mem}, dst_old={dst_old}")
1884 }
1885
1886 Inst::AtomicRmwSeq { ty, op, .. } => {
1887 let ty = ty.bits();
1888 format!(
1889 "atomically {{ {ty}_bits_at_[%r9] {op:?}= %r10; %rax = old_value_at_[%r9]; %r11, %rflags = trash }}"
1890 )
1891 }
1892
1893 Inst::Atomic128RmwSeq {
1894 op,
1895 mem,
1896 operand_low,
1897 operand_high,
1898 temp_low,
1899 temp_high,
1900 dst_old_low,
1901 dst_old_high,
1902 } => {
1903 let operand_low = pretty_print_reg(*operand_low, 8);
1904 let operand_high = pretty_print_reg(*operand_high, 8);
1905 let temp_low = pretty_print_reg(temp_low.to_reg(), 8);
1906 let temp_high = pretty_print_reg(temp_high.to_reg(), 8);
1907 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1908 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1909 let mem = mem.pretty_print(16);
1910 format!("atomically {{ {dst_old_high}:{dst_old_low} = {mem}; {temp_high}:{temp_low} = {dst_old_high}:{dst_old_low} {op:?} {operand_high}:{operand_low}; {mem} = {temp_high}:{temp_low} }}")
1911 }
1912
1913 Inst::Atomic128XchgSeq {
1914 mem,
1915 operand_low,
1916 operand_high,
1917 dst_old_low,
1918 dst_old_high,
1919 } => {
1920 let operand_low = pretty_print_reg(*operand_low, 8);
1921 let operand_high = pretty_print_reg(*operand_high, 8);
1922 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1923 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1924 let mem = mem.pretty_print(16);
1925 format!("atomically {{ {dst_old_high}:{dst_old_low} = {mem}; {mem} = {operand_high}:{operand_low} }}")
1926 }
1927
1928 Inst::Fence { kind } => match kind {
1929 FenceKind::MFence => "mfence".to_string(),
1930 FenceKind::LFence => "lfence".to_string(),
1931 FenceKind::SFence => "sfence".to_string(),
1932 },
1933
1934 Inst::Hlt => "hlt".into(),
1935
1936 Inst::Ud2 { trap_code } => format!("ud2 {trap_code}"),
1937
1938 Inst::ElfTlsGetAddr { symbol, dst } => {
1939 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1940 format!("{dst} = elf_tls_get_addr {symbol:?}")
1941 }
1942
1943 Inst::MachOTlsGetAddr { symbol, dst } => {
1944 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1945 format!("{dst} = macho_tls_get_addr {symbol:?}")
1946 }
1947
1948 Inst::CoffTlsGetAddr { symbol, dst, tmp } => {
1949 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1950 let tmp = tmp.to_reg().to_reg();
1951
1952 let mut s = format!("{dst} = coff_tls_get_addr {symbol:?}");
1953 if tmp.is_virtual() {
1954 let tmp = show_ireg_sized(tmp, 8);
1955 write!(&mut s, ", {tmp}").unwrap();
1956 };
1957
1958 s
1959 }
1960
1961 Inst::Unwind { inst } => format!("unwind {inst:?}"),
1962
1963 Inst::DummyUse { reg } => {
1964 let reg = pretty_print_reg(*reg, 8);
1965 format!("dummy_use {reg}")
1966 }
1967
1968 Inst::External { inst } => {
1969 format!("{inst}")
1970 }
1971 }
1972 }
1973}
1974
1975impl fmt::Debug for Inst {
1976 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1977 write!(fmt, "{}", self.pretty_print_inst(&mut Default::default()))
1978 }
1979}
1980
1981fn x64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
1982 match inst {
1992 Inst::AluRmiR {
1993 src1, src2, dst, ..
1994 } => {
1995 collector.reg_use(src1);
1996 collector.reg_reuse_def(dst, 0);
1997 src2.get_operands(collector);
1998 }
1999 Inst::AluConstOp { dst, .. } => collector.reg_def(dst),
2000 Inst::AluRmRVex {
2001 src1, src2, dst, ..
2002 } => {
2003 collector.reg_def(dst);
2004 collector.reg_use(src1);
2005 src2.get_operands(collector);
2006 }
2007 Inst::Not { src, dst, .. } => {
2008 collector.reg_use(src);
2009 collector.reg_reuse_def(dst, 0);
2010 }
2011 Inst::Neg { src, dst, .. } => {
2012 collector.reg_use(src);
2013 collector.reg_reuse_def(dst, 0);
2014 }
2015 Inst::Div {
2016 divisor,
2017 dividend_lo,
2018 dividend_hi,
2019 dst_quotient,
2020 dst_remainder,
2021 ..
2022 } => {
2023 divisor.get_operands(collector);
2024 collector.reg_fixed_use(dividend_lo, regs::rax());
2025 collector.reg_fixed_use(dividend_hi, regs::rdx());
2026 collector.reg_fixed_def(dst_quotient, regs::rax());
2027 collector.reg_fixed_def(dst_remainder, regs::rdx());
2028 }
2029 Inst::CheckedSRemSeq {
2030 divisor,
2031 dividend_lo,
2032 dividend_hi,
2033 dst_quotient,
2034 dst_remainder,
2035 ..
2036 } => {
2037 collector.reg_use(divisor);
2038 collector.reg_fixed_use(dividend_lo, regs::rax());
2039 collector.reg_fixed_use(dividend_hi, regs::rdx());
2040 collector.reg_fixed_def(dst_quotient, regs::rax());
2041 collector.reg_fixed_def(dst_remainder, regs::rdx());
2042 }
2043 Inst::Div8 {
2044 divisor,
2045 dividend,
2046 dst,
2047 ..
2048 } => {
2049 divisor.get_operands(collector);
2050 collector.reg_fixed_use(dividend, regs::rax());
2051 collector.reg_fixed_def(dst, regs::rax());
2052 }
2053 Inst::CheckedSRemSeq8 {
2054 divisor,
2055 dividend,
2056 dst,
2057 ..
2058 } => {
2059 collector.reg_use(divisor);
2060 collector.reg_fixed_use(dividend, regs::rax());
2061 collector.reg_fixed_def(dst, regs::rax());
2062 }
2063 Inst::Mul {
2064 src1,
2065 src2,
2066 dst_lo,
2067 dst_hi,
2068 ..
2069 } => {
2070 collector.reg_fixed_use(src1, regs::rax());
2071 collector.reg_fixed_def(dst_lo, regs::rax());
2072 collector.reg_fixed_def(dst_hi, regs::rdx());
2073 src2.get_operands(collector);
2074 }
2075 Inst::Mul8 {
2076 src1, src2, dst, ..
2077 } => {
2078 collector.reg_fixed_use(src1, regs::rax());
2079 collector.reg_fixed_def(dst, regs::rax());
2080 src2.get_operands(collector);
2081 }
2082 Inst::IMul {
2083 src1, src2, dst, ..
2084 } => {
2085 collector.reg_use(src1);
2086 collector.reg_reuse_def(dst, 0);
2087 src2.get_operands(collector);
2088 }
2089 Inst::IMulImm { src1, dst, .. } => {
2090 collector.reg_def(dst);
2091 src1.get_operands(collector);
2092 }
2093 Inst::MulX {
2094 src1,
2095 src2,
2096 dst_lo,
2097 dst_hi,
2098 ..
2099 } => {
2100 if !dst_lo.to_reg().is_invalid_sentinel() {
2101 collector.reg_def(dst_lo);
2102 }
2103 collector.reg_def(dst_hi);
2104 collector.reg_fixed_use(src1, regs::rdx());
2105 src2.get_operands(collector);
2106 }
2107 Inst::SignExtendData { size, src, dst } => {
2108 match size {
2109 OperandSize::Size8 => {
2110 collector.reg_fixed_use(src, regs::rax());
2113 collector.reg_fixed_def(dst, regs::rax());
2114 }
2115 _ => {
2116 collector.reg_fixed_use(src, regs::rax());
2119 collector.reg_fixed_def(dst, regs::rdx());
2120 }
2121 }
2122 }
2123 Inst::UnaryRmR { src, dst, .. }
2124 | Inst::UnaryRmRVex { src, dst, .. }
2125 | Inst::UnaryRmRImmVex { src, dst, .. } => {
2126 collector.reg_def(dst);
2127 src.get_operands(collector);
2128 }
2129 Inst::XmmUnaryRmR { src, dst, .. } | Inst::XmmUnaryRmRImm { src, dst, .. } => {
2130 collector.reg_def(dst);
2131 src.get_operands(collector);
2132 }
2133 Inst::XmmUnaryRmREvex { src, dst, .. }
2134 | Inst::XmmUnaryRmRImmEvex { src, dst, .. }
2135 | Inst::XmmUnaryRmRUnaligned { src, dst, .. }
2136 | Inst::XmmUnaryRmRVex { src, dst, .. }
2137 | Inst::XmmUnaryRmRImmVex { src, dst, .. } => {
2138 collector.reg_def(dst);
2139 src.get_operands(collector);
2140 }
2141 Inst::XmmRmR {
2142 src1, src2, dst, ..
2143 } => {
2144 collector.reg_use(src1);
2145 collector.reg_reuse_def(dst, 0);
2146 src2.get_operands(collector);
2147 }
2148 Inst::XmmRmRUnaligned {
2149 src1, src2, dst, ..
2150 } => {
2151 collector.reg_use(src1);
2152 collector.reg_reuse_def(dst, 0);
2153 src2.get_operands(collector);
2154 }
2155 Inst::XmmRmRBlend {
2156 src1,
2157 src2,
2158 mask,
2159 dst,
2160 op,
2161 } => {
2162 assert!(matches!(
2163 op,
2164 SseOpcode::Blendvpd | SseOpcode::Blendvps | SseOpcode::Pblendvb
2165 ));
2166 collector.reg_use(src1);
2167 collector.reg_fixed_use(mask, regs::xmm0());
2168 collector.reg_reuse_def(dst, 0);
2169 src2.get_operands(collector);
2170 }
2171 Inst::XmmRmiRVex {
2172 src1, src2, dst, ..
2173 } => {
2174 collector.reg_def(dst);
2175 collector.reg_use(src1);
2176 src2.get_operands(collector);
2177 }
2178 Inst::XmmRmRImmVex {
2179 src1, src2, dst, ..
2180 } => {
2181 collector.reg_def(dst);
2182 collector.reg_use(src1);
2183 src2.get_operands(collector);
2184 }
2185 Inst::XmmVexPinsr {
2186 src1, src2, dst, ..
2187 } => {
2188 collector.reg_def(dst);
2189 collector.reg_use(src1);
2190 src2.get_operands(collector);
2191 }
2192 Inst::XmmRmRVex3 {
2193 src1,
2194 src2,
2195 src3,
2196 dst,
2197 ..
2198 } => {
2199 collector.reg_use(src1);
2200 collector.reg_reuse_def(dst, 0);
2201 collector.reg_use(src2);
2202 src3.get_operands(collector);
2203 }
2204 Inst::XmmRmRBlendVex {
2205 src1,
2206 src2,
2207 mask,
2208 dst,
2209 ..
2210 } => {
2211 collector.reg_def(dst);
2212 collector.reg_use(src1);
2213 src2.get_operands(collector);
2214 collector.reg_use(mask);
2215 }
2216 Inst::XmmRmREvex {
2217 op,
2218 src1,
2219 src2,
2220 dst,
2221 ..
2222 } => {
2223 assert_ne!(*op, Avx512Opcode::Vpermi2b);
2224 collector.reg_use(src1);
2225 src2.get_operands(collector);
2226 collector.reg_def(dst);
2227 }
2228 Inst::XmmRmREvex3 {
2229 op,
2230 src1,
2231 src2,
2232 src3,
2233 dst,
2234 ..
2235 } => {
2236 assert_eq!(*op, Avx512Opcode::Vpermi2b);
2237 collector.reg_use(src1);
2238 collector.reg_use(src2);
2239 src3.get_operands(collector);
2240 collector.reg_reuse_def(dst, 0); }
2242 Inst::XmmRmRImm {
2243 src1, src2, dst, ..
2244 } => {
2245 collector.reg_use(src1);
2246 collector.reg_reuse_def(dst, 0);
2247 src2.get_operands(collector);
2248 }
2249 Inst::XmmUninitializedValue { dst } => collector.reg_def(dst),
2250 Inst::XmmMinMaxSeq { lhs, rhs, dst, .. } => {
2251 collector.reg_use(rhs);
2252 collector.reg_use(lhs);
2253 collector.reg_reuse_def(dst, 0); }
2255 Inst::XmmRmiReg {
2256 src1, src2, dst, ..
2257 } => {
2258 collector.reg_use(src1);
2259 collector.reg_reuse_def(dst, 0); src2.get_operands(collector);
2261 }
2262 Inst::XmmMovRM { src, dst, .. }
2263 | Inst::XmmMovRMVex { src, dst, .. }
2264 | Inst::XmmMovRMImm { src, dst, .. }
2265 | Inst::XmmMovRMImmVex { src, dst, .. } => {
2266 collector.reg_use(src);
2267 dst.get_operands(collector);
2268 }
2269 Inst::XmmCmpRmR { src1, src2, .. } => {
2270 collector.reg_use(src1);
2271 src2.get_operands(collector);
2272 }
2273 Inst::XmmCmpRmRVex { src1, src2, .. } => {
2274 collector.reg_use(src1);
2275 src2.get_operands(collector);
2276 }
2277 Inst::Imm { dst, .. } => {
2278 collector.reg_def(dst);
2279 }
2280 Inst::MovRR { src, dst, .. } => {
2281 collector.reg_use(src);
2282 collector.reg_def(dst);
2283 }
2284 Inst::MovFromPReg { dst, src } => {
2285 debug_assert!(dst.to_reg().to_reg().is_virtual());
2286 collector.reg_fixed_nonallocatable(*src);
2287 collector.reg_def(dst);
2288 }
2289 Inst::MovToPReg { dst, src } => {
2290 debug_assert!(src.to_reg().is_virtual());
2291 collector.reg_use(src);
2292 collector.reg_fixed_nonallocatable(*dst);
2293 }
2294 Inst::XmmToGpr { src, dst, .. }
2295 | Inst::XmmToGprVex { src, dst, .. }
2296 | Inst::XmmToGprImm { src, dst, .. }
2297 | Inst::XmmToGprImmVex { src, dst, .. } => {
2298 collector.reg_use(src);
2299 collector.reg_def(dst);
2300 }
2301 Inst::GprToXmm { src, dst, .. } | Inst::GprToXmmVex { src, dst, .. } => {
2302 collector.reg_def(dst);
2303 src.get_operands(collector);
2304 }
2305 Inst::CvtIntToFloat {
2306 src1, src2, dst, ..
2307 } => {
2308 collector.reg_use(src1);
2309 collector.reg_reuse_def(dst, 0);
2310 src2.get_operands(collector);
2311 }
2312 Inst::CvtIntToFloatVex {
2313 src1, src2, dst, ..
2314 } => {
2315 collector.reg_def(dst);
2316 collector.reg_use(src1);
2317 src2.get_operands(collector);
2318 }
2319 Inst::CvtUint64ToFloatSeq {
2320 src,
2321 dst,
2322 tmp_gpr1,
2323 tmp_gpr2,
2324 ..
2325 } => {
2326 collector.reg_use(src);
2327 collector.reg_early_def(dst);
2328 collector.reg_early_def(tmp_gpr1);
2329 collector.reg_early_def(tmp_gpr2);
2330 }
2331 Inst::CvtFloatToSintSeq {
2332 src,
2333 dst,
2334 tmp_xmm,
2335 tmp_gpr,
2336 ..
2337 } => {
2338 collector.reg_use(src);
2339 collector.reg_early_def(dst);
2340 collector.reg_early_def(tmp_gpr);
2341 collector.reg_early_def(tmp_xmm);
2342 }
2343 Inst::CvtFloatToUintSeq {
2344 src,
2345 dst,
2346 tmp_gpr,
2347 tmp_xmm,
2348 tmp_xmm2,
2349 ..
2350 } => {
2351 collector.reg_use(src);
2352 collector.reg_early_def(dst);
2353 collector.reg_early_def(tmp_gpr);
2354 collector.reg_early_def(tmp_xmm);
2355 collector.reg_early_def(tmp_xmm2);
2356 }
2357
2358 Inst::MovImmM { dst, .. } => {
2359 dst.get_operands(collector);
2360 }
2361
2362 Inst::MovzxRmR { src, dst, .. } => {
2363 collector.reg_def(dst);
2364 src.get_operands(collector);
2365 }
2366 Inst::Mov64MR { src, dst, .. } => {
2367 collector.reg_def(dst);
2368 src.get_operands(collector);
2369 }
2370 Inst::LoadEffectiveAddress { addr: src, dst, .. } => {
2371 collector.reg_def(dst);
2372 src.get_operands(collector);
2373 }
2374 Inst::MovsxRmR { src, dst, .. } => {
2375 collector.reg_def(dst);
2376 src.get_operands(collector);
2377 }
2378 Inst::MovRM { src, dst, .. } => {
2379 collector.reg_use(src);
2380 dst.get_operands(collector);
2381 }
2382 Inst::ShiftR {
2383 num_bits, src, dst, ..
2384 } => {
2385 collector.reg_use(src);
2386 collector.reg_reuse_def(dst, 0);
2387 if let Imm8Reg::Reg { reg } = num_bits.as_imm8_reg_mut() {
2388 collector.reg_fixed_use(reg, regs::rcx());
2389 }
2390 }
2391 Inst::CmpRmiR { src1, src2, .. } => {
2392 collector.reg_use(src1);
2393 src2.get_operands(collector);
2394 }
2395 Inst::Setcc { dst, .. } => {
2396 collector.reg_def(dst);
2397 }
2398 Inst::Bswap { src, dst, .. } => {
2399 collector.reg_use(src);
2400 collector.reg_reuse_def(dst, 0);
2401 }
2402 Inst::Cmove {
2403 consequent,
2404 alternative,
2405 dst,
2406 ..
2407 } => {
2408 collector.reg_use(alternative);
2409 collector.reg_reuse_def(dst, 0);
2410 consequent.get_operands(collector);
2411 }
2412 Inst::XmmCmove {
2413 consequent,
2414 alternative,
2415 dst,
2416 ..
2417 } => {
2418 collector.reg_use(alternative);
2419 collector.reg_reuse_def(dst, 0);
2420 collector.reg_use(consequent);
2421 }
2422 Inst::Push64 { src } => {
2423 src.get_operands(collector);
2424 }
2425 Inst::Pop64 { dst } => {
2426 collector.reg_def(dst);
2427 }
2428 Inst::StackProbeLoop { tmp, .. } => {
2429 collector.reg_early_def(tmp);
2430 }
2431
2432 Inst::CallKnown { info } => {
2433 let CallInfo {
2438 uses,
2439 defs,
2440 clobbers,
2441 dest,
2442 ..
2443 } = &mut **info;
2444 debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
2445 for CallArgPair { vreg, preg } in uses {
2446 collector.reg_fixed_use(vreg, *preg);
2447 }
2448 for CallRetPair { vreg, preg } in defs {
2449 collector.reg_fixed_def(vreg, *preg);
2450 }
2451 collector.reg_clobbers(*clobbers);
2452 }
2453
2454 Inst::CallUnknown { info } => {
2455 let CallInfo {
2456 uses,
2457 defs,
2458 clobbers,
2459 callee_conv,
2460 dest,
2461 ..
2462 } = &mut **info;
2463 match dest {
2464 RegMem::Reg { reg } if *callee_conv == CallConv::Winch => {
2465 collector.reg_fixed_use(reg, regs::r10())
2469 }
2470 _ => dest.get_operands(collector),
2471 }
2472 for CallArgPair { vreg, preg } in uses {
2473 collector.reg_fixed_use(vreg, *preg);
2474 }
2475 for CallRetPair { vreg, preg } in defs {
2476 collector.reg_fixed_def(vreg, *preg);
2477 }
2478 collector.reg_clobbers(*clobbers);
2479 }
2480 Inst::StackSwitchBasic {
2481 store_context_ptr,
2482 load_context_ptr,
2483 in_payload0,
2484 out_payload0,
2485 } => {
2486 collector.reg_use(load_context_ptr);
2487 collector.reg_use(store_context_ptr);
2488 collector.reg_fixed_use(in_payload0, stack_switch::payload_register());
2489 collector.reg_fixed_def(out_payload0, stack_switch::payload_register());
2490
2491 let mut clobbers = crate::isa::x64::abi::ALL_CLOBBERS;
2492 clobbers.remove(
2494 stack_switch::payload_register()
2495 .to_real_reg()
2496 .unwrap()
2497 .into(),
2498 );
2499 collector.reg_clobbers(clobbers);
2500 }
2501
2502 Inst::ReturnCallKnown { info } => {
2503 let ReturnCallInfo {
2504 dest, uses, tmp, ..
2505 } = &mut **info;
2506 collector.reg_fixed_def(tmp, regs::r11());
2507 debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
2509 for CallArgPair { vreg, preg } in uses {
2510 collector.reg_fixed_use(vreg, *preg);
2511 }
2512 }
2513
2514 Inst::ReturnCallUnknown { info } => {
2515 let ReturnCallInfo {
2516 dest, uses, tmp, ..
2517 } = &mut **info;
2518
2519 collector.reg_fixed_use(dest, regs::r10());
2525
2526 collector.reg_fixed_def(tmp, regs::r11());
2527 for CallArgPair { vreg, preg } in uses {
2528 collector.reg_fixed_use(vreg, *preg);
2529 }
2530 }
2531
2532 Inst::JmpTableSeq {
2533 idx, tmp1, tmp2, ..
2534 } => {
2535 collector.reg_use(idx);
2536 collector.reg_early_def(tmp1);
2537 collector.reg_def(tmp2);
2541 }
2542
2543 Inst::JmpUnknown { target } => {
2544 target.get_operands(collector);
2545 }
2546
2547 Inst::LoadExtName { dst, .. } => {
2548 collector.reg_def(dst);
2549 }
2550
2551 Inst::LockCmpxchg {
2552 replacement,
2553 expected,
2554 mem,
2555 dst_old,
2556 ..
2557 } => {
2558 collector.reg_use(replacement);
2559 collector.reg_fixed_use(expected, regs::rax());
2560 collector.reg_fixed_def(dst_old, regs::rax());
2561 mem.get_operands(collector);
2562 }
2563
2564 Inst::LockCmpxchg16b {
2565 replacement_low,
2566 replacement_high,
2567 expected_low,
2568 expected_high,
2569 mem,
2570 dst_old_low,
2571 dst_old_high,
2572 ..
2573 } => {
2574 collector.reg_fixed_use(replacement_low, regs::rbx());
2575 collector.reg_fixed_use(replacement_high, regs::rcx());
2576 collector.reg_fixed_use(expected_low, regs::rax());
2577 collector.reg_fixed_use(expected_high, regs::rdx());
2578 collector.reg_fixed_def(dst_old_low, regs::rax());
2579 collector.reg_fixed_def(dst_old_high, regs::rdx());
2580 mem.get_operands(collector);
2581 }
2582
2583 Inst::LockXadd {
2584 operand,
2585 mem,
2586 dst_old,
2587 ..
2588 } => {
2589 collector.reg_use(operand);
2590 collector.reg_reuse_def(dst_old, 0);
2591 mem.get_operands(collector);
2592 }
2593
2594 Inst::Xchg {
2595 operand,
2596 mem,
2597 dst_old,
2598 ..
2599 } => {
2600 collector.reg_use(operand);
2601 collector.reg_reuse_def(dst_old, 0);
2602 mem.get_operands(collector);
2603 }
2604
2605 Inst::AtomicRmwSeq {
2606 operand,
2607 temp,
2608 dst_old,
2609 mem,
2610 ..
2611 } => {
2612 collector.reg_late_use(operand);
2613 collector.reg_early_def(temp);
2614 collector.reg_fixed_def(dst_old, regs::rax());
2617 mem.get_operands_late(collector)
2618 }
2619
2620 Inst::Atomic128RmwSeq {
2621 operand_low,
2622 operand_high,
2623 temp_low,
2624 temp_high,
2625 dst_old_low,
2626 dst_old_high,
2627 mem,
2628 ..
2629 } => {
2630 collector.reg_late_use(operand_low);
2632 collector.reg_late_use(operand_high);
2633 collector.reg_fixed_def(temp_low, regs::rbx());
2634 collector.reg_fixed_def(temp_high, regs::rcx());
2635 collector.reg_fixed_def(dst_old_low, regs::rax());
2636 collector.reg_fixed_def(dst_old_high, regs::rdx());
2637 mem.get_operands_late(collector)
2638 }
2639
2640 Inst::Atomic128XchgSeq {
2641 operand_low,
2642 operand_high,
2643 dst_old_low,
2644 dst_old_high,
2645 mem,
2646 ..
2647 } => {
2648 collector.reg_fixed_late_use(operand_low, regs::rbx());
2650 collector.reg_fixed_late_use(operand_high, regs::rcx());
2651 collector.reg_fixed_def(dst_old_low, regs::rax());
2652 collector.reg_fixed_def(dst_old_high, regs::rdx());
2653 mem.get_operands_late(collector)
2654 }
2655
2656 Inst::Args { args } => {
2657 for ArgPair { vreg, preg } in args {
2658 collector.reg_fixed_def(vreg, *preg);
2659 }
2660 }
2661
2662 Inst::Rets { rets } => {
2663 for RetPair { vreg, preg } in rets {
2666 collector.reg_fixed_use(vreg, *preg);
2667 }
2668 }
2669
2670 Inst::JmpKnown { .. }
2671 | Inst::WinchJmpIf { .. }
2672 | Inst::JmpCond { .. }
2673 | Inst::JmpCondOr { .. }
2674 | Inst::Ret { .. }
2675 | Inst::Nop { .. }
2676 | Inst::TrapIf { .. }
2677 | Inst::TrapIfAnd { .. }
2678 | Inst::TrapIfOr { .. }
2679 | Inst::Hlt
2680 | Inst::Ud2 { .. }
2681 | Inst::Fence { .. } => {
2682 }
2684
2685 Inst::ElfTlsGetAddr { dst, .. } | Inst::MachOTlsGetAddr { dst, .. } => {
2686 collector.reg_fixed_def(dst, regs::rax());
2687 let mut clobbers = X64ABIMachineSpec::get_regs_clobbered_by_call(CallConv::SystemV);
2694 clobbers.remove(regs::gpr_preg(regs::ENC_RAX));
2695 collector.reg_clobbers(clobbers);
2696 }
2697
2698 Inst::CoffTlsGetAddr { dst, tmp, .. } => {
2699 collector.reg_fixed_def(dst, regs::rax());
2704
2705 collector.reg_fixed_def(tmp, regs::rcx());
2707 }
2708
2709 Inst::Unwind { .. } => {}
2710
2711 Inst::DummyUse { reg } => {
2712 collector.reg_use(reg);
2713 }
2714
2715 Inst::External { inst } => {
2716 inst.visit(&mut external::RegallocVisitor { collector });
2717 }
2718 }
2719}
2720
2721impl MachInst for Inst {
2725 type ABIMachineSpec = X64ABIMachineSpec;
2726
2727 fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
2728 x64_get_operands(self, collector)
2729 }
2730
2731 fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
2732 match self {
2733 Self::MovRR { size, src, dst, .. } if *size == OperandSize::Size64 => {
2738 Some((dst.to_writable_reg(), src.to_reg()))
2739 }
2740 Self::XmmUnaryRmR { op, src, dst, .. }
2745 if *op == SseOpcode::Movss
2746 || *op == SseOpcode::Movsd
2747 || *op == SseOpcode::Movaps
2748 || *op == SseOpcode::Movapd
2749 || *op == SseOpcode::Movups
2750 || *op == SseOpcode::Movupd
2751 || *op == SseOpcode::Movdqa
2752 || *op == SseOpcode::Movdqu =>
2753 {
2754 if let RegMem::Reg { reg } = src.clone().to_reg_mem() {
2755 Some((dst.to_writable_reg(), reg))
2756 } else {
2757 None
2758 }
2759 }
2760 _ => None,
2761 }
2762 }
2763
2764 fn is_included_in_clobbers(&self) -> bool {
2765 match self {
2766 &Inst::Args { .. } => false,
2767 _ => true,
2768 }
2769 }
2770
2771 fn is_trap(&self) -> bool {
2772 match self {
2773 Self::Ud2 { .. } => true,
2774 _ => false,
2775 }
2776 }
2777
2778 fn is_args(&self) -> bool {
2779 match self {
2780 Self::Args { .. } => true,
2781 _ => false,
2782 }
2783 }
2784
2785 fn is_term(&self) -> MachTerminator {
2786 match self {
2787 &Self::Rets { .. } => MachTerminator::Ret,
2789 &Self::ReturnCallKnown { .. } | &Self::ReturnCallUnknown { .. } => {
2790 MachTerminator::RetCall
2791 }
2792 &Self::JmpKnown { .. } => MachTerminator::Uncond,
2793 &Self::JmpCond { .. } => MachTerminator::Cond,
2794 &Self::JmpCondOr { .. } => MachTerminator::Cond,
2795 &Self::JmpTableSeq { .. } => MachTerminator::Indirect,
2796 _ => MachTerminator::None,
2798 }
2799 }
2800
2801 fn is_low_level_branch(&self) -> bool {
2802 match self {
2803 &Self::WinchJmpIf { .. } => true,
2804 _ => false,
2805 }
2806 }
2807
2808 fn is_mem_access(&self) -> bool {
2809 panic!("TODO FILL ME OUT")
2810 }
2811
2812 fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst {
2813 trace!(
2814 "Inst::gen_move {:?} -> {:?} (type: {:?})",
2815 src_reg,
2816 dst_reg.to_reg(),
2817 ty
2818 );
2819 let rc_dst = dst_reg.to_reg().class();
2820 let rc_src = src_reg.class();
2821 debug_assert!(rc_dst == rc_src);
2823 match rc_dst {
2824 RegClass::Int => Inst::mov_r_r(OperandSize::Size64, src_reg, dst_reg),
2825 RegClass::Float => {
2826 let opcode = match ty {
2831 types::F16 | types::F32 | types::F64 | types::F32X4 => SseOpcode::Movaps,
2832 types::F64X2 => SseOpcode::Movapd,
2833 _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqa,
2834 _ => unimplemented!("unable to move type: {}", ty),
2835 };
2836 Inst::xmm_unary_rm_r(opcode, RegMem::reg(src_reg), dst_reg)
2837 }
2838 RegClass::Vector => unreachable!(),
2839 }
2840 }
2841
2842 fn gen_nop(preferred_size: usize) -> Inst {
2843 Inst::nop(std::cmp::min(preferred_size, 15) as u8)
2844 }
2845
2846 fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
2847 match ty {
2848 types::I8 => Ok((&[RegClass::Int], &[types::I8])),
2849 types::I16 => Ok((&[RegClass::Int], &[types::I16])),
2850 types::I32 => Ok((&[RegClass::Int], &[types::I32])),
2851 types::I64 => Ok((&[RegClass::Int], &[types::I64])),
2852 types::F16 => Ok((&[RegClass::Float], &[types::F16])),
2853 types::F32 => Ok((&[RegClass::Float], &[types::F32])),
2854 types::F64 => Ok((&[RegClass::Float], &[types::F64])),
2855 types::F128 => Ok((&[RegClass::Float], &[types::F128])),
2856 types::I128 => Ok((&[RegClass::Int, RegClass::Int], &[types::I64, types::I64])),
2857 _ if ty.is_vector() => {
2858 assert!(ty.bits() <= 128);
2859 Ok((&[RegClass::Float], &[types::I8X16]))
2860 }
2861 _ => Err(CodegenError::Unsupported(format!(
2862 "Unexpected SSA-value type: {ty}"
2863 ))),
2864 }
2865 }
2866
2867 fn canonical_type_for_rc(rc: RegClass) -> Type {
2868 match rc {
2869 RegClass::Float => types::I8X16,
2870 RegClass::Int => types::I64,
2871 RegClass::Vector => unreachable!(),
2872 }
2873 }
2874
2875 fn gen_jump(label: MachLabel) -> Inst {
2876 Inst::jmp_known(label)
2877 }
2878
2879 fn gen_imm_u64(value: u64, dst: Writable<Reg>) -> Option<Self> {
2880 Some(Inst::imm(OperandSize::Size64, value, dst))
2881 }
2882
2883 fn gen_imm_f64(value: f64, tmp: Writable<Reg>, dst: Writable<Reg>) -> SmallVec<[Self; 2]> {
2884 let imm_to_gpr = Inst::imm(OperandSize::Size64, value.to_bits(), tmp);
2885 let gpr_to_xmm = Self::gpr_to_xmm(
2886 SseOpcode::Movd,
2887 tmp.to_reg().into(),
2888 OperandSize::Size64,
2889 dst,
2890 );
2891 smallvec![imm_to_gpr, gpr_to_xmm]
2892 }
2893
2894 fn gen_dummy_use(reg: Reg) -> Self {
2895 Inst::DummyUse { reg }
2896 }
2897
2898 fn worst_case_size() -> CodeOffset {
2899 15
2900 }
2901
2902 fn ref_type_regclass(_: &settings::Flags) -> RegClass {
2903 RegClass::Int
2904 }
2905
2906 fn is_safepoint(&self) -> bool {
2907 match self {
2908 Inst::CallKnown { .. } | Inst::CallUnknown { .. } => true,
2909 _ => false,
2910 }
2911 }
2912
2913 fn function_alignment() -> FunctionAlignment {
2914 FunctionAlignment {
2915 minimum: 1,
2916 preferred: 32,
2919 }
2920 }
2921
2922 type LabelUse = LabelUse;
2923
2924 const TRAP_OPCODE: &'static [u8] = &[0x0f, 0x0b];
2925}
2926
/// Flag state threaded through instruction emission for one function:
/// the shared (target-independent) compilation flags plus the x64-specific
/// flag set.
pub struct EmitInfo {
    /// Shared compilation flags; visible to the parent `inst` module.
    pub(super) flags: settings::Flags,
    /// x64-specific ISA flags — NOTE(review): not read in this chunk;
    /// presumably queried by the emission code to select encodings.
    isa_flags: x64_settings::Flags,
}
2932
2933impl EmitInfo {
2934 pub fn new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
2936 Self { flags, isa_flags }
2937 }
2938}
2939
2940impl MachInstEmit for Inst {
2941 type State = EmitState;
2942 type Info = EmitInfo;
2943
2944 fn emit(&self, sink: &mut MachBuffer<Inst>, info: &Self::Info, state: &mut Self::State) {
2945 emit::emit(self, sink, info, state);
2946 }
2947
2948 fn pretty_print_inst(&self, _: &mut Self::State) -> String {
2949 PrettyPrint::pretty_print(self, 0)
2950 }
2951}
2952
/// A use of a label (an internal relocation) in emitted x64 code; the
/// `MachInstLabelUse` impl in this file defines how each kind is patched.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// A 32-bit offset added to the existing value at the patch location,
    /// minus 4: the resolved value is relative to the end of the 4-byte
    /// offset field, as x64 jump/call instructions expect.
    JmpRel32,

    /// A 32-bit offset from the patch location itself, added to the
    /// existing value at that location (no end-of-field adjustment).
    PCRel32,
}
2965
2966impl MachInstLabelUse for LabelUse {
2967 const ALIGN: CodeOffset = 1;
2968
2969 fn max_pos_range(self) -> CodeOffset {
2970 match self {
2971 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x7fff_ffff,
2972 }
2973 }
2974
2975 fn max_neg_range(self) -> CodeOffset {
2976 match self {
2977 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x8000_0000,
2978 }
2979 }
2980
2981 fn patch_size(self) -> CodeOffset {
2982 match self {
2983 LabelUse::JmpRel32 | LabelUse::PCRel32 => 4,
2984 }
2985 }
2986
2987 fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
2988 let pc_rel = (label_offset as i64) - (use_offset as i64);
2989 debug_assert!(pc_rel <= self.max_pos_range() as i64);
2990 debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
2991 let pc_rel = pc_rel as u32;
2992 match self {
2993 LabelUse::JmpRel32 => {
2994 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
2995 let value = pc_rel.wrapping_add(addend).wrapping_sub(4);
2996 buffer.copy_from_slice(&value.to_le_bytes()[..]);
2997 }
2998 LabelUse::PCRel32 => {
2999 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3000 let value = pc_rel.wrapping_add(addend);
3001 buffer.copy_from_slice(&value.to_le_bytes()[..]);
3002 }
3003 }
3004 }
3005
3006 fn supports_veneer(self) -> bool {
3007 match self {
3008 LabelUse::JmpRel32 | LabelUse::PCRel32 => false,
3009 }
3010 }
3011
3012 fn veneer_size(self) -> CodeOffset {
3013 match self {
3014 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0,
3015 }
3016 }
3017
3018 fn worst_case_veneer_size() -> CodeOffset {
3019 0
3020 }
3021
3022 fn generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse) {
3023 match self {
3024 LabelUse::JmpRel32 | LabelUse::PCRel32 => {
3025 panic!("Veneer not supported for JumpRel32 label-use.");
3026 }
3027 }
3028 }
3029
3030 fn from_reloc(reloc: Reloc, addend: Addend) -> Option<Self> {
3031 match (reloc, addend) {
3032 (Reloc::X86CallPCRel4, -4) => Some(LabelUse::JmpRel32),
3033 _ => None,
3034 }
3035 }
3036}