1pub mod evex;
47pub mod instruction_table;
48pub mod opcode;
49pub mod operand;
50pub mod request;
51pub mod types;
52pub mod utils;
53pub mod validation;
54pub mod vex;
55pub mod xop;
56
57pub use evex::{
59 EvexBroadcast, EvexInfo, EvexOpcodeMap, EvexPrefix, EvexVectorLength, EvexZeroingMode,
60};
61pub use instruction_table::{
62 EncodingPrefix, InstructionEncoding, OperandEncoding, OperandType, lookup_encoding,
63};
64pub use operand::{EncoderOperand, MemoryOperand};
65pub use request::EncoderRequest;
66pub use types::{
67 AddressSizeHint, BranchType, BranchWidth, BroadcastMode, EncodableEncoding, EncoderHints,
68 EncoderMode, OperandSizeHint, RoundingMode,
69};
70pub use validation::{EncoderValidator, ValidationError, ValidationErrorKind, ValidationResult};
71pub use vex::{VexInfo, VexOpcodeMap, VexPrefix, VexVectorLength};
72pub use xop::{XopEncoder, XopInfo, XopOpcodeMap, XopPrefix, XopVectorLength};
73
74#[doc(hidden)]
76pub use types::BranchType as ZydisBranchType;
77#[doc(hidden)]
78pub use types::BranchWidth as ZydisBranchWidth;
79#[doc(hidden)]
80pub use types::EncodableEncoding as ZydisEncodableEncoding;
81
82use crate::error::{EncodeError, Error, Result};
83use crate::isa::{MachineMode, Mnemonic, Register};
84
85use opcode::{AvxOpcodeMap, get_avx_opcode, get_mov_imm_opcode, get_mov_imm8_opcode};
87use utils::{
88 displacement_size, encode_modrm, encode_rex, encode_sib, needs_rex_b as check_rex_b,
89 needs_rex_for_8bit, needs_rex_r as check_rex_r, register_id, segment_override_prefix,
90};
91
92fn get_alu_opcode_extension(mnemonic: Mnemonic) -> Option<u8> {
94 match mnemonic {
95 Mnemonic::ADD => Some(0),
96 Mnemonic::OR => Some(1),
97 Mnemonic::AND => Some(4),
98 Mnemonic::SUB => Some(5),
99 Mnemonic::XOR => Some(6),
100 Mnemonic::CMP => Some(7),
101 _ => None,
102 }
103}
104
105fn get_alu_reg_reg_opcode(mnemonic: Mnemonic) -> Option<u8> {
107 match mnemonic {
108 Mnemonic::ADD => Some(0x01), Mnemonic::SUB => Some(0x29), Mnemonic::AND => Some(0x21), Mnemonic::OR => Some(0x09), Mnemonic::XOR => Some(0x31), Mnemonic::CMP => Some(0x39), _ => None,
115 }
116}
117
118fn is_avx_instruction(mnemonic: Mnemonic) -> bool {
120 matches!(
121 mnemonic,
122 Mnemonic::VMOVAPS
123 | Mnemonic::VMOVAPD
124 | Mnemonic::VMOVUPS
125 | Mnemonic::VMOVUPD
126 | Mnemonic::VADDPS
127 | Mnemonic::VADDPD
128 | Mnemonic::VSUBPS
129 | Mnemonic::VSUBPD
130 | Mnemonic::VMULPS
131 | Mnemonic::VMULPD
132 | Mnemonic::VDIVPS
133 | Mnemonic::VDIVPD
134 | Mnemonic::VXORPS
135 | Mnemonic::VXORPD
136 | Mnemonic::VSHUFPS
137 | Mnemonic::VSHUFPD
138 | Mnemonic::VPERM2I128
139 | Mnemonic::VPERM2F128
140 | Mnemonic::VPERMILPS
141 | Mnemonic::VPERMILPD
142 | Mnemonic::VPERMD
143 | Mnemonic::VPERMQ
144 | Mnemonic::VPERMPS
145 | Mnemonic::VPERMPD
146 | Mnemonic::VPSHUFB
147 | Mnemonic::VPSHUFD
148 | Mnemonic::VPSHUFHW
149 | Mnemonic::VPSHUFLW
150 | Mnemonic::VBROADCASTSS
151 | Mnemonic::VEXTRACTF128
152 )
153}
154
155fn is_avx_instruction_with_imm8(mnemonic: Mnemonic) -> bool {
157 matches!(
158 mnemonic,
159 Mnemonic::VSHUFPS
160 | Mnemonic::VSHUFPD
161 | Mnemonic::VPERM2I128
162 | Mnemonic::VPERM2F128
163 | Mnemonic::VPERMILPS
164 | Mnemonic::VPERMILPD
165 | Mnemonic::VPERMQ
166 | Mnemonic::VPERMPD
167 | Mnemonic::VPSHUFD
168 | Mnemonic::VPSHUFHW
169 | Mnemonic::VPSHUFLW
170 | Mnemonic::VEXTRACTF128
171 )
172}
173
174fn is_avx512_capable_instruction(mnemonic: Mnemonic) -> bool {
177 matches!(
178 mnemonic,
179 Mnemonic::VMOVAPS
180 | Mnemonic::VMOVAPD
181 | Mnemonic::VMOVUPS
182 | Mnemonic::VMOVUPD
183 | Mnemonic::VADDPS
184 | Mnemonic::VADDPD
185 | Mnemonic::VSUBPS
186 | Mnemonic::VSUBPD
187 | Mnemonic::VMULPS
188 | Mnemonic::VMULPD
189 | Mnemonic::VDIVPS
190 | Mnemonic::VDIVPD
191 | Mnemonic::VXORPS
192 | Mnemonic::VXORPD
193 )
194}
195
/// x86/x86-64 instruction encoder configured for a target machine mode.
#[derive(Debug, Clone, Copy)]
pub struct Encoder {
    // Target machine mode (e.g. 64-bit long mode).
    machine_mode: MachineMode,
    // Stack width in bits; restricted to 16/32/64 by `Encoder::new`.
    stack_width: u8,
}
246
247impl Encoder {
248 pub fn new(mode: MachineMode, stack_width: u8) -> Result<Self> {
274 match stack_width {
276 16 | 32 | 64 => {}
277 _ => return Err(Error::InvalidStackWidth),
278 }
279
280 Ok(Self {
281 machine_mode: mode,
282 stack_width,
283 })
284 }
285
    /// Returns the machine mode this encoder targets.
    #[must_use]
    pub const fn machine_mode(&self) -> MachineMode {
        self.machine_mode
    }
291
    /// Returns the configured stack width in bits (16, 32, or 64).
    #[must_use]
    pub const fn stack_width(&self) -> u8 {
        self.stack_width
    }
297
298 pub fn encode(&self, request: &EncoderRequest) -> Result<alloc::vec::Vec<u8>> {
330 let mut buffer = [0u8; 15]; let len = self.encode_to_buffer(request, &mut buffer)?;
332 Ok(buffer[..len].to_vec())
333 }
334
    /// Encodes `request` into `buffer`, returning the number of bytes written.
    ///
    /// `buffer` must be at least 15 bytes (the maximum x86 instruction
    /// length); the individual emit paths index into it without re-checking.
    ///
    /// Dispatch order: zero-operand single-byte instructions first, then
    /// mnemonic-specific paths (MOV, JMP/Jcc, LEA, group-1 ALU, PUSH/POP,
    /// shift-by-CL), then the AVX-512 (EVEX) and AVX (VEX) vector paths,
    /// falling through to `NotYetImplemented` when nothing matches.
    ///
    /// # Errors
    /// - `Error::BufferTooSmall` when `buffer` is shorter than 15 bytes.
    /// - `Error::NotYetImplemented` when no emit path matches the request.
    /// - Any error propagated from the specific emitters.
    pub fn encode_to_buffer(&self, request: &EncoderRequest, buffer: &mut [u8]) -> Result<usize> {
        if buffer.len() < 15 {
            return Err(Error::BufferTooSmall);
        }

        let offset = 0usize;

        // Zero-operand instructions: a single fixed opcode byte.
        match request.mnemonic {
            Mnemonic::NOP => {
                buffer[offset] = 0x90;
                return Ok(1);
            }
            Mnemonic::RET => {
                buffer[offset] = 0xC3;
                return Ok(1);
            }
            Mnemonic::INT3 => {
                buffer[offset] = 0xCC;
                return Ok(1);
            }
            Mnemonic::HLT => {
                buffer[offset] = 0xF4;
                return Ok(1);
            }
            Mnemonic::CLC => {
                buffer[offset] = 0xF8;
                return Ok(1);
            }
            Mnemonic::STC => {
                buffer[offset] = 0xF9;
                return Ok(1);
            }
            Mnemonic::CLI => {
                buffer[offset] = 0xFA;
                return Ok(1);
            }
            Mnemonic::STI => {
                buffer[offset] = 0xFB;
                return Ok(1);
            }
            Mnemonic::CLD => {
                buffer[offset] = 0xFC;
                return Ok(1);
            }
            Mnemonic::STD => {
                buffer[offset] = 0xFD;
                return Ok(1);
            }
            _ => {}
        }

        // MOV reg, imm
        if request.mnemonic == Mnemonic::MOV && request.operand_count == 2 {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Imm(imm))) =
                (request.operand(0), request.operand(1))
            {
                return self.encode_mov_reg_imm(*dst_reg, *imm, buffer);
            }
        }

        // MOV reg, reg
        if request.mnemonic == Mnemonic::MOV && request.operand_count == 2 {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Reg(src_reg))) =
                (request.operand(0), request.operand(1))
            {
                return self.encode_mov_reg_reg(*dst_reg, *src_reg, buffer);
            }
        }

        // JMP rel8/rel32
        if request.mnemonic == Mnemonic::JMP && request.operand_count == 1 {
            if let Some(EncoderOperand::Rel(offset)) = request.operand(0) {
                return self.encode_jmp_rel(*offset, buffer);
            }
        }

        // Conditional jumps (Jcc rel8/rel32)
        if is_conditional_jump(request.mnemonic) && request.operand_count == 1 {
            if let Some(EncoderOperand::Rel(offset)) = request.operand(0) {
                return self.encode_jcc_rel(request.mnemonic, *offset, buffer);
            }
        }

        // LEA reg, [mem]
        if request.mnemonic == Mnemonic::LEA && request.operand_count == 2 {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Mem(mem))) =
                (request.operand(0), request.operand(1))
            {
                return self.encode_lea(*dst_reg, mem, buffer);
            }
        }

        // Group-1 ALU reg, imm (ADD/OR/AND/SUB/XOR/CMP)
        if get_alu_opcode_extension(request.mnemonic).is_some() && request.operand_count == 2 {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Imm(imm))) =
                (request.operand(0), request.operand(1))
            {
                return self.encode_alu_reg_imm(*dst_reg, *imm, request.mnemonic, buffer);
            }
        }

        // Group-1 ALU reg, reg
        if get_alu_reg_reg_opcode(request.mnemonic).is_some() && request.operand_count == 2 {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Reg(src_reg))) =
                (request.operand(0), request.operand(1))
            {
                return self.encode_alu_reg_reg(*dst_reg, *src_reg, request.mnemonic, buffer);
            }
        }

        // PUSH reg
        if request.mnemonic == Mnemonic::PUSH && request.operand_count == 1 {
            if let Some(EncoderOperand::Reg(reg)) = request.operand(0) {
                return self.encode_push_pop_reg(*reg, true, buffer);
            }
        }

        // POP reg
        if request.mnemonic == Mnemonic::POP && request.operand_count == 1 {
            if let Some(EncoderOperand::Reg(reg)) = request.operand(0) {
                return self.encode_push_pop_reg(*reg, false, buffer);
            }
        }

        // Shift by CL (SHL/SHR/SAR/SAL reg, CL) — only the CL-count form
        // is handled here.
        if matches!(
            request.mnemonic,
            Mnemonic::SHL | Mnemonic::SHR | Mnemonic::SAR | Mnemonic::SAL
        ) && request.operand_count == 2
        {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Reg(src_reg))) =
                (request.operand(0), request.operand(1))
            {
                if *src_reg == Register::CL {
                    return self.encode_shift_cl(*dst_reg, request.mnemonic, buffer);
                }
            }
        }

        // AVX-512 (EVEX) path: taken when any operand is a ZMM register or
        // the request carries a mask register / zeroing semantics.
        if is_avx512_capable_instruction(request.mnemonic) && request.operand_count == 3 {
            let has_zmm = request.has_zmm_operand();
            let has_mask = request.has_mask();
            let is_zeroing = request.is_zeroing();

            if has_zmm || has_mask || is_zeroing {
                if let (
                    Some(EncoderOperand::Reg(dst_reg)),
                    Some(EncoderOperand::Reg(src1_reg)),
                    Some(EncoderOperand::Reg(src2_reg)),
                ) = (request.operand(0), request.operand(1), request.operand(2))
                {
                    return self.encode_avx512_reg_reg(
                        *dst_reg,
                        *src1_reg,
                        *src2_reg,
                        request.mnemonic,
                        request.mask_reg,
                        request.zeroing_mode,
                        buffer,
                    );
                }

                if let (
                    Some(EncoderOperand::Reg(dst_reg)),
                    Some(EncoderOperand::Reg(src1_reg)),
                    Some(EncoderOperand::Mem(mem)),
                ) = (request.operand(0), request.operand(1), request.operand(2))
                {
                    return self.encode_avx512_reg_mem(
                        *dst_reg,
                        *src1_reg,
                        mem,
                        request.mnemonic,
                        request.mask_reg,
                        request.zeroing_mode,
                        buffer,
                    );
                }
            }
        }

        // AVX (VEX) three-operand path.
        if is_avx_instruction(request.mnemonic) && request.operand_count == 3 {
            if let (
                Some(EncoderOperand::Reg(dst_reg)),
                Some(EncoderOperand::Reg(src1_reg)),
                Some(EncoderOperand::Reg(src2_reg)),
            ) = (request.operand(0), request.operand(1), request.operand(2))
            {
                return self.encode_avx_reg_reg(
                    *dst_reg,
                    *src1_reg,
                    *src2_reg,
                    request.mnemonic,
                    buffer,
                );
            }

            if let (
                Some(EncoderOperand::Reg(dst_reg)),
                Some(EncoderOperand::Reg(src1_reg)),
                Some(EncoderOperand::Mem(mem)),
            ) = (request.operand(0), request.operand(1), request.operand(2))
            {
                return self.encode_avx_reg_mem(*dst_reg, *src1_reg, mem, request.mnemonic, buffer);
            }
        }

        // AVX (VEX) three-operand + imm8 path.
        if is_avx_instruction_with_imm8(request.mnemonic) && request.operand_count == 4 {
            if let (
                Some(EncoderOperand::Reg(dst_reg)),
                Some(EncoderOperand::Reg(src1_reg)),
                Some(EncoderOperand::Reg(src2_reg)),
                Some(EncoderOperand::Imm(imm8)),
            ) = (
                request.operand(0),
                request.operand(1),
                request.operand(2),
                request.operand(3),
            ) {
                return self.encode_avx_reg_reg_imm8(
                    *dst_reg,
                    *src1_reg,
                    *src2_reg,
                    *imm8 as u8,
                    request.mnemonic,
                    buffer,
                );
            }

            if let (
                Some(EncoderOperand::Reg(dst_reg)),
                Some(EncoderOperand::Reg(src1_reg)),
                Some(EncoderOperand::Mem(mem)),
                Some(EncoderOperand::Imm(imm8)),
            ) = (
                request.operand(0),
                request.operand(1),
                request.operand(2),
                request.operand(3),
            ) {
                return self.encode_avx_reg_mem_imm8(
                    *dst_reg,
                    *src1_reg,
                    mem,
                    *imm8 as u8,
                    request.mnemonic,
                    buffer,
                );
            }
        }

        // VBROADCASTSS reg, [mem]
        if request.mnemonic == Mnemonic::VBROADCASTSS && request.operand_count == 2 {
            if let (Some(EncoderOperand::Reg(dst_reg)), Some(EncoderOperand::Mem(mem))) =
                (request.operand(0), request.operand(1))
            {
                return self.encode_avx_broadcast_mem(*dst_reg, mem, request.mnemonic, buffer);
            }
        }

        // VEXTRACTF128 xmm, ymm, imm8
        if request.mnemonic == Mnemonic::VEXTRACTF128 && request.operand_count == 3 {
            if let (
                Some(EncoderOperand::Reg(dst_reg)),
                Some(EncoderOperand::Reg(src_reg)),
                Some(EncoderOperand::Imm(imm8)),
            ) = (request.operand(0), request.operand(1), request.operand(2))
            {
                return self.encode_avx_extractf128(*dst_reg, *src_reg, *imm8 as u8, buffer);
            }
        }

        // NOTE(review): `.leak()` leaks the formatted message on every failed
        // encode — consider an owned-string error variant instead.
        Err(Error::NotYetImplemented(
            alloc::format!("encoding for {:?}", request.mnemonic).leak(),
        ))
    }
669
    /// Encodes `MOV reg, imm` using the register-in-opcode forms
    /// (B0+r ib for 8-bit registers, B8+r iw/id/io otherwise; the 64-bit
    /// form carries a full 8-byte immediate).
    ///
    /// # Errors
    /// Currently infallible for valid registers; returns `Result` for
    /// interface consistency with the other emitters.
    fn encode_mov_reg_imm(&self, reg: Register, imm: u64, buffer: &mut [u8]) -> Result<usize> {
        let mut offset = 0usize;
        let reg_size = reg.size();

        if reg_size == 8 {
            // Low-byte registers like SPL/BPL/SIL/DIL (and R8B..R15B)
            // require a REX prefix even without extension bits.
            let needs_rex = needs_rex_for_8bit(reg);

            if needs_rex {
                buffer[offset] = encode_rex(false, false, false, check_rex_b(reg));
                offset += 1;
            }

            // B0+r with a single immediate byte (imm truncated to u8).
            buffer[offset] = get_mov_imm8_opcode(register_id(reg));
            offset += 1;
            buffer[offset] = imm as u8;
            offset += 1;

            return Ok(offset);
        }

        // REX.W is only meaningful in long mode.
        let needs_rex_w = reg_size == 64 && self.machine_mode == MachineMode::Long64;

        // 0x66 operand-size override selects 16-bit operands.
        let needs_66 = reg_size == 16;

        // REX.B extends the register id embedded in the opcode byte.
        let needs_rex_b = check_rex_b(reg);

        if needs_66 {
            buffer[offset] = 0x66;
            offset += 1;
        }

        if needs_rex_w || needs_rex_b {
            buffer[offset] = encode_rex(needs_rex_w, false, false, needs_rex_b);
            offset += 1;
        }

        buffer[offset] = get_mov_imm_opcode(register_id(reg), reg_size == 64);
        offset += 1;

        // Immediate width follows the operand size; MOV r64, imm64 takes
        // the full 8-byte immediate.
        let imm_size = if reg_size == 64 {
            8
        } else {
            (reg_size / 8) as usize
        };
        // Little-endian immediate emission.
        for i in 0..imm_size {
            buffer[offset + i] = ((imm >> (i * 8)) & 0xFF) as u8;
        }
        offset += imm_size;

        Ok(offset)
    }
731
732 fn encode_mov_reg_reg(&self, dst: Register, src: Register, buffer: &mut [u8]) -> Result<usize> {
734 let mut offset = 0usize;
735 let op_size = dst.size().max(src.size());
736
737 if dst.size() != src.size() {
739 return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
740 }
741
742 let needs_66 = op_size == 16;
744 let needs_rex_w = op_size == 64 && self.machine_mode == MachineMode::Long64;
745 let rex_r = check_rex_r(src);
746 let rex_b = check_rex_b(dst);
747
748 let rex_for_8bit = op_size == 8 && (needs_rex_for_8bit(dst) || needs_rex_for_8bit(src));
750
751 if needs_66 {
753 buffer[offset] = 0x66;
754 offset += 1;
755 }
756
757 if needs_rex_w || rex_r || rex_b || rex_for_8bit {
759 buffer[offset] = encode_rex(needs_rex_w, rex_r, false, rex_b);
760 offset += 1;
761 }
762
763 buffer[offset] = 0x8B;
765 offset += 1;
766
767 buffer[offset] = encode_modrm(3, register_id(src), register_id(dst));
769 offset += 1;
770
771 Ok(offset)
772 }
773
774 fn encode_jmp_rel(&self, offset: i32, buffer: &mut [u8]) -> Result<usize> {
776 let mut len = 0usize;
777
778 if (-128..=127).contains(&offset) {
780 buffer[len] = 0xEB; len += 1;
782 buffer[len] = offset as i8 as u8;
783 len += 1;
784 } else {
785 buffer[len] = 0xE9; len += 1;
787 buffer[len..len + 4].copy_from_slice(&offset.to_le_bytes());
788 len += 4;
789 }
790
791 Ok(len)
792 }
793
794 fn encode_jcc_rel(&self, mnemonic: Mnemonic, offset: i32, buffer: &mut [u8]) -> Result<usize> {
796 let condition = get_jcc_condition(mnemonic);
797 let mut len = 0usize;
798
799 if (-128..=127).contains(&offset) {
801 buffer[len] = 0x70 + condition; len += 1;
803 buffer[len] = offset as i8 as u8;
804 len += 1;
805 } else {
806 buffer[len] = 0x0F; len += 1;
808 buffer[len] = 0x80 + condition;
809 len += 1;
810 buffer[len..len + 4].copy_from_slice(&offset.to_le_bytes());
811 len += 4;
812 }
813
814 Ok(len)
815 }
816
817 fn encode_alu_reg_imm(
820 &self,
821 reg: Register,
822 imm: u64,
823 mnemonic: Mnemonic,
824 buffer: &mut [u8],
825 ) -> Result<usize> {
826 let mut offset = 0usize;
827 let reg_size = reg.size();
828
829 let opcode_ext = get_alu_opcode_extension(mnemonic)
831 .ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
832
833 let imm8 = imm as u8;
838 let signed_imm = imm8 as i8 as i64;
839 if imm > 255 && imm != (signed_imm as u64) {
843 return Err(Error::EncodeFailed(EncodeError::ImmediateOutOfRange));
845 }
846
847 if reg_size == 8 {
849 let needs_rex = needs_rex_for_8bit(reg);
851
852 if needs_rex {
853 buffer[offset] = encode_rex(false, false, false, check_rex_b(reg));
854 offset += 1;
855 }
856
857 buffer[offset] = 0x80; offset += 1;
859 buffer[offset] = encode_modrm(3, opcode_ext, register_id(reg));
860 offset += 1;
861 buffer[offset] = imm as u8;
862 offset += 1;
863
864 return Ok(offset);
865 }
866
867 let needs_rex_w = reg_size == 64 && self.machine_mode == MachineMode::Long64;
869 let needs_66 = reg_size == 16;
870 let needs_rex_b = check_rex_b(reg);
871
872 if needs_66 {
874 buffer[offset] = 0x66;
875 offset += 1;
876 }
877
878 if needs_rex_w || needs_rex_b {
880 buffer[offset] = encode_rex(needs_rex_w, false, false, needs_rex_b);
881 offset += 1;
882 }
883
884 buffer[offset] = 0x83;
886 offset += 1;
887
888 buffer[offset] = encode_modrm(3, opcode_ext, register_id(reg));
890 offset += 1;
891
892 buffer[offset] = imm as u8;
894 offset += 1;
895
896 Ok(offset)
897 }
898
899 fn encode_alu_reg_reg(
902 &self,
903 dst: Register,
904 src: Register,
905 mnemonic: Mnemonic,
906 buffer: &mut [u8],
907 ) -> Result<usize> {
908 let mut offset = 0usize;
909 let op_size = dst.size().max(src.size());
910
911 if dst.size() != src.size() {
913 return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
914 }
915
916 let opcode = get_alu_reg_reg_opcode(mnemonic)
918 .ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
919
920 let needs_66 = op_size == 16;
922 let needs_rex_w = op_size == 64 && self.machine_mode == MachineMode::Long64;
923 let rex_r = check_rex_r(src);
924 let rex_b = check_rex_b(dst);
925
926 let rex_for_8bit = op_size == 8 && (needs_rex_for_8bit(dst) || needs_rex_for_8bit(src));
928
929 if needs_66 {
931 buffer[offset] = 0x66;
932 offset += 1;
933 }
934
935 if needs_rex_w || rex_r || rex_b || rex_for_8bit {
937 buffer[offset] = encode_rex(needs_rex_w, rex_r, false, rex_b);
938 offset += 1;
939 }
940
941 buffer[offset] = opcode;
943 offset += 1;
944
945 buffer[offset] = encode_modrm(3, register_id(src), register_id(dst));
947 offset += 1;
948
949 Ok(offset)
950 }
951
952 fn encode_push_pop_reg(
954 &self,
955 reg: Register,
956 is_push: bool,
957 buffer: &mut [u8],
958 ) -> Result<usize> {
959 let mut offset = 0usize;
960 let reg_size = reg.size();
961
962 if reg_size == 64 && self.machine_mode == MachineMode::Long64 {
964 let needs_rex_b = check_rex_b(reg);
965
966 if needs_rex_b {
968 buffer[offset] = encode_rex(false, false, false, true);
969 offset += 1;
970 }
971
972 buffer[offset] = if is_push {
974 0x50 + register_id(reg)
975 } else {
976 0x58 + register_id(reg)
977 };
978 offset += 1;
979
980 return Ok(offset);
981 }
982
983 let needs_66 = reg_size == 16;
985 let needs_rex_w = reg_size == 64;
986 let needs_rex_b = check_rex_b(reg);
987
988 if needs_66 {
990 buffer[offset] = 0x66;
991 offset += 1;
992 }
993
994 if needs_rex_w || needs_rex_b {
996 buffer[offset] = encode_rex(needs_rex_w, false, false, needs_rex_b);
997 offset += 1;
998 }
999
1000 if is_push {
1002 buffer[offset] = 0xFF; offset += 1;
1004 buffer[offset] = encode_modrm(3, 6, register_id(reg)); offset += 1;
1006 } else {
1007 buffer[offset] = 0x8F; offset += 1;
1009 buffer[offset] = encode_modrm(3, 0, register_id(reg)); offset += 1;
1011 }
1012
1013 Ok(offset)
1014 }
1015
1016 fn encode_shift_cl(
1019 &self,
1020 reg: Register,
1021 mnemonic: Mnemonic,
1022 buffer: &mut [u8],
1023 ) -> Result<usize> {
1024 let mut offset = 0usize;
1025 let reg_size = reg.size();
1026
1027 let opcode_ext = match mnemonic {
1029 Mnemonic::SHL | Mnemonic::SAL => 4,
1030 Mnemonic::SHR => 5,
1031 Mnemonic::SAR => 7,
1032 _ => return Err(Error::EncodeFailed(EncodeError::InvalidMnemonic)),
1033 };
1034
1035 let needs_66 = reg_size == 16;
1036 let needs_rex_w = reg_size == 64 && self.machine_mode == MachineMode::Long64;
1037 let needs_rex_b = check_rex_b(reg);
1038
1039 let rex_for_8bit = reg_size == 8 && needs_rex_for_8bit(reg);
1041
1042 if needs_66 {
1044 buffer[offset] = 0x66;
1045 offset += 1;
1046 }
1047
1048 if needs_rex_w || needs_rex_b || rex_for_8bit {
1050 buffer[offset] = encode_rex(needs_rex_w, false, false, needs_rex_b);
1051 offset += 1;
1052 }
1053
1054 buffer[offset] = if reg_size == 8 { 0xD2 } else { 0xD3 };
1056 offset += 1;
1057
1058 buffer[offset] = encode_modrm(3, opcode_ext, register_id(reg));
1060 offset += 1;
1061
1062 Ok(offset)
1063 }
1064
    /// Encodes `LEA dst, [mem]` (opcode 0x8D).
    ///
    /// Emits, in order: optional segment-override prefix, optional 0x67
    /// address-size override (32-bit addressing in long mode), optional REX,
    /// the opcode, then ModRM/SIB/displacement via
    /// [`Self::encode_memory_operand`].
    fn encode_lea(&self, dst: Register, mem: &MemoryOperand, buffer: &mut [u8]) -> Result<usize> {
        let mut offset = 0usize;

        // Address size is inferred from the base (or index) register width;
        // defaults to 64-bit when the operand is displacement-only.
        let addr_size = if mem.base != Register::None {
            mem.base.size()
        } else if mem.index != Register::None {
            mem.index.size()
        } else {
            64
        };

        // DS is the default data segment, so it never needs a prefix.
        if mem.segment != Register::None && mem.segment != Register::DS {
            let seg_prefix = segment_override_prefix(mem.segment);
            if seg_prefix != 0 {
                buffer[offset] = seg_prefix;
                offset += 1;
            }
        }

        let needs_rex_w = dst.size() == 64;
        let rex_r = check_rex_r(dst);
        let rex_b = mem.base != Register::None && check_rex_b(mem.base);
        // NOTE(review): the X bit is derived with `needs_rex_r` applied to
        // the index register — assumes that helper simply tests for an
        // extended (id >= 8) register, same as for the reg field; confirm
        // in utils.
        let rex_x = mem.index != Register::None && check_rex_r(mem.index);

        if addr_size == 32 && self.machine_mode == MachineMode::Long64 {
            buffer[offset] = 0x67;
            offset += 1;
        }

        if needs_rex_w || rex_r || rex_x || rex_b {
            buffer[offset] = encode_rex(needs_rex_w, rex_r, rex_x, rex_b);
            offset += 1;
        }

        buffer[offset] = 0x8D;
        offset += 1;

        offset += self.encode_memory_operand(dst, mem, &mut buffer[offset..])?;

        Ok(offset)
    }
1114
    /// Emits the ModRM byte, optional SIB byte, and displacement for a
    /// memory operand, with `reg` occupying the ModRM.reg field.
    ///
    /// Handles the x86 addressing special cases:
    /// - RIP-relative: mod=00, rm=101, disp32.
    /// - No base register: SIB form with base=101 and a mandatory disp32.
    /// - RBP/R13 base with zero displacement: forced disp8=0 (mod=00 with
    ///   rm/base=101 would otherwise decode as disp32 / RIP-relative).
    /// - A stack-pointer-family index register is rejected (RSP cannot be
    ///   an index).
    ///
    /// Returns the number of bytes written.
    fn encode_memory_operand(
        &self,
        reg: Register,
        mem: &MemoryOperand,
        buffer: &mut [u8],
    ) -> Result<usize> {
        let mut offset = 0usize;
        let reg_id = register_id(reg);
        let base_id = if mem.base != Register::None {
            register_id(mem.base)
        } else {
            0
        };
        let index_id = if mem.index != Register::None {
            register_id(mem.index)
        } else {
            0
        };

        // RIP-relative: mod=00, rm=101, followed by disp32.
        if mem.is_rip_relative() {
            buffer[offset] = encode_modrm(0, reg_id, 5);
            offset += 1;
            buffer[offset..offset + 4].copy_from_slice(&(mem.displacement as i32).to_le_bytes());
            offset += 4;
            return Ok(offset);
        }

        // No base register: must use a SIB byte with base=101, which
        // mandates a 32-bit displacement.
        if mem.base == Register::None {
            if mem.index != Register::None {
                if is_stack_register(mem.index) {
                    return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
                }
                // [index*scale + disp32]
                buffer[offset] = encode_modrm(0, reg_id, 4);
                offset += 1;
                buffer[offset] = encode_sib(mem.scale, index_id, 5);
                offset += 1;
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            } else {
                // Absolute [disp32]: SIB with index=100 (none), base=101.
                buffer[offset] = encode_modrm(0, reg_id, 4);
                offset += 1;
                buffer[offset] = encode_sib(1, 4, 5);
                offset += 1;
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            }
            return Ok(offset);
        }

        // RSP-family registers cannot be encoded as an index.
        if mem.index != Register::None && is_stack_register(mem.index) {
            return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
        }

        let needs_sib = mem.requires_sib();
        let disp_size = displacement_size(mem.displacement);

        // RBP/R13 base with mod=00 would decode as disp32/RIP-relative,
        // so a zero displacement is promoted to an explicit disp8=0.
        let base_is_rbp = is_rbp_family(mem.base);

        let effective_disp_size = if base_is_rbp && disp_size == 0 {
            1
        } else {
            disp_size
        };

        // mod field: 00 = no disp, 01 = disp8, 10 = disp16/32.
        let mod_val = match effective_disp_size {
            0 => 0,
            1 => 1,
            _ => 2,
        };

        if needs_sib {
            // rm=100 escapes to the SIB byte.
            buffer[offset] = encode_modrm(mod_val, reg_id, 4);
            offset += 1;

            // base=101 mirrors the RBP/R13 disp8 promotion above; the id is
            // the same value for the non-promoted case anyway.
            let sib_base = if base_is_rbp && disp_size == 0 {
                5
            } else {
                base_id
            };
            buffer[offset] = encode_sib(mem.scale, index_id, sib_base);
            offset += 1;
        } else {
            if base_is_rbp && disp_size == 0 {
                // mod=01 with forced disp8=0; rm=5 equals the low three
                // bits of RBP/R13's register id.
                buffer[offset] = encode_modrm(1, reg_id, 5);
            } else {
                buffer[offset] = encode_modrm(mod_val, reg_id, base_id);
            }
            offset += 1;
        }

        // Little-endian displacement. The 2-byte form serves 16-bit
        // addressing; anything wider than 32 bits is truncated to disp32.
        match effective_disp_size {
            1 => {
                buffer[offset] = mem.displacement as i8 as u8;
                offset += 1;
            }
            2 => {
                buffer[offset..offset + 2]
                    .copy_from_slice(&(mem.displacement as i16).to_le_bytes());
                offset += 2;
            }
            4 | 8 => {
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            }
            _ => {}
        }

        Ok(offset)
    }
1252
1253 fn encode_avx_reg_reg(
1261 &self,
1262 dst: Register,
1263 src1: Register,
1264 src2: Register,
1265 mnemonic: Mnemonic,
1266 buffer: &mut [u8],
1267 ) -> Result<usize> {
1268 let mut offset = 0usize;
1269
1270 let avx_info =
1272 get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
1273
1274 let vex_map = match avx_info.opcode_map {
1276 AvxOpcodeMap::Map0F => vex::VexOpcodeMap::Map0F,
1277 AvxOpcodeMap::Map0F38 => vex::VexOpcodeMap::Map0F38,
1278 AvxOpcodeMap::Map0F3A => vex::VexOpcodeMap::Map0F3A,
1279 };
1280
1281 let vex_info = VexInfo::new()
1283 .with_map(vex_map)
1284 .with_prefix(avx_info.vex_prefix)
1285 .with_128bit()
1286 .with_vvvv(dst); let vex_bytes = vex::encode_vex(&vex_info, Some(src1), Some(src2), None);
1290 buffer[offset..offset + vex_bytes.len()].copy_from_slice(&vex_bytes);
1291 offset += vex_bytes.len();
1292
1293 buffer[offset] = avx_info.opcode;
1295 offset += 1;
1296
1297 buffer[offset] = encode_modrm(3, register_id(src1), register_id(src2));
1299 offset += 1;
1300
1301 if avx_info.has_imm8 {
1303 return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
1306 }
1307
1308 Ok(offset)
1309 }
1310
1311 fn encode_avx_reg_mem(
1315 &self,
1316 dst: Register,
1317 src1: Register,
1318 mem: &MemoryOperand,
1319 mnemonic: Mnemonic,
1320 buffer: &mut [u8],
1321 ) -> Result<usize> {
1322 let mut offset = 0usize;
1323
1324 let avx_info =
1326 get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
1327
1328 let vex_map = match avx_info.opcode_map {
1330 AvxOpcodeMap::Map0F => vex::VexOpcodeMap::Map0F,
1331 AvxOpcodeMap::Map0F38 => vex::VexOpcodeMap::Map0F38,
1332 AvxOpcodeMap::Map0F3A => vex::VexOpcodeMap::Map0F3A,
1333 };
1334
1335 let vex_info = VexInfo::new()
1337 .with_map(vex_map)
1338 .with_prefix(avx_info.vex_prefix)
1339 .with_128bit()
1340 .with_vvvv(dst); let vex_bytes = vex::encode_vex(&vex_info, Some(src1), Some(mem.base), Some(mem.index));
1344 buffer[offset..offset + vex_bytes.len()].copy_from_slice(&vex_bytes);
1345 offset += vex_bytes.len();
1346
1347 buffer[offset] = avx_info.opcode;
1349 offset += 1;
1350
1351 offset += self.encode_avx_memory_operand(src1, mem, &mut buffer[offset..])?;
1354
1355 Ok(offset)
1356 }
1357
1358 fn encode_avx_reg_reg_imm8(
1361 &self,
1362 dst: Register,
1363 src1: Register,
1364 src2: Register,
1365 imm8: u8,
1366 mnemonic: Mnemonic,
1367 buffer: &mut [u8],
1368 ) -> Result<usize> {
1369 let mut offset = 0usize;
1370
1371 let avx_info =
1373 get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
1374
1375 let vex_map = match avx_info.opcode_map {
1377 AvxOpcodeMap::Map0F => vex::VexOpcodeMap::Map0F,
1378 AvxOpcodeMap::Map0F38 => vex::VexOpcodeMap::Map0F38,
1379 AvxOpcodeMap::Map0F3A => vex::VexOpcodeMap::Map0F3A,
1380 };
1381
1382 let vector_length = if matches!(mnemonic, Mnemonic::VPERM2I128) {
1384 vex::VexVectorLength::L256
1386 } else {
1387 vex::VexVectorLength::L128
1388 };
1389
1390 let vex_info = VexInfo::new()
1392 .with_map(vex_map)
1393 .with_prefix(avx_info.vex_prefix)
1394 .with_vector_length(vector_length)
1395 .with_vvvv(dst);
1396
1397 let vex_bytes = vex::encode_vex(&vex_info, Some(src1), Some(src2), None);
1399 buffer[offset..offset + vex_bytes.len()].copy_from_slice(&vex_bytes);
1400 offset += vex_bytes.len();
1401
1402 buffer[offset] = avx_info.opcode;
1404 offset += 1;
1405
1406 buffer[offset] = encode_modrm(3, register_id(src1), register_id(src2));
1408 offset += 1;
1409
1410 buffer[offset] = imm8;
1412 offset += 1;
1413
1414 Ok(offset)
1415 }
1416
1417 fn encode_avx_reg_mem_imm8(
1420 &self,
1421 dst: Register,
1422 src1: Register,
1423 mem: &MemoryOperand,
1424 imm8: u8,
1425 mnemonic: Mnemonic,
1426 buffer: &mut [u8],
1427 ) -> Result<usize> {
1428 let mut offset = 0usize;
1429
1430 let avx_info =
1432 get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
1433
1434 let vex_map = match avx_info.opcode_map {
1436 AvxOpcodeMap::Map0F => vex::VexOpcodeMap::Map0F,
1437 AvxOpcodeMap::Map0F38 => vex::VexOpcodeMap::Map0F38,
1438 AvxOpcodeMap::Map0F3A => vex::VexOpcodeMap::Map0F3A,
1439 };
1440
1441 let vex_info = VexInfo::new()
1443 .with_map(vex_map)
1444 .with_prefix(avx_info.vex_prefix)
1445 .with_128bit()
1446 .with_vvvv(dst);
1447
1448 let vex_bytes = vex::encode_vex(&vex_info, Some(src1), Some(mem.base), Some(mem.index));
1450 buffer[offset..offset + vex_bytes.len()].copy_from_slice(&vex_bytes);
1451 offset += vex_bytes.len();
1452
1453 buffer[offset] = avx_info.opcode;
1455 offset += 1;
1456
1457 offset += self.encode_avx_memory_operand(src1, mem, &mut buffer[offset..])?;
1459
1460 buffer[offset] = imm8;
1462 offset += 1;
1463
1464 Ok(offset)
1465 }
1466
1467 fn encode_avx_broadcast_mem(
1470 &self,
1471 dst: Register,
1472 mem: &MemoryOperand,
1473 mnemonic: Mnemonic,
1474 buffer: &mut [u8],
1475 ) -> Result<usize> {
1476 let mut offset = 0usize;
1477
1478 let avx_info =
1480 get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
1481
1482 let vex_info = VexInfo::new()
1484 .with_map(vex::VexOpcodeMap::Map0F38)
1485 .with_prefix(avx_info.vex_prefix)
1486 .with_128bit();
1487 let vex_bytes = vex::encode_vex(&vex_info, Some(dst), Some(mem.base), Some(mem.index));
1491 buffer[offset..offset + vex_bytes.len()].copy_from_slice(&vex_bytes);
1492 offset += vex_bytes.len();
1493
1494 buffer[offset] = avx_info.opcode;
1496 offset += 1;
1497
1498 offset += self.encode_avx_memory_operand(dst, mem, &mut buffer[offset..])?;
1501
1502 Ok(offset)
1503 }
1504
1505 fn encode_avx_extractf128(
1508 &self,
1509 dst: Register,
1510 src: Register,
1511 imm8: u8,
1512 buffer: &mut [u8],
1513 ) -> Result<usize> {
1514 let mut offset = 0usize;
1515
1516 let avx_info = get_avx_opcode(Mnemonic::VEXTRACTF128)
1518 .ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;
1519
1520 let vex_info = VexInfo::new()
1522 .with_map(vex::VexOpcodeMap::Map0F3A)
1523 .with_prefix(avx_info.vex_prefix)
1524 .with_256bit();
1525 let vex_bytes = vex::encode_vex(&vex_info, Some(src), Some(dst), None);
1529 buffer[offset..offset + vex_bytes.len()].copy_from_slice(&vex_bytes);
1530 offset += vex_bytes.len();
1531
1532 buffer[offset] = avx_info.opcode;
1534 offset += 1;
1535
1536 buffer[offset] = encode_modrm(3, register_id(src), register_id(dst));
1538 offset += 1;
1539
1540 buffer[offset] = imm8;
1542 offset += 1;
1543
1544 Ok(offset)
1545 }
1546
    /// Emits ModRM/SIB/displacement for a memory operand on the VEX-encoded
    /// paths, with `reg` in the ModRM.reg field.
    ///
    /// NOTE(review): this duplicates [`Self::encode_memory_operand`]
    /// line-for-line; consider sharing one implementation.
    ///
    /// Special cases handled (same as the legacy path): RIP-relative
    /// (mod=00, rm=101, disp32); base-less SIB with mandatory disp32;
    /// RBP/R13 base with zero displacement promoted to disp8=0; and
    /// rejection of a stack-pointer-family index register.
    fn encode_avx_memory_operand(
        &self,
        reg: Register,
        mem: &MemoryOperand,
        buffer: &mut [u8],
    ) -> Result<usize> {
        let mut offset = 0usize;
        let reg_id = register_id(reg);
        let base_id = if mem.base != Register::None {
            register_id(mem.base)
        } else {
            0
        };
        let index_id = if mem.index != Register::None {
            register_id(mem.index)
        } else {
            0
        };

        // RIP-relative: mod=00, rm=101, followed by disp32.
        if mem.is_rip_relative() {
            buffer[offset] = encode_modrm(0, reg_id, 5);
            offset += 1;
            buffer[offset..offset + 4].copy_from_slice(&(mem.displacement as i32).to_le_bytes());
            offset += 4;
            return Ok(offset);
        }

        // No base register: SIB with base=101 and a mandatory disp32.
        if mem.base == Register::None {
            if mem.index != Register::None {
                if is_stack_register(mem.index) {
                    return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
                }
                // [index*scale + disp32]
                buffer[offset] = encode_modrm(0, reg_id, 4);
                offset += 1;
                buffer[offset] = encode_sib(mem.scale, index_id, 5);
                offset += 1;
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            } else {
                // Absolute [disp32]: SIB with index=100 (none), base=101.
                buffer[offset] = encode_modrm(0, reg_id, 4);
                offset += 1;
                buffer[offset] = encode_sib(1, 4, 5);
                offset += 1;
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            }
            return Ok(offset);
        }

        // RSP-family registers cannot be encoded as an index.
        if mem.index != Register::None && is_stack_register(mem.index) {
            return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
        }

        let needs_sib = mem.requires_sib();
        let disp_size = displacement_size(mem.displacement);

        // RBP/R13 base with mod=00 would decode as disp32/RIP-relative,
        // so a zero displacement is promoted to an explicit disp8=0.
        let base_is_rbp = is_rbp_family(mem.base);

        let effective_disp_size = if base_is_rbp && disp_size == 0 {
            1
        } else {
            disp_size
        };

        // mod field: 00 = no disp, 01 = disp8, 10 = disp16/32.
        let mod_val = match effective_disp_size {
            0 => 0,
            1 => 1,
            _ => 2,
        };

        if needs_sib {
            // rm=100 escapes to the SIB byte.
            buffer[offset] = encode_modrm(mod_val, reg_id, 4);
            offset += 1;

            let sib_base = if base_is_rbp && disp_size == 0 {
                5
            } else {
                base_id
            };
            buffer[offset] = encode_sib(mem.scale, index_id, sib_base);
            offset += 1;
        } else {
            if base_is_rbp && disp_size == 0 {
                // mod=01 with forced disp8=0; rm=5 equals the low three
                // bits of RBP/R13's register id.
                buffer[offset] = encode_modrm(1, reg_id, 5);
            } else {
                buffer[offset] = encode_modrm(mod_val, reg_id, base_id);
            }
            offset += 1;
        }

        // Little-endian displacement; >32-bit values truncate to disp32.
        match effective_disp_size {
            1 => {
                buffer[offset] = mem.displacement as i8 as u8;
                offset += 1;
            }
            2 => {
                buffer[offset..offset + 2]
                    .copy_from_slice(&(mem.displacement as i16).to_le_bytes());
                offset += 2;
            }
            4 | 8 => {
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            }
            _ => {}
        }

        Ok(offset)
    }
1672
    /// Encodes an AVX-512 three-operand register-register instruction:
    /// EVEX prefix, opcode, and ModRM byte. Returns the bytes written.
    ///
    /// `mask_reg` selects an opmask register (k1–k7) and `zeroing` chooses
    /// merging vs. zeroing-masking; both are folded into the EVEX prefix.
    ///
    /// # Errors
    /// `InvalidMnemonic` when the mnemonic has no AVX opcode-table entry.
    #[allow(clippy::too_many_arguments)]
    fn encode_avx512_reg_reg(
        &self,
        dst: Register,
        src1: Register,
        src2: Register,
        mnemonic: Mnemonic,
        mask_reg: Option<Register>,
        zeroing: evex::EvexZeroingMode,
        buffer: &mut [u8],
    ) -> Result<usize> {
        let mut offset = 0usize;

        let avx_info =
            get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;

        // EVEX.L'L is derived from the widest register among all operands.
        let vector_length = if dst.is_zmm() || src1.is_zmm() || src2.is_zmm() {
            EvexVectorLength::L512
        } else if dst.is_ymm() || src1.is_ymm() || src2.is_ymm() {
            EvexVectorLength::L256
        } else {
            EvexVectorLength::L128
        };

        // The opcode table stores the legacy/VEX mandatory prefix; map it
        // onto the equivalent EVEX.pp encoding.
        let evex_prefix = match avx_info.vex_prefix {
            vex::VexPrefix::None => EvexPrefix::None,
            vex::VexPrefix::Prefix66 => EvexPrefix::Prefix66,
            vex::VexPrefix::PrefixF3 => EvexPrefix::PrefixF3,
            vex::VexPrefix::PrefixF2 => EvexPrefix::PrefixF2,
        };

        // NOTE(review): `dst` is placed in EVEX.vvvv while src1/src2 fill
        // ModRM.reg/ModRM.rm below. This matches the operand placement used
        // by this crate's VEX reg-reg path and its tests, but differs from
        // the usual Intel convention (dst in ModRM.reg, src1 in vvvv) —
        // confirm against evex::encode_evex.
        let evex_info = EvexInfo::new()
            .with_map(EvexOpcodeMap::Map0F)
            .with_prefix(evex_prefix)
            .with_vvvv(dst)
            .with_mask_opt(mask_reg)
            .with_zeroing(zeroing);

        let evex_info = match vector_length {
            EvexVectorLength::L128 => evex_info.with_128bit(),
            EvexVectorLength::L256 => evex_info.with_256bit(),
            EvexVectorLength::L512 => evex_info.with_512bit(),
        };

        // The EVEX prefix carries the high register-extension bits for the
        // registers passed here.
        let evex_bytes = evex::encode_evex(&evex_info, Some(src1), None, Some(src2));
        buffer[offset..offset + evex_bytes.len()].copy_from_slice(&evex_bytes);
        offset += evex_bytes.len();

        buffer[offset] = avx_info.opcode;
        offset += 1;

        // mod=11: register-direct form.
        buffer[offset] = encode_modrm(
            3,
            evex::evex_register_id(src1),
            evex::evex_register_id(src2),
        );
        offset += 1;

        Ok(offset)
    }
1749
    /// Encodes an AVX-512 instruction whose second source is a memory
    /// operand: EVEX prefix, opcode, then ModRM/SIB/displacement via
    /// `encode_avx512_memory_operand`. Returns the bytes written.
    ///
    /// # Errors
    /// `InvalidMnemonic` for mnemonics missing from the AVX opcode table;
    /// memory-form errors are propagated from the operand encoder.
    #[allow(clippy::too_many_arguments)]
    fn encode_avx512_reg_mem(
        &self,
        dst: Register,
        src1: Register,
        mem: &MemoryOperand,
        mnemonic: Mnemonic,
        mask_reg: Option<Register>,
        zeroing: evex::EvexZeroingMode,
        buffer: &mut [u8],
    ) -> Result<usize> {
        let mut offset = 0usize;

        let avx_info =
            get_avx_opcode(mnemonic).ok_or(Error::EncodeFailed(EncodeError::InvalidMnemonic))?;

        // EVEX.L'L from the widest register operand.
        let vector_length = if dst.is_zmm() || src1.is_zmm() {
            EvexVectorLength::L512
        } else if dst.is_ymm() || src1.is_ymm() {
            EvexVectorLength::L256
        } else {
            EvexVectorLength::L128
        };

        // Map the table's legacy/VEX mandatory prefix to EVEX.pp.
        let evex_prefix = match avx_info.vex_prefix {
            vex::VexPrefix::None => EvexPrefix::None,
            vex::VexPrefix::Prefix66 => EvexPrefix::Prefix66,
            vex::VexPrefix::PrefixF3 => EvexPrefix::PrefixF3,
            vex::VexPrefix::PrefixF2 => EvexPrefix::PrefixF2,
        };

        // NOTE(review): as in the reg-reg path, `dst` rides in EVEX.vvvv
        // and `src1` ends up in ModRM.reg — confirm against encode_evex.
        let evex_info = EvexInfo::new()
            .with_map(EvexOpcodeMap::Map0F)
            .with_prefix(evex_prefix)
            .with_vvvv(dst)
            .with_mask_opt(mask_reg)
            .with_zeroing(zeroing);

        let evex_info = match vector_length {
            EvexVectorLength::L128 => evex_info.with_128bit(),
            EvexVectorLength::L256 => evex_info.with_256bit(),
            EvexVectorLength::L512 => evex_info.with_512bit(),
        };

        // mem.index / mem.base are passed (possibly Register::None) so the
        // EVEX prefix can pick up their X/B extension bits — presumably
        // encode_evex treats None as "no extension"; verify.
        let evex_bytes = evex::encode_evex(&evex_info, Some(src1), Some(mem.index), Some(mem.base));
        buffer[offset..offset + evex_bytes.len()].copy_from_slice(&evex_bytes);
        offset += evex_bytes.len();

        buffer[offset] = avx_info.opcode;
        offset += 1;

        // src1 fills the ModRM.reg field of the memory form.
        offset += self.encode_avx512_memory_operand(src1, mem, &mut buffer[offset..])?;

        Ok(offset)
    }
1816
    /// Encodes the ModRM / SIB / displacement tail of an EVEX-encoded
    /// (AVX-512) instruction's memory operand into `buffer`, returning the
    /// number of bytes written.
    ///
    /// Mirrors `encode_avx_memory_operand` but obtains register ids through
    /// `evex::evex_register_id`, since the extension bits live in the EVEX
    /// prefix emitted by the caller.
    ///
    /// # Errors
    /// `InvalidOperandCombination` when RSP/R12 (or an alias) is used as
    /// the index register.
    fn encode_avx512_memory_operand(
        &self,
        reg: Register,
        mem: &MemoryOperand,
        buffer: &mut [u8],
    ) -> Result<usize> {
        let mut offset = 0usize;
        let reg_id = evex::evex_register_id(reg);
        // Absent base/index default to 0; only emitted when present.
        let base_id = if mem.base != Register::None {
            evex::evex_register_id(mem.base)
        } else {
            0
        };
        let index_id = if mem.index != Register::None {
            evex::evex_register_id(mem.index)
        } else {
            0
        };

        // RIP-relative: mod=00, rm=101, fixed 4-byte displacement.
        if mem.is_rip_relative() {
            buffer[offset] = encode_modrm(0, reg_id, 5);
            offset += 1;
            buffer[offset..offset + 4].copy_from_slice(&(mem.displacement as i32).to_le_bytes());
            offset += 4;
            return Ok(offset);
        }

        // No base register: SIB with base=101 + mod=00 selects disp32 form.
        if mem.base == Register::None {
            if mem.index != Register::None {
                // [index*scale + disp32]
                if is_stack_register(mem.index) {
                    return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
                }
                buffer[offset] = encode_modrm(0, reg_id, 4);
                offset += 1;
                buffer[offset] = encode_sib(mem.scale, index_id, 5);
                offset += 1;
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            } else {
                // Absolute [disp32]: SIB index=100 ("none"), base=101.
                buffer[offset] = encode_modrm(0, reg_id, 4);
                offset += 1;
                buffer[offset] = encode_sib(1, 4, 5);
                offset += 1;
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            }
            return Ok(offset);
        }

        // RSP/R12 can never be an index register.
        if mem.index != Register::None && is_stack_register(mem.index) {
            return Err(Error::EncodeFailed(EncodeError::InvalidOperandCombination));
        }

        let needs_sib = mem.requires_sib();
        let disp_size = displacement_size(mem.displacement);

        // RBP/R13 bases have no mod=00 form; force a zero disp8 instead.
        let base_is_rbp = is_rbp_family(mem.base);

        let effective_disp_size = if base_is_rbp && disp_size == 0 {
            1
        } else {
            disp_size
        };

        // mod field: 00 = no disp, 01 = disp8, 10 = larger disp.
        let mod_val = match effective_disp_size {
            0 => 0,
            1 => 1,
            _ => 2,
        };

        if needs_sib {
            // rm=100 signals "SIB byte follows".
            buffer[offset] = encode_modrm(mod_val, reg_id, 4);
            offset += 1;

            // RBP/R13 with zero disp: base=101 plus the forced mod=01 still
            // means base + disp8, not "no base".
            let sib_base = if base_is_rbp && disp_size == 0 {
                5
            } else {
                base_id
            };
            buffer[offset] = encode_sib(mem.scale, index_id, sib_base);
            offset += 1;
        } else {
            if base_is_rbp && disp_size == 0 {
                // [rbp]/[r13] encoded as [rbp + disp8(0)].
                buffer[offset] = encode_modrm(1, reg_id, 5);
            } else {
                buffer[offset] = encode_modrm(mod_val, reg_id, base_id);
            }
            offset += 1;
        }

        // Trailing displacement bytes, little-endian.
        match effective_disp_size {
            1 => {
                buffer[offset] = mem.displacement as i8 as u8;
                offset += 1;
            }
            2 => {
                // NOTE(review): mod=10 with 32/64-bit address size implies a
                // 4-byte displacement; confirm displacement_size() can ever
                // return 2 here (see matching note in the AVX variant).
                buffer[offset..offset + 2]
                    .copy_from_slice(&(mem.displacement as i16).to_le_bytes());
                offset += 2;
            }
            4 | 8 => {
                // Displacements wider than 32 bits are truncated to disp32.
                buffer[offset..offset + 4]
                    .copy_from_slice(&(mem.displacement as i32).to_le_bytes());
                offset += 4;
            }
            _ => {}
        }

        Ok(offset)
    }
1942}
1943
1944fn is_stack_register(reg: Register) -> bool {
1946 matches!(
1947 reg,
1948 Register::RSP
1949 | Register::ESP
1950 | Register::SP
1951 | Register::R12
1952 | Register::R12D
1953 | Register::R12W
1954 )
1955}
1956
1957fn is_rbp_family(reg: Register) -> bool {
1959 matches!(
1960 reg,
1961 Register::RBP
1962 | Register::R13
1963 | Register::EBP
1964 | Register::R13D
1965 | Register::BP
1966 | Register::R13W
1967 )
1968}
1969
1970fn is_conditional_jump(mnemonic: Mnemonic) -> bool {
1972 matches!(
1973 mnemonic,
1974 Mnemonic::JO
1975 | Mnemonic::JNO
1976 | Mnemonic::JB
1977 | Mnemonic::JNB
1978 | Mnemonic::JAE
1979 | Mnemonic::JZ
1980 | Mnemonic::JE
1981 | Mnemonic::JNZ
1982 | Mnemonic::JNE
1983 | Mnemonic::JBE
1984 | Mnemonic::JNG
1985 | Mnemonic::JS
1986 | Mnemonic::JNS
1987 | Mnemonic::JP
1988 | Mnemonic::JNP
1989 | Mnemonic::JPE
1990 | Mnemonic::JPO
1991 | Mnemonic::JC
1992 | Mnemonic::JNC
1993 | Mnemonic::JL
1994 | Mnemonic::JGE
1995 | Mnemonic::JLE
1996 | Mnemonic::JG
1997 | Mnemonic::JNL
1998 )
1999}
2000
2001fn get_jcc_condition(mnemonic: Mnemonic) -> u8 {
2003 match mnemonic {
2004 Mnemonic::JO => 0,
2005 Mnemonic::JNO => 1,
2006 Mnemonic::JB | Mnemonic::JC => 2, Mnemonic::JNB | Mnemonic::JAE | Mnemonic::JNC | Mnemonic::JNL => 3, Mnemonic::JZ | Mnemonic::JE => 4,
2009 Mnemonic::JNZ | Mnemonic::JNE => 5,
2010 Mnemonic::JBE | Mnemonic::JNG => 6, Mnemonic::JS => 8,
2013 Mnemonic::JNS => 9,
2014 Mnemonic::JP | Mnemonic::JPE => 10,
2015 Mnemonic::JNP | Mnemonic::JPO => 11,
2016 Mnemonic::JL => 12,
2017 Mnemonic::JGE => 13,
2018 Mnemonic::JLE => 14,
2019 Mnemonic::JG => 15,
2020 _ => 0,
2021 }
2022}
2023
2024impl Default for Encoder {
2025 fn default() -> Self {
2026 Self::new(MachineMode::Long64, 64).unwrap()
2027 }
2028}
2029
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::vec;

    // --- Constructor / configuration tests ---

    #[test]
    fn test_encoder_new() {
        let encoder = Encoder::new(MachineMode::Long64, 64);
        assert!(encoder.is_ok());

        let encoder = encoder.unwrap();
        assert_eq!(encoder.machine_mode(), MachineMode::Long64);
        assert_eq!(encoder.stack_width(), 64);
    }

    #[test]
    fn test_encoder_invalid_stack_width() {
        // 128 is not a legal stack width (only 16/32/64 make sense on x86).
        let encoder = Encoder::new(MachineMode::Long64, 128);
        assert!(encoder.is_err());
    }

    #[test]
    fn test_encoder_default() {
        let encoder = Encoder::default();
        assert_eq!(encoder.machine_mode(), MachineMode::Long64);
        assert_eq!(encoder.stack_width(), 64);
    }

    // --- LEA / memory-operand addressing-form tests ---

    #[test]
    fn test_encode_lea_rip_relative() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let mem = MemoryOperand::base_disp(Register::RIP, 0x1000);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes[0], 0x48); // REX.W
        assert_eq!(bytes[1], 0x8D); // LEA opcode
        assert_eq!(bytes[2] & 0xC7, 0x05); // mod=00, rm=101 (RIP-relative)
        assert_eq!(bytes[3], 0x00); // disp32 = 0x00001000, little-endian
        assert_eq!(bytes[4], 0x10);
        assert_eq!(bytes[5], 0x00);
        assert_eq!(bytes[6], 0x00);
    }

    #[test]
    fn test_encode_lea_sib() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // [rax + rcx*4 + 0x10]
        let mem = MemoryOperand::full(Register::DS, Register::RAX, Register::RCX, 4, 0x10);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert!(bytes.len() >= 5); // REX + opcode + ModRM + SIB + disp8
        assert_eq!(bytes[0], 0x48); // REX.W
        assert_eq!(bytes[1], 0x8D); // LEA opcode
        assert_eq!(bytes[2] & 0x07, 0x04); // rm=100: SIB byte follows
    }

    #[test]
    fn test_encode_lea_base_disp() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // [rbp - 16]
        let mem = MemoryOperand::base_disp(Register::RBP, -16);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert!(bytes.len() >= 4); // REX + opcode + ModRM + disp8
        assert_eq!(bytes[0], 0x48); // REX.W
        assert_eq!(bytes[1], 0x8D); // LEA opcode
    }

    #[test]
    fn test_encode_lea_rsp_base() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // [rsp + 8] — RSP as base always requires a SIB byte.
        let mem = MemoryOperand::base_disp(Register::RSP, 8);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert!(bytes.len() >= 5); // REX + opcode + ModRM + SIB + disp8
        assert_eq!(bytes[0], 0x48); // REX.W
        assert_eq!(bytes[1], 0x8D); // LEA opcode
        assert_eq!(bytes[2] & 0x07, 0x04); // rm=100: SIB byte follows
    }

    #[test]
    fn test_encode_segment_override() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let mem = MemoryOperand::base_disp(Register::None, 0x1000).with_segment(Register::FS);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes[0], 0x64); // FS segment-override prefix
    }

    #[test]
    fn test_encode_lea_gs_segment() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let mem = MemoryOperand::base(Register::RCX).with_segment(Register::GS);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes[0], 0x65); // GS segment-override prefix
    }

    #[test]
    fn test_encode_absolute_addressing() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // [0x12345678] with no base/index — SIB disp32 form.
        let mem = MemoryOperand::base_disp(Register::None, 0x12345678);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert!(bytes.len() >= 8); // REX + opcode + ModRM + SIB + disp32
        assert_eq!(bytes[2] & 0x07, 0x04); // rm=100: SIB byte follows
    }

    #[test]
    fn test_encode_index_scale_no_base() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // [rcx*4 + 0x1000] — index without base forces SIB + disp32.
        let mem = MemoryOperand::full(Register::DS, Register::None, Register::RCX, 4, 0x1000);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert!(bytes.len() >= 8); // REX + opcode + ModRM + SIB + disp32
    }

    #[test]
    fn test_encode_invalid_rsp_index() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // RSP cannot be used as an index register.
        let mem = MemoryOperand::full(Register::DS, Register::RAX, Register::RSP, 4, 0);
        let request = EncoderRequest::new(Mnemonic::LEA)
            .with_reg(Register::RAX)
            .with_mem(mem);
        let result = encoder.encode(&request);

        assert!(result.is_err());
    }

    // --- Register-classification helper tests ---

    #[test]
    fn test_is_stack_register() {
        assert!(is_stack_register(Register::RSP));
        assert!(is_stack_register(Register::ESP));
        assert!(is_stack_register(Register::SP));
        assert!(is_stack_register(Register::R12));
        assert!(is_stack_register(Register::R12D));
        assert!(is_stack_register(Register::R12W));
        assert!(!is_stack_register(Register::RAX));
        assert!(!is_stack_register(Register::RCX));
    }

    #[test]
    fn test_is_rbp_family() {
        assert!(is_rbp_family(Register::RBP));
        assert!(is_rbp_family(Register::R13));
        assert!(is_rbp_family(Register::EBP));
        assert!(is_rbp_family(Register::R13D));
        assert!(is_rbp_family(Register::BP));
        assert!(is_rbp_family(Register::R13W));
        assert!(!is_rbp_family(Register::RAX));
        assert!(!is_rbp_family(Register::RSP));
    }

    // --- ALU reg, imm8 tests (opcode 0x83 /ext, sign-extended imm8) ---

    #[test]
    fn test_encode_add_reg_imm() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::ADD)
            .with_reg(Register::RAX)
            .with_imm(1);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0x83, ModRM /0 (ADD), imm8
        assert_eq!(bytes, vec![0x48, 0x83, 0xC0, 0x01]);
    }

    #[test]
    fn test_encode_sub_reg_imm() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::SUB)
            .with_reg(Register::RAX)
            .with_imm(5);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0x83, ModRM /5 (SUB), imm8
        assert_eq!(bytes, vec![0x48, 0x83, 0xE8, 0x05]);
    }

    #[test]
    fn test_encode_cmp_reg_imm() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::CMP)
            .with_reg(Register::RBX)
            .with_imm(0);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0x83, ModRM /7 (CMP), imm8
        assert_eq!(bytes, vec![0x48, 0x83, 0xFB, 0x00]);
    }

    #[test]
    fn test_encode_xor_reg_imm() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::XOR)
            .with_reg(Register::RCX)
            .with_imm(0xFF);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0x83, ModRM /6 (XOR), imm8
        assert_eq!(bytes, vec![0x48, 0x83, 0xF1, 0xFF]);
    }

    #[test]
    fn test_encode_and_reg_imm() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::AND)
            .with_reg(Register::RDX)
            .with_imm(0x0F);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0x83, ModRM /4 (AND), imm8
        assert_eq!(bytes, vec![0x48, 0x83, 0xE2, 0x0F]);
    }

    #[test]
    fn test_encode_or_reg_imm() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::OR)
            .with_reg(Register::RSI)
            .with_imm(0x80);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0x83, ModRM /1 (OR), imm8
        assert_eq!(bytes, vec![0x48, 0x83, 0xCE, 0x80]);
    }

    // --- ALU reg, reg tests ---

    #[test]
    fn test_encode_add_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::ADD)
            .with_reg(Register::RAX)
            .with_reg(Register::RBX);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, ADD r/m64,r64 (0x01), ModRM mod=11 reg=RBX rm=RAX
        assert_eq!(bytes, vec![0x48, 0x01, 0xD8]);
    }

    #[test]
    fn test_encode_sub_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::SUB)
            .with_reg(Register::RAX)
            .with_reg(Register::RBX);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, SUB r/m64,r64 (0x29)
        assert_eq!(bytes, vec![0x48, 0x29, 0xD8]);
    }

    #[test]
    fn test_encode_xor_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::XOR)
            .with_reg(Register::RAX)
            .with_reg(Register::RCX);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, XOR r/m64,r64 (0x31)
        assert_eq!(bytes, vec![0x48, 0x31, 0xC8]);
    }

    #[test]
    fn test_encode_and_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::AND)
            .with_reg(Register::RAX)
            .with_reg(Register::RDX);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, AND r/m64,r64 (0x21)
        assert_eq!(bytes, vec![0x48, 0x21, 0xD0]);
    }

    #[test]
    fn test_encode_or_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::OR)
            .with_reg(Register::RAX)
            .with_reg(Register::RSI);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, OR r/m64,r64 (0x09)
        assert_eq!(bytes, vec![0x48, 0x09, 0xF0]);
    }

    #[test]
    fn test_encode_cmp_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::CMP)
            .with_reg(Register::RAX)
            .with_reg(Register::RBX);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, CMP r/m64,r64 (0x39)
        assert_eq!(bytes, vec![0x48, 0x39, 0xD8]);
    }

    #[test]
    fn test_encode_alu_extended_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // ADD r8, 1 — REX.B extends the rm field.
        let request = EncoderRequest::new(Mnemonic::ADD)
            .with_reg(Register::R8)
            .with_imm(1);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x49, 0x83, 0xC0, 0x01]);

        // ADD rax, r8 — REX.R extends the reg field.
        let request = EncoderRequest::new(Mnemonic::ADD)
            .with_reg(Register::RAX)
            .with_reg(Register::R8);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x4C, 0x01, 0xC0]);
    }

    // --- PUSH / POP tests (opcode 0x50+r / 0x58+r) ---

    #[test]
    fn test_encode_push_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::PUSH).with_reg(Register::RAX);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x50]); // PUSH rax

        let request = EncoderRequest::new(Mnemonic::PUSH).with_reg(Register::RBX);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x53]); // PUSH rbx
    }

    #[test]
    fn test_encode_push_extended_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // R8-R15 need a REX.B prefix (0x41).
        let request = EncoderRequest::new(Mnemonic::PUSH).with_reg(Register::R8);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x41, 0x50]);

        let request = EncoderRequest::new(Mnemonic::PUSH).with_reg(Register::R15);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x41, 0x57]);
    }

    #[test]
    fn test_encode_pop_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::POP).with_reg(Register::RAX);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x58]); // POP rax

        let request = EncoderRequest::new(Mnemonic::POP).with_reg(Register::RBX);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x5B]); // POP rbx
    }

    #[test]
    fn test_encode_pop_extended_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // R8-R15 need a REX.B prefix (0x41).
        let request = EncoderRequest::new(Mnemonic::POP).with_reg(Register::R8);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x41, 0x58]);

        let request = EncoderRequest::new(Mnemonic::POP).with_reg(Register::R15);
        let bytes = encoder.encode(&request).unwrap();
        assert_eq!(bytes, vec![0x41, 0x5F]);
    }

    // --- Shift-by-CL tests (opcode 0xD3 /ext) ---

    #[test]
    fn test_encode_shl_cl() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::SHL)
            .with_reg(Register::RAX)
            .with_reg(Register::CL);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0xD3, ModRM /4 (SHL)
        assert_eq!(bytes, vec![0x48, 0xD3, 0xE0]);
    }

    #[test]
    fn test_encode_shr_cl() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::SHR)
            .with_reg(Register::RBX)
            .with_reg(Register::CL);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0xD3, ModRM /5 (SHR)
        assert_eq!(bytes, vec![0x48, 0xD3, 0xEB]);
    }

    #[test]
    fn test_encode_sar_cl() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::SAR)
            .with_reg(Register::RCX)
            .with_reg(Register::CL);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W, 0xD3, ModRM /7 (SAR)
        assert_eq!(bytes, vec![0x48, 0xD3, 0xF9]);
    }

    #[test]
    fn test_encode_shl_extended_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::SHL)
            .with_reg(Register::R8)
            .with_reg(Register::CL);
        let bytes = encoder.encode(&request).unwrap();
        // REX.W+B (0x49) for the extended register.
        assert_eq!(bytes, vec![0x49, 0xD3, 0xE0]);
    }

    // --- VEX (AVX) three-operand tests.
    // NOTE: this encoder places dst in VEX.vvvv and src1/src2 in
    // ModRM.reg/rm; the expected bytes below follow that convention. ---

    #[test]
    fn test_encode_vaddps_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VADDPS)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_reg(Register::XMM3);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5); // 2-byte VEX prefix
        assert_eq!(bytes[1], 0xF0); // vvvv=~1, L=0, pp=00
        assert_eq!(bytes[2], 0x58); // ADDPS opcode
        assert_eq!(bytes[3], 0xD3); // ModRM mod=11 reg=xmm2 rm=xmm3
    }

    #[test]
    fn test_encode_vaddpd_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VADDPD)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_reg(Register::XMM3);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5); // 2-byte VEX prefix
        assert_eq!(bytes[1], 0xF1); // pp=01 (0x66 mandatory prefix)
        assert_eq!(bytes[2], 0x58); // ADDPD opcode
        assert_eq!(bytes[3], 0xD3); // ModRM mod=11 reg=xmm2 rm=xmm3
    }

    #[test]
    fn test_encode_vsubps_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VSUBPS)
            .with_reg(Register::XMM0)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5); // 2-byte VEX prefix
        assert_eq!(bytes[1], 0xF8); // vvvv=~0, pp=00
        assert_eq!(bytes[2], 0x5C); // SUBPS opcode
        assert_eq!(bytes[3], 0xCA); // ModRM mod=11 reg=xmm1 rm=xmm2
    }

    #[test]
    fn test_encode_vsubpd_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VSUBPD)
            .with_reg(Register::XMM0)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5);
        assert_eq!(bytes[1], 0xF9); // pp=01 (0x66)
        assert_eq!(bytes[2], 0x5C); // SUBPD opcode
        assert_eq!(bytes[3], 0xCA);
    }

    #[test]
    fn test_encode_vmulps_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VMULPS)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_reg(Register::XMM3);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5);
        assert_eq!(bytes[1], 0xF0); // vvvv=~1, pp=00
        assert_eq!(bytes[2], 0x59); // MULPS opcode
        assert_eq!(bytes[3], 0xD3);
    }

    #[test]
    fn test_encode_vmulpd_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VMULPD)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_reg(Register::XMM3);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5);
        assert_eq!(bytes[1], 0xF1); // pp=01 (0x66)
        assert_eq!(bytes[2], 0x59); // MULPD opcode
        assert_eq!(bytes[3], 0xD3);
    }

    #[test]
    fn test_encode_vxorps_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VXORPS)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_reg(Register::XMM3);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5);
        assert_eq!(bytes[1], 0xF0);
        assert_eq!(bytes[2], 0x57); // XORPS opcode
        assert_eq!(bytes[3], 0xD3);
    }

    #[test]
    fn test_encode_vxorpd_reg_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        let request = EncoderRequest::new(Mnemonic::VXORPD)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_reg(Register::XMM3);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5);
        assert_eq!(bytes[1], 0xF1); // pp=01 (0x66)
        assert_eq!(bytes[2], 0x57); // XORPD opcode
        assert_eq!(bytes[3], 0xD3);
    }

    #[test]
    fn test_encode_vaddps_extended_reg() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // XMM8-XMM15 force the 3-byte VEX form (0xC4).
        let request = EncoderRequest::new(Mnemonic::VADDPS)
            .with_reg(Register::XMM8)
            .with_reg(Register::XMM9)
            .with_reg(Register::XMM10);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 5);
        assert_eq!(bytes[0], 0xC4); // 3-byte VEX prefix
        assert_eq!(bytes[1], 0x41); // R/X/B extension bits + map 0F
        assert_eq!(bytes[2], 0x38); // W=0, vvvv=~8, L=0, pp=00
        assert_eq!(bytes[3], 0x58); // ADDPS opcode
        assert_eq!(bytes[4], 0xCA); // ModRM mod=11 reg=xmm9 rm=xmm10
    }

    #[test]
    fn test_encode_vaddps_reg_mem() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // vaddps xmm1, xmm2, [rax]
        let mem = MemoryOperand::base(Register::RAX);
        let request = EncoderRequest::new(Mnemonic::VADDPS)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 4);
        assert_eq!(bytes[0], 0xC5); // 2-byte VEX prefix
        assert_eq!(bytes[1], 0xF0); // vvvv=~1, pp=00
        assert_eq!(bytes[2], 0x58); // ADDPS opcode
        assert_eq!(bytes[3], 0x10); // ModRM mod=00 reg=xmm2 rm=rax
    }

    #[test]
    fn test_encode_vaddps_reg_mem_disp() {
        let encoder = Encoder::new(MachineMode::Long64, 64).unwrap();

        // vaddps xmm1, xmm2, [rax + 0x10]
        let mem = MemoryOperand::base_disp(Register::RAX, 0x10);
        let request = EncoderRequest::new(Mnemonic::VADDPS)
            .with_reg(Register::XMM1)
            .with_reg(Register::XMM2)
            .with_mem(mem);
        let bytes = encoder.encode(&request).unwrap();

        assert_eq!(bytes.len(), 5);
        assert_eq!(bytes[0], 0xC5);
        assert_eq!(bytes[1], 0xF0);
        assert_eq!(bytes[2], 0x58);
        assert_eq!(bytes[3], 0x50); // ModRM mod=01 reg=xmm2 rm=rax
        assert_eq!(bytes[4], 0x10); // disp8
    }
}