mod encoder_data;
mod enums;
pub(crate) mod handlers_table;
#[cfg(feature = "op_code_info")]
mod instruction_fmt;
mod mem_op;
#[cfg(feature = "op_code_info")]
mod mnemonic_str_tbl;
#[cfg(feature = "op_code_info")]
mod op_code;
#[cfg(feature = "op_code_info")]
mod op_code_data;
#[cfg(feature = "op_code_info")]
mod op_code_fmt;
mod op_code_handler;
#[cfg(feature = "op_code_info")]
pub(crate) mod op_code_tbl;
#[cfg(feature = "op_code_info")]
mod op_kind_tables;
mod ops;
mod ops_tables;
#[cfg(test)]
pub(crate) mod tests;

pub use crate::encoder::enums::*;
use crate::encoder::handlers_table::*;
pub use crate::encoder::mem_op::*;
#[cfg(feature = "op_code_info")]
pub use crate::encoder::op_code::*;
use crate::encoder::op_code_handler::OpCodeHandler;
use crate::iced_constants::IcedConstants;
use crate::iced_error::IcedError;
use crate::instruction_internal;
use crate::*;
use alloc::string::String;
use alloc::vec::Vec;
use core::mem;

// Number of immediate bytes written for each `ImmSize` variant (indexed by `ImmSize as usize`)
#[rustfmt::skip]
static IMM_SIZES: [u32; 19] = [
    0, 1, 2, 4, 8, 3, 2, 4, 6, 1, 1, 1, 2, 2, 2, 4, 4, 1, 1,
];
/// Encodes instructions decoded by the decoder or instructions created by other code
#[allow(missing_debug_implementations)]
pub struct Encoder {
    current_rip: u64,
    buffer: Vec<u8>,
    handlers: &'static [&'static OpCodeHandler; IcedConstants::CODE_ENUM_COUNT],
    handler: &'static OpCodeHandler,
    error_message: String,
    bitness: u32,
    eip: u32,
    displ_addr: u32,
    imm_addr: u32,
    immediate: u32,
    immediate_hi: u32,
    displ: u32,
    displ_hi: u32,
    op_code: u32,
    internal_vex_wig_lig: u32,
    internal_vex_lig: u32,
    internal_evex_wig: u32,
    internal_evex_lig: u32,
    #[cfg(feature = "mvex")]
    internal_mvex_wig: u32,
    #[cfg(not(feature = "mvex"))]
    #[allow(dead_code)]
    internal_mvex_wig: (),
    prevent_vex2: u32,
    opsize16_flags: u32,
    opsize32_flags: u32,
    adrsize16_flags: u32,
    adrsize32_flags: u32,
    encoder_flags: u32,
    displ_size: DisplSize,
    imm_size: ImmSize,
    mod_rm: u8,
    sib: u8,
}

impl Encoder {
    pub(super) const ERROR_ONLY_1632_BIT_MODE: &'static str = "The instruction can only be used in 16/32-bit mode";
    pub(super) const ERROR_ONLY_64_BIT_MODE: &'static str = "The instruction can only be used in 64-bit mode";

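    /// Creates an encoder.
    ///
    /// # Panics
    /// Panics if `bitness` is not 16, 32 or 64.
    ///
    /// # Arguments
    /// * `bitness`: 16, 32 or 64
    ///
    /// A minimal usage sketch (not compiled here; it only assumes this crate's public `Encoder` API):
    ///
    /// ```ignore
    /// use iced_x86::Encoder;
    ///
    /// // 64-bit encoder writing into a fresh, empty buffer
    /// let encoder = Encoder::new(64);
    /// assert_eq!(encoder.bitness(), 64);
    /// ```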
    #[must_use]
    #[inline]
    #[allow(clippy::unwrap_used)]
    pub fn new(bitness: u32) -> Self {
        Self::try_new(bitness).unwrap()
    }

    /// Creates an encoder, returning an error instead of panicking if `bitness` is invalid
    #[inline]
    pub fn try_new(bitness: u32) -> Result<Self, IcedError> {
        Self::try_with_capacity(bitness, 0)
    }

    /// Creates an encoder whose output buffer starts with a capacity of `capacity` bytes
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn try_with_capacity(bitness: u32, capacity: usize) -> Result<Self, IcedError> {
        if bitness != 16 && bitness != 32 && bitness != 64 {
            return Err(IcedError::new("Invalid bitness"));
        }

        let opsize16_flags = if bitness != 16 { EncoderFlags::P66 } else { 0 };
        let opsize32_flags = if bitness == 16 { EncoderFlags::P66 } else { 0 };
        let adrsize16_flags = if bitness != 16 { EncoderFlags::P67 } else { 0 };
        let adrsize32_flags = if bitness != 32 { EncoderFlags::P67 } else { 0 };

        let handlers = HANDLERS_TABLE.as_ref();

        #[cfg(feature = "mvex")]
        const INTERNAL_MVEX_WIG: u32 = 0;
        #[cfg(not(feature = "mvex"))]
        const INTERNAL_MVEX_WIG: () = ();
        Ok(Self {
            current_rip: 0,
            handler: handlers[0],
            handlers,
            buffer: if capacity == 0 { Vec::new() } else { Vec::with_capacity(capacity) },
            error_message: String::new(),
            bitness,
            eip: 0,
            displ_addr: 0,
            imm_addr: 0,
            immediate: 0,
            immediate_hi: 0,
            displ: 0,
            displ_hi: 0,
            op_code: 0,
            internal_vex_wig_lig: 0,
            internal_vex_lig: 0,
            internal_evex_wig: 0,
            internal_evex_lig: 0,
            internal_mvex_wig: INTERNAL_MVEX_WIG,
            prevent_vex2: 0,
            opsize16_flags,
            opsize32_flags,
            adrsize16_flags,
            adrsize32_flags,
            encoder_flags: 0,
            displ_size: DisplSize::default(),
            imm_size: ImmSize::default(),
            mod_rm: 0,
            sib: 0,
        })
    }

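    /// Encodes `instruction` at address `rip`, appends the encoded bytes to the internal buffer
    /// and returns the instruction length, or an error message wrapped in an [`IcedError`].
    ///
    /// A minimal usage sketch (not compiled here; it assumes this crate's `Instruction::with2()`
    /// constructor and the `Code`/`Register` enums):
    ///
    /// ```ignore
    /// use iced_x86::{Code, Encoder, Instruction, Register};
    ///
    /// let mut encoder = Encoder::new(64);
    /// // mov rax, rcx
    /// let instr = Instruction::with2(Code::Mov_r64_rm64, Register::RAX, Register::RCX).unwrap();
    /// let len = encoder.encode(&instr, 0x1234_5678).unwrap();
    /// assert_eq!(len, 3);
    /// assert_eq!(encoder.take_buffer(), vec![0x48, 0x8B, 0xC1]);
    /// ```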
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn encode(&mut self, instruction: &Instruction, rip: u64) -> Result<usize, IcedError> {
        self.current_rip = rip;
        self.eip = rip as u32;

        self.encoder_flags = EncoderFlags::NONE;
        self.displ_size = DisplSize::None;
        self.imm_size = ImmSize::None;
        self.mod_rm = 0;
        self.sib = 0;

        let handler = self.handlers[instruction.code() as usize];
        self.handler = handler;
        self.op_code = handler.op_code;
        let group_index = handler.group_index;
        if group_index >= 0 {
            self.encoder_flags |= EncoderFlags::MOD_RM;
            self.mod_rm = (group_index as u8) << 3;
        }
        let rm_group_index = handler.rm_group_index;
        if rm_group_index >= 0 {
            self.encoder_flags |= EncoderFlags::MOD_RM;
            self.mod_rm |= (rm_group_index as u8) | 0xC0;
        }

        match handler.enc_flags3 & (EncFlags3::BIT16OR32 | EncFlags3::BIT64) {
            EncFlags3::BIT16OR32 => {
                if self.bitness == 64 {
                    self.set_error_message_str(Self::ERROR_ONLY_1632_BIT_MODE);
                }
            }

            EncFlags3::BIT64 => {
                if self.bitness != 64 {
                    self.set_error_message_str(Self::ERROR_ONLY_64_BIT_MODE);
                }
            }

            _ => {}
        }

        match handler.op_size {
            CodeSize::Unknown => {}
            CodeSize::Code16 => self.encoder_flags |= self.opsize16_flags,
            CodeSize::Code32 => self.encoder_flags |= self.opsize32_flags,
            CodeSize::Code64 => {
                if (handler.enc_flags3 & EncFlags3::DEFAULT_OP_SIZE64) == 0 {
                    self.encoder_flags |= EncoderFlags::W
                }
            }
        }

        match handler.addr_size {
            CodeSize::Unknown | CodeSize::Code64 => {}
            CodeSize::Code16 => self.encoder_flags |= self.adrsize16_flags,
            CodeSize::Code32 => self.encoder_flags |= self.adrsize32_flags,
        }

        if !handler.is_special_instr {
            let ops = &*handler.operands;
            for (i, op) in ops.iter().copied().enumerate() {
                op.encode(self, instruction, i as u32);
            }

            if (handler.enc_flags3 & EncFlags3::FWAIT) != 0 {
                self.write_byte_internal(0x9B);
            }

            (handler.encode)(handler, self, instruction);

            let op_code = self.op_code;
            if !handler.is_2byte_opcode {
                self.write_byte_internal(op_code);
            } else {
                self.write_byte_internal(op_code >> 8);
                self.write_byte_internal(op_code);
            }

            if (self.encoder_flags & (EncoderFlags::MOD_RM | EncoderFlags::DISPL)) != 0 {
                self.write_mod_rm();
            }

            if self.imm_size != ImmSize::None {
                self.write_immediate();
            }
        } else {
            (handler.encode)(handler, self, instruction);
        }

        let instr_len = (self.current_rip as usize).wrapping_sub(rip as usize);
        if instr_len > IcedConstants::MAX_INSTRUCTION_LENGTH && !handler.is_special_instr {
            self.set_error_message(format!("Instruction length > {} bytes", IcedConstants::MAX_INSTRUCTION_LENGTH));
        }
        if !self.error_message.is_empty() {
            Err(IcedError::with_string(mem::take(&mut self.error_message)))
        } else {
            Ok(instr_len)
        }
    }

    #[inline]
    pub(super) fn set_error_message(&mut self, message: String) {
        if self.error_message.is_empty() {
            self.error_message = message;
        }
    }

    #[inline]
    pub(super) fn set_error_message_str(&mut self, message: &str) {
        if self.error_message.is_empty() {
            self.error_message.push_str(message);
        }
    }

    #[must_use]
    #[inline]
    pub(super) fn verify_op_kind(&mut self, operand: u32, expected: OpKind, actual: OpKind) -> bool {
        if expected == actual {
            true
        } else {
            if cfg!(debug_assertions) {
                self.set_error_message(format!("Operand {}: Expected: {:?}, actual: {:?}", operand, expected, actual));
            } else {
                self.set_error_message(format!(
                    "Operand {}: Expected: OpKind value {}, actual: OpKind value {}",
                    operand, expected as u32, actual as u32
                ));
            }
            false
        }
    }

    #[must_use]
    #[inline]
    pub(super) fn verify_register(&mut self, operand: u32, expected: Register, actual: Register) -> bool {
        if expected == actual {
            true
        } else {
            if cfg!(debug_assertions) {
                self.set_error_message(format!("Operand {}: Expected: {:?}, actual: {:?}", operand, expected, actual));
            } else {
                self.set_error_message(format!(
                    "Operand {}: Expected: Register value {}, actual: Register value {}",
                    operand, expected as u32, actual as u32
                ));
            }
            false
        }
    }

    #[must_use]
    #[inline]
    pub(super) fn verify_register_range(&mut self, operand: u32, register: Register, reg_lo: Register, mut reg_hi: Register) -> bool {
        if self.bitness != 64 && reg_hi as u32 > (reg_lo as u32).wrapping_add(7) {
            // Not 64-bit mode: only the first 8 registers of the range are usable.
            // SAFETY: reg_lo + 7 < reg_hi and reg_hi is a valid `Register` variant, so reg_lo + 7 is too.
            reg_hi = unsafe { mem::transmute((reg_lo as RegisterUnderlyingType).wrapping_add(7)) };
        }
        if reg_lo <= register && register <= reg_hi {
            true
        } else {
            if cfg!(debug_assertions) {
                self.set_error_message(format!(
                    "Operand {}: Register {:?} is not between {:?} and {:?} (inclusive)",
                    operand, register, reg_lo, reg_hi
                ));
            } else {
                self.set_error_message(format!(
                    "Operand {}: Register {} is not between {} and {} (inclusive)",
                    operand, register as u32, reg_lo as u32, reg_hi as u32
                ));
            }
            false
        }
    }

    pub(super) fn add_branch(&mut self, op_kind: OpKind, imm_size: u32, instruction: &Instruction, operand: u32) {
        if !self.verify_op_kind(operand, op_kind, instruction.op_kind(operand)) {
            return;
        }

        let target;
        match imm_size {
            1 => match op_kind {
                OpKind::NearBranch16 => {
                    self.encoder_flags |= self.opsize16_flags;
                    self.imm_size = ImmSize::RipRelSize1_Target16;
                    self.immediate = instruction.near_branch16() as u32;
                }

                OpKind::NearBranch32 => {
                    self.encoder_flags |= self.opsize32_flags;
                    self.imm_size = ImmSize::RipRelSize1_Target32;
                    self.immediate = instruction.near_branch32();
                }

                OpKind::NearBranch64 => {
                    self.imm_size = ImmSize::RipRelSize1_Target64;
                    target = instruction.near_branch64();
                    self.immediate = target as u32;
                    self.immediate_hi = (target >> 32) as u32;
                }

                _ => unreachable!(),
            },

            2 => match op_kind {
                OpKind::NearBranch16 => {
                    self.encoder_flags |= self.opsize16_flags;
                    self.imm_size = ImmSize::RipRelSize2_Target16;
                    self.immediate = instruction.near_branch16() as u32;
                }

                _ => unreachable!(),
            },

            4 => match op_kind {
                OpKind::NearBranch32 => {
                    self.encoder_flags |= self.opsize32_flags;
                    self.imm_size = ImmSize::RipRelSize4_Target32;
                    self.immediate = instruction.near_branch32();
                }

                OpKind::NearBranch64 => {
                    self.imm_size = ImmSize::RipRelSize4_Target64;
                    target = instruction.near_branch64();
                    self.immediate = target as u32;
                    self.immediate_hi = (target >> 32) as u32;
                }

                _ => unreachable!(),
            },
            _ => unreachable!(),
        }
    }

    pub(super) fn add_branch_x(&mut self, imm_size: u32, instruction: &Instruction, operand: u32) {
        if self.bitness == 64 {
            if !self.verify_op_kind(operand, OpKind::NearBranch64, instruction.op_kind(operand)) {
                return;
            }

            let target = instruction.near_branch64();
            match imm_size {
                2 => {
                    self.encoder_flags |= EncoderFlags::P66;
                    self.imm_size = ImmSize::RipRelSize2_Target64;
                    self.immediate = target as u32;
                    self.immediate_hi = (target >> 32) as u32;
                }

                4 => {
                    self.imm_size = ImmSize::RipRelSize4_Target64;
                    self.immediate = target as u32;
                    self.immediate_hi = (target >> 32) as u32;
                }

                _ => unreachable!(),
            }
        } else {
            if !self.verify_op_kind(operand, OpKind::NearBranch32, instruction.op_kind(operand)) {
                return;
            }

            match imm_size {
                2 => {
                    const _: () = assert!(EncoderFlags::P66 == 0x80);
                    self.encoder_flags |= (self.bitness & 0x20) << 2;
                    self.imm_size = ImmSize::RipRelSize2_Target32;
                    self.immediate = instruction.near_branch32();
                }

                4 => {
                    const _: () = assert!(EncoderFlags::P66 == 0x80);
                    self.encoder_flags |= (self.bitness & 0x10) << 3;
                    self.imm_size = ImmSize::RipRelSize4_Target32;
                    self.immediate = instruction.near_branch32();
                }

                _ => unreachable!(),
            }
        }
    }

    pub(super) fn add_branch_disp(&mut self, displ_size: u32, instruction: &Instruction, operand: u32) {
        debug_assert!(displ_size == 2 || displ_size == 4);
        let op_kind;
        match displ_size {
            2 => {
                op_kind = OpKind::NearBranch16;
                self.imm_size = ImmSize::Size2;
                self.immediate = instruction.near_branch16() as u32;
            }

            4 => {
                op_kind = OpKind::NearBranch32;
                self.imm_size = ImmSize::Size4;
                self.immediate = instruction.near_branch32();
            }

            _ => unreachable!(),
        }
        let _ = self.verify_op_kind(operand, op_kind, instruction.op_kind(operand));
    }

    pub(super) fn add_far_branch(&mut self, instruction: &Instruction, operand: u32, size: u32) {
        if size == 2 {
            if !self.verify_op_kind(operand, OpKind::FarBranch16, instruction.op_kind(operand)) {
                return;
            }
            self.imm_size = ImmSize::Size2_2;
            self.immediate = instruction.far_branch16() as u32;
            self.immediate_hi = instruction.far_branch_selector() as u32;
        } else {
            debug_assert_eq!(size, 4);
            if !self.verify_op_kind(operand, OpKind::FarBranch32, instruction.op_kind(operand)) {
                return;
            }
            self.imm_size = ImmSize::Size4_2;
            self.immediate = instruction.far_branch32();
            self.immediate_hi = instruction.far_branch_selector() as u32;
        }
        if self.bitness != size.wrapping_mul(8) {
            self.encoder_flags |= EncoderFlags::P66;
        }
    }

    pub(super) fn set_addr_size(&mut self, reg_size: u32) {
        debug_assert!(reg_size == 2 || reg_size == 4 || reg_size == 8);
        if self.bitness == 64 {
            if reg_size == 2 {
                self.set_error_message(format!("Invalid register size: {}, must be 32-bit or 64-bit", reg_size.wrapping_mul(8)));
            } else if reg_size == 4 {
                self.encoder_flags |= EncoderFlags::P67;
            }
        } else {
            if reg_size == 8 {
                self.set_error_message(format!("Invalid register size: {}, must be 16-bit or 32-bit", reg_size.wrapping_mul(8)));
            } else if self.bitness == 16 {
                if reg_size == 4 {
                    self.encoder_flags |= EncoderFlags::P67;
                }
            } else {
                debug_assert_eq!(self.bitness, 32);
                if reg_size == 2 {
                    self.encoder_flags |= EncoderFlags::P67;
                }
            }
        }
    }

    pub(super) fn add_abs_mem(&mut self, instruction: &Instruction, operand: u32) {
        self.encoder_flags |= EncoderFlags::DISPL;
        let op_kind = instruction.op_kind(operand);
        if op_kind == OpKind::Memory {
            if instruction.memory_base() != Register::None || instruction.memory_index() != Register::None {
                self.set_error_message(format!("Operand {}: Absolute addresses can't have base and/or index regs", operand));
                return;
            }
            if instruction.memory_index_scale() != 1 {
                self.set_error_message(format!("Operand {}: Absolute addresses must have scale == *1", operand));
                return;
            }
            match instruction.memory_displ_size() {
                2 => {
                    if self.bitness == 64 {
                        self.set_error_message(format!("Operand {}: 16-bit abs addresses can't be used in 64-bit mode", operand));
                        return;
                    }
                    if self.bitness == 32 {
                        self.encoder_flags |= EncoderFlags::P67;
                    }
                    self.displ_size = DisplSize::Size2;
                    if instruction.memory_displacement64() > u16::MAX as u64 {
                        self.set_error_message(format!("Operand {}: Displacement must fit in a u16", operand));
                        return;
                    }
                    self.displ = instruction.memory_displacement32();
                }
                4 => {
                    self.encoder_flags |= self.adrsize32_flags;
                    self.displ_size = DisplSize::Size4;
                    if instruction.memory_displacement64() > u32::MAX as u64 {
                        self.set_error_message(format!("Operand {}: Displacement must fit in a u32", operand));
                        return;
                    }
                    self.displ = instruction.memory_displacement32();
                }
                8 => {
                    if self.bitness != 64 {
                        self.set_error_message(format!("Operand {}: 64-bit abs address is only available in 64-bit mode", operand));
                        return;
                    }
                    self.displ_size = DisplSize::Size8;
                    let addr = instruction.memory_displacement64();
                    self.displ = addr as u32;
                    self.displ_hi = (addr >> 32) as u32;
                }
                _ => self.set_error_message(format!(
                    "Operand {}: Instruction::memory_displ_size() must be initialized to 2 (16-bit), 4 (32-bit) or 8 (64-bit)",
                    operand
                )),
            }
        } else {
            if cfg!(debug_assertions) {
                self.set_error_message(format!("Operand {}: Expected OpKind::Memory, actual: {:?}", operand, op_kind));
            } else {
                self.set_error_message(format!("Operand {}: Expected OpKind::Memory, actual: OpKind value {}", operand, op_kind as u32));
            }
        }
    }

    #[allow(clippy::too_many_arguments)]
    pub(super) fn add_mod_rm_register(&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register) {
        if !self.verify_op_kind(operand, OpKind::Register, instruction.op_kind(operand)) {
            return;
        }
        let reg = instruction.op_register(operand);
        if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
            return;
        }
        let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
        if reg_lo == Register::AL {
            if reg >= Register::SPL {
                reg_num -= 4;
                self.encoder_flags |= EncoderFlags::REX;
            } else if reg >= Register::AH {
                self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
            }
        }
        debug_assert!(reg_num <= 31);
        self.mod_rm |= ((reg_num & 7) << 3) as u8;
        self.encoder_flags |= EncoderFlags::MOD_RM;
        const _: () = assert!(EncoderFlags::R == 4);
        self.encoder_flags |= (reg_num & 8) >> 1;
        const _: () = assert!(EncoderFlags::R2 == 0x200);
        self.encoder_flags |= (reg_num & 0x10) << (9 - 4);
    }

    pub(super) fn add_reg(&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register) {
        if !self.verify_op_kind(operand, OpKind::Register, instruction.op_kind(operand)) {
            return;
        }
        let reg = instruction.op_register(operand);
        if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
            return;
        }
        let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
        if reg_lo == Register::AL {
            if reg >= Register::SPL {
                reg_num -= 4;
                self.encoder_flags |= EncoderFlags::REX;
            } else if reg >= Register::AH {
                self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
            }
        }
        debug_assert!(reg_num <= 15);
        self.op_code |= reg_num & 7;
        const _: () = assert!(EncoderFlags::B == 1);
        debug_assert!(reg_num <= 15);
        self.encoder_flags |= reg_num >> 3;
    }

    #[inline]
    pub(super) fn add_reg_or_mem(
        &mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register, allow_mem_op: bool, allow_reg_op: bool,
    ) {
        self.add_reg_or_mem_full(instruction, operand, reg_lo, reg_hi, Register::None, Register::None, allow_mem_op, allow_reg_op);
    }

    #[allow(clippy::too_many_arguments)]
    pub(super) fn add_reg_or_mem_full(
        &mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register, vsib_index_reg_lo: Register,
        vsib_index_reg_hi: Register, allow_mem_op: bool, allow_reg_op: bool,
    ) {
        let op_kind = instruction.op_kind(operand);
        self.encoder_flags |= EncoderFlags::MOD_RM;
        if op_kind == OpKind::Register {
            if !allow_reg_op {
                self.set_error_message(format!("Operand {}: register operand is not allowed", operand));
                return;
            }
            let reg = instruction.op_register(operand);
            if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
                return;
            }
            let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
            if reg_lo == Register::AL {
                if reg >= Register::R8L {
                    reg_num -= 4;
                } else if reg >= Register::SPL {
                    reg_num -= 4;
                    self.encoder_flags |= EncoderFlags::REX;
                } else if reg >= Register::AH {
                    self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
                }
            }
            self.mod_rm |= (reg_num & 7) as u8;
            self.mod_rm |= 0xC0;
            const _: () = assert!(EncoderFlags::B == 1);
            const _: () = assert!(EncoderFlags::X == 2);
            self.encoder_flags |= (reg_num >> 3) & 3;
            debug_assert!(reg_num <= 31);
        } else if op_kind == OpKind::Memory {
            if !allow_mem_op {
                self.set_error_message(format!("Operand {}: memory operand is not allowed", operand));
                return;
            }
            if instruction.memory_size().is_broadcast() {
                self.encoder_flags |= EncoderFlags::BROADCAST;
            }

            let mut code_size = instruction.code_size();
            if code_size == CodeSize::Unknown {
                code_size = if self.bitness == 64 {
                    CodeSize::Code64
                } else if self.bitness == 32 {
                    CodeSize::Code32
                } else {
                    debug_assert_eq!(self.bitness, 16);
                    CodeSize::Code16
                };
            }
            let addr_size = instruction_internal::get_address_size_in_bytes(
                instruction.memory_base(),
                instruction.memory_index(),
                instruction.memory_displ_size(),
                code_size,
            )
            .wrapping_mul(8);
            if addr_size != self.bitness {
                self.encoder_flags |= EncoderFlags::P67;
            }
            if (self.encoder_flags & EncoderFlags::REG_IS_MEMORY) != 0 {
                let reg_size = Encoder::get_register_op_size(instruction);
                if reg_size != addr_size {
                    self.set_error_message(format!("Operand {}: Register operand size must equal memory addressing mode (16/32/64)", operand));
                    return;
                }
            }
            if addr_size == 16 {
                if vsib_index_reg_lo != Register::None {
                    self.set_error_message(format!(
                        "Operand {}: VSIB operands can't use 16-bit addressing. It must be 32-bit or 64-bit addressing",
                        operand
                    ));
                    return;
                }
                self.add_mem_op16(instruction, operand);
            } else {
                self.add_mem_op(instruction, operand, addr_size, vsib_index_reg_lo, vsib_index_reg_hi);
            }
        } else {
            if cfg!(debug_assertions) {
                self.set_error_message(format!("Operand {}: Expected a register or memory operand, but op_kind is {:?}", operand, op_kind));
            } else {
                self.set_error_message(format!("Operand {}: Expected a register or memory operand, but op_kind is {}", operand, op_kind as u32));
            }
        }
    }

    #[must_use]
    fn get_register_op_size(instruction: &Instruction) -> u32 {
        debug_assert_eq!(instruction.op0_kind(), OpKind::Register);
        if instruction.op0_kind() == OpKind::Register {
            let reg = instruction.op0_register();
            if reg.is_gpr64() {
                64
            } else if reg.is_gpr32() {
                32
            } else if reg.is_gpr16() {
                16
            } else {
                0
            }
        } else {
            0
        }
    }

    #[must_use]
    fn try_convert_to_disp8n(&mut self, instruction: &Instruction, displ: i32) -> Option<i8> {
        if let Some(try_convert_to_disp8n) = self.handler.try_convert_to_disp8n {
            (try_convert_to_disp8n)(self.handler, self, instruction, displ)
        } else if i8::MIN as i32 <= displ && displ <= i8::MAX as i32 {
            Some(displ as i8)
        } else {
            None
        }
    }

    #[allow(clippy::needless_return)]
    fn add_mem_op16(&mut self, instruction: &Instruction, operand: u32) {
        if self.bitness == 64 {
            self.set_error_message(format!("Operand {}: 16-bit addressing can't be used by 64-bit code", operand));
            return;
        }
        let base = instruction.memory_base();
        let index = instruction.memory_index();
        let mut displ_size = instruction.memory_displ_size();
        if base == Register::BX && index == Register::SI {
            // Nothing to do: [bx+si] is rm == 0
        } else if base == Register::BX && index == Register::DI {
            self.mod_rm |= 1;
        } else if base == Register::BP && index == Register::SI {
            self.mod_rm |= 2;
        } else if base == Register::BP && index == Register::DI {
            self.mod_rm |= 3;
        } else if base == Register::SI && index == Register::None {
            self.mod_rm |= 4;
        } else if base == Register::DI && index == Register::None {
            self.mod_rm |= 5;
        } else if base == Register::BP && index == Register::None {
            self.mod_rm |= 6;
        } else if base == Register::BX && index == Register::None {
            self.mod_rm |= 7;
        } else if base == Register::None && index == Register::None {
            self.mod_rm |= 6;
            self.displ_size = DisplSize::Size2;
            if instruction.memory_displacement64() > u16::MAX as u64 {
                self.set_error_message(format!("Operand {}: Displacement must fit in a u16", operand));
                return;
            }
            self.displ = instruction.memory_displacement32();
        } else {
            if cfg!(debug_assertions) {
                self.set_error_message(format!("Operand {}: Invalid 16-bit base + index registers: base={:?}, index={:?}", operand, base, index));
            } else {
                self.set_error_message(format!(
                    "Operand {}: Invalid 16-bit base + index registers: base={}, index={}",
                    operand, base as u32, index as u32
                ));
            }
            return;
        }

        if base != Register::None || index != Register::None {
            if (instruction.memory_displacement64() as i64) < i16::MIN as i64 || (instruction.memory_displacement64() as i64) > u16::MAX as i64 {
                self.set_error_message(format!("Operand {}: Displacement must fit in an i16 or a u16", operand));
                return;
            }
            self.displ = instruction.memory_displacement32();
            // [bp] can't be encoded without a displacement (mod=00, rm=110 means disp16 only), so encode it as [bp+00]
            if displ_size == 0 && base == Register::BP && index == Register::None {
                displ_size = 1;
                if self.displ != 0 {
                    self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
                    return;
                }
            }
            if displ_size == 1 {
                if let Some(compressed_value) = self.try_convert_to_disp8n(instruction, self.displ as i16 as i32) {
                    self.displ = compressed_value as u32;
                } else {
                    displ_size = 2;
                }
            }
            if displ_size == 0 {
                if self.displ != 0 {
                    self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
                    return;
                }
            } else if displ_size == 1 {
                if (self.displ as i32) < i8::MIN as i32 || (self.displ as i32) > i8::MAX as i32 {
                    self.set_error_message(format!("Operand {}: Displacement must fit in an i8", operand));
                    return;
                }
                self.mod_rm |= 0x40;
                self.displ_size = DisplSize::Size1;
            } else if displ_size == 2 {
                self.mod_rm |= 0x80;
                self.displ_size = DisplSize::Size2;
            } else {
                self.set_error_message(format!("Operand {}: Invalid displacement size: {}, must be 0, 1, or 2", operand, displ_size));
            }
        }
    }

    fn add_mem_op(&mut self, instruction: &Instruction, operand: u32, addr_size: u32, vsib_index_reg_lo: Register, vsib_index_reg_hi: Register) {
        debug_assert!(addr_size == 32 || addr_size == 64);
        if self.bitness != 64 && addr_size == 64 {
            self.set_error_message(format!("Operand {}: 64-bit addressing can only be used in 64-bit mode", operand));
            return;
        }

        let base = instruction.memory_base();
        let index = instruction.memory_index();
        let mut displ_size = instruction.memory_displ_size();

        let base_lo;
        let base_hi;
        let index_lo;
        let index_hi;
        if addr_size == 64 {
            base_lo = Register::RAX;
            base_hi = Register::R15;
        } else {
            debug_assert_eq!(addr_size, 32);
            base_lo = Register::EAX;
            base_hi = Register::R15D;
        }
        if vsib_index_reg_lo != Register::None {
            index_lo = vsib_index_reg_lo;
            index_hi = vsib_index_reg_hi;
        } else {
            index_lo = base_lo;
            index_hi = base_hi;
        }
        if base != Register::None && base != Register::RIP && base != Register::EIP && !self.verify_register_range(operand, base, base_lo, base_hi) {
            return;
        }
        if index != Register::None && !self.verify_register_range(operand, index, index_lo, index_hi) {
            return;
        }

        if displ_size != 0 && displ_size != 1 && displ_size != 4 && displ_size != 8 {
            self.set_error_message(format!("Operand {}: Invalid displ size: {}, must be 0, 1, 4, 8", operand, displ_size));
            return;
        }
        if base == Register::RIP || base == Register::EIP {
            if index != Register::None {
                self.set_error_message(format!("Operand {}: RIP relative addressing can't use an index register", operand));
                return;
            }
            if instruction_internal::internal_get_memory_index_scale(instruction) != 0 {
                self.set_error_message(format!("Operand {}: RIP relative addressing must use scale *1", operand));
                return;
            }
            if self.bitness != 64 {
                self.set_error_message(format!("Operand {}: RIP/EIP relative addressing is only available in 64-bit mode", operand));
                return;
            }
            if (self.encoder_flags & EncoderFlags::MUST_USE_SIB) != 0 {
                self.set_error_message(format!("Operand {}: RIP/EIP relative addressing isn't supported", operand));
                return;
            }
            self.mod_rm |= 5;
            let target = instruction.memory_displacement64();
            if base == Register::RIP {
                self.displ_size = DisplSize::RipRelSize4_Target64;
                self.displ = target as u32;
                self.displ_hi = (target >> 32) as u32;
            } else {
                self.displ_size = DisplSize::RipRelSize4_Target32;
                if target > u32::MAX as u64 {
                    self.set_error_message(format!("Operand {}: Target address doesn't fit in 32 bits: 0x{:X}", operand, target));
                    return;
                }
                self.displ = target as u32;
            }
            return;
        }
        let scale = instruction_internal::internal_get_memory_index_scale(instruction);
        self.displ = instruction.memory_displacement32();
        if addr_size == 64 {
            if (instruction.memory_displacement64() as i64) < i32::MIN as i64 || (instruction.memory_displacement64() as i64) > i32::MAX as i64 {
                self.set_error_message(format!("Operand {}: Displacement must fit in an i32", operand));
                return;
            }
        } else {
            debug_assert_eq!(addr_size, 32);
            if (instruction.memory_displacement64() as i64) < i32::MIN as i64 || (instruction.memory_displacement64() as i64) > u32::MAX as i64 {
                self.set_error_message(format!("Operand {}: Displacement must fit in an i32 or a u32", operand));
                return;
            }
        }
        if base == Register::None && index == Register::None {
            if vsib_index_reg_lo != Register::None {
                self.set_error_message(format!("Operand {}: VSIB addressing can't use an offset-only address", operand));
                return;
            }
            if self.bitness == 64 || scale != 0 || (self.encoder_flags & EncoderFlags::MUST_USE_SIB) != 0 {
                self.mod_rm |= 4;
                self.displ_size = DisplSize::Size4;
                self.encoder_flags |= EncoderFlags::SIB;
                self.sib = (0x25 | (scale << 6)) as u8;
                return;
            } else {
                self.mod_rm |= 5;
                self.displ_size = DisplSize::Size4;
                return;
            }
        }

        let base_num = if base == Register::None { -1 } else { (base as i32).wrapping_sub(base_lo as i32) };
        let index_num = if index == Register::None { -1 } else { (index as i32).wrapping_sub(index_lo as i32) };

        // Bases with low 3 bits == 101 (ebp/rbp/r13/r13d) can't be encoded with mod == 00
        // (that encoding means disp32 or RIP-relative), so force a disp8
        if displ_size == 0 && (base_num & 7) == 5 {
            displ_size = 1;
            if self.displ != 0 {
                self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
                return;
            }
        }

        if displ_size == 1 {
            if let Some(compressed_value) = self.try_convert_to_disp8n(instruction, self.displ as i32) {
                self.displ = compressed_value as u32;
            } else {
                displ_size = addr_size / 8;
            }
        }

        if base == Register::None {
            debug_assert!(index != Register::None);
            self.displ_size = DisplSize::Size4;
        } else if displ_size == 1 {
            if (self.displ as i32) < i8::MIN as i32 || (self.displ as i32) > i8::MAX as i32 {
                self.set_error_message(format!("Operand {}: Displacement must fit in an i8", operand));
                return;
            }
            self.mod_rm |= 0x40;
            self.displ_size = DisplSize::Size1;
        } else if displ_size == addr_size / 8 {
            self.mod_rm |= 0x80;
            self.displ_size = DisplSize::Size4;
        } else if displ_size == 0 {
            if self.displ != 0 {
                self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
                return;
            }
        } else {
            self.set_error_message_str("Invalid memory_displ_size() value");
        }

        if index == Register::None && (base_num & 7) != 4 && scale == 0 && (self.encoder_flags & EncoderFlags::MUST_USE_SIB) == 0 {
            debug_assert!(base != Register::None);
            self.mod_rm |= (base_num & 7) as u8;
        } else {
            self.encoder_flags |= EncoderFlags::SIB;
            self.sib = (scale << 6) as u8;
            self.mod_rm |= 4;
            if index == Register::RSP || index == Register::ESP {
                self.set_error_message(format!("Operand {}: ESP/RSP can't be used as an index register", operand));
                return;
            }
            if base_num < 0 {
                self.sib |= 5;
            } else {
                self.sib |= (base_num & 7) as u8;
            }
            if index_num < 0 {
                self.sib |= 0x20;
            } else {
                self.sib |= ((index_num & 7) << 3) as u8;
            }
        }

        if base_num >= 0 {
            const _: () = assert!(EncoderFlags::B == 1);
            debug_assert!(base_num <= 15);
            self.encoder_flags |= (base_num as u32) >> 3;
        }
        if index_num >= 0 {
            const _: () = assert!(EncoderFlags::X == 2);
            self.encoder_flags |= ((index_num as u32) >> 2) & 2;
            self.encoder_flags |= ((index_num as u32) & 0x10) << EncoderFlags::VVVVV_SHIFT;
            debug_assert!(index_num <= 31);
        }
    }

    fn write_prefixes(&mut self, instruction: &Instruction, can_write_f3: bool) {
        debug_assert_eq!(self.handler.is_special_instr, false);
        let seg = instruction.segment_prefix();
        if seg != Register::None {
            static SEGMENT_OVERRIDES: [u8; 6] = [0x26, 0x2E, 0x36, 0x3E, 0x64, 0x65];
            debug_assert!((seg as usize).wrapping_sub(Register::ES as usize) < SEGMENT_OVERRIDES.len());
            const _: () = assert!(Register::ES as u32 + 1 == Register::CS as u32);
            const _: () = assert!(Register::ES as u32 + 2 == Register::SS as u32);
            const _: () = assert!(Register::ES as u32 + 3 == Register::DS as u32);
            const _: () = assert!(Register::ES as u32 + 4 == Register::FS as u32);
            const _: () = assert!(Register::ES as u32 + 5 == Register::GS as u32);
            self.write_byte_internal(unsafe { *SEGMENT_OVERRIDES.get_unchecked((seg as usize).wrapping_sub(Register::ES as usize)) } as u32);
        }
        if (self.encoder_flags & EncoderFlags::PF0) != 0 || instruction.has_lock_prefix() {
            self.write_byte_internal(0xF0);
        }
        if (self.encoder_flags & EncoderFlags::P66) != 0 {
            self.write_byte_internal(0x66);
        }
        if (self.encoder_flags & EncoderFlags::P67) != 0 {
            self.write_byte_internal(0x67);
        }
        if can_write_f3 && instruction.has_repe_prefix() {
            self.write_byte_internal(0xF3);
        }
        if instruction.has_repne_prefix() {
            self.write_byte_internal(0xF2);
        }
    }

    fn write_mod_rm(&mut self) {
        debug_assert_eq!(self.handler.is_special_instr, false);
        debug_assert!((self.encoder_flags & (EncoderFlags::MOD_RM | EncoderFlags::DISPL)) != 0);
        if (self.encoder_flags & EncoderFlags::MOD_RM) != 0 {
            self.write_byte_internal(self.mod_rm as u32);
            if (self.encoder_flags & EncoderFlags::SIB) != 0 {
                self.write_byte_internal(self.sib as u32);
            }
        }

        let mut diff4;
        self.displ_addr = self.current_rip as u32;
        match self.displ_size {
            DisplSize::None => {}
            DisplSize::Size1 => self.write_byte_internal(self.displ),

            DisplSize::Size2 => {
                diff4 = self.displ;
                self.write_byte_internal(diff4);
                self.write_byte_internal(diff4 >> 8);
            }

            DisplSize::Size4 => {
                diff4 = self.displ;
                self.write_byte_internal(diff4);
                self.write_byte_internal(diff4 >> 8);
                self.write_byte_internal(diff4 >> 16);
                self.write_byte_internal(diff4 >> 24);
            }

            DisplSize::Size8 => {
                diff4 = self.displ;
                self.write_byte_internal(diff4);
                self.write_byte_internal(diff4 >> 8);
                self.write_byte_internal(diff4 >> 16);
                self.write_byte_internal(diff4 >> 24);
                diff4 = self.displ_hi;
                self.write_byte_internal(diff4);
                self.write_byte_internal(diff4 >> 8);
                self.write_byte_internal(diff4 >> 16);
                self.write_byte_internal(diff4 >> 24);
            }

            DisplSize::RipRelSize4_Target32 => {
                let eip = (self.current_rip as u32).wrapping_add(4).wrapping_add(IMM_SIZES[self.imm_size as usize]);
                diff4 = self.displ.wrapping_sub(eip);
                self.write_byte_internal(diff4);
                self.write_byte_internal(diff4 >> 8);
                self.write_byte_internal(diff4 >> 16);
                self.write_byte_internal(diff4 >> 24);
            }

            DisplSize::RipRelSize4_Target64 => {
                let rip = self.current_rip.wrapping_add(4).wrapping_add(IMM_SIZES[self.imm_size as usize] as u64);
                let diff8 = ((((self.displ_hi as u64) << 32) | self.displ as u64).wrapping_sub(rip)) as i64;
                if diff8 < i32::MIN as i64 || diff8 > i32::MAX as i64 {
                    self.set_error_message(format!(
                        "RIP relative distance is too far away: next_ip: 0x{:016X} target: 0x{:08X}, diff = {}, diff must fit in an i32",
                        rip,
                        ((self.displ_hi as u64) << 32) | self.displ as u64,
                        diff8
                    ));
                }
                diff4 = diff8 as u32;
                self.write_byte_internal(diff4);
                self.write_byte_internal(diff4 >> 8);
                self.write_byte_internal(diff4 >> 16);
                self.write_byte_internal(diff4 >> 24);
            }
        }
    }

    fn write_immediate(&mut self) {
        debug_assert_eq!(self.handler.is_special_instr, false);
        let ip;
        let eip;
        let rip;
        let diff2;
        let diff4;
        let diff8;
        let mut value;
        self.imm_addr = self.current_rip as u32;
        match self.imm_size {
            ImmSize::None => {}

            ImmSize::Size1 | ImmSize::SizeIbReg | ImmSize::Size1OpCode => {
                self.write_byte_internal(self.immediate);
            }

            ImmSize::Size2 => {
                value = self.immediate;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
            }

            ImmSize::Size4 => {
                value = self.immediate;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(value >> 16);
                self.write_byte_internal(value >> 24);
            }

            ImmSize::Size8 => {
                value = self.immediate;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(value >> 16);
                self.write_byte_internal(value >> 24);
                value = self.immediate_hi;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(value >> 16);
                self.write_byte_internal(value >> 24);
            }

            ImmSize::Size2_1 => {
                value = self.immediate;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(self.immediate_hi);
            }

            ImmSize::Size1_1 => {
                self.write_byte_internal(self.immediate);
                self.write_byte_internal(self.immediate_hi);
            }

            ImmSize::Size2_2 => {
                value = self.immediate;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                value = self.immediate_hi;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
            }

            ImmSize::Size4_2 => {
                value = self.immediate;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(value >> 16);
                self.write_byte_internal(value >> 24);
                value = self.immediate_hi;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
            }

            ImmSize::RipRelSize1_Target16 => {
                ip = (self.current_rip as u32).wrapping_add(1) as u16;
                diff2 = (self.immediate as i16).wrapping_sub(ip as i16);
                if diff2 < i8::MIN as i16 || diff2 > i8::MAX as i16 {
                    self.set_error_message(format!(
                        "Branch distance is too far away: next_ip: 0x{:04X} target: 0x{:04X}, diff = {}, diff must fit in an i8",
                        ip, self.immediate as u16, diff2
                    ));
                }
                self.write_byte_internal(diff2 as u32);
            }

            ImmSize::RipRelSize1_Target32 => {
                eip = (self.current_rip as u32).wrapping_add(1);
                diff4 = self.immediate.wrapping_sub(eip) as i32;
                if diff4 < i8::MIN as i32 || diff4 > i8::MAX as i32 {
                    self.set_error_message(format!(
                        "Branch distance is too far away: next_ip: 0x{:08X} target: 0x{:08X}, diff = {}, diff must fit in an i8",
                        eip, self.immediate, diff4
                    ));
                }
                self.write_byte_internal(diff4 as u32);
            }

            ImmSize::RipRelSize1_Target64 => {
                rip = self.current_rip.wrapping_add(1);
                diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
                if diff8 < i8::MIN as i64 || diff8 > i8::MAX as i64 {
                    self.set_error_message(format!(
                        "Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i8",
                        rip,
                        ((self.immediate_hi as u64) << 32) | (self.immediate as u64),
                        diff8
                    ));
                }
                self.write_byte_internal(diff8 as u32);
            }

            ImmSize::RipRelSize2_Target16 => {
                eip = (self.current_rip as u32).wrapping_add(2);
                value = self.immediate.wrapping_sub(eip);
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
            }

            ImmSize::RipRelSize2_Target32 => {
                eip = (self.current_rip as u32).wrapping_add(2);
                diff4 = self.immediate.wrapping_sub(eip) as i32;
                if diff4 < i16::MIN as i32 || diff4 > i16::MAX as i32 {
                    self.set_error_message(format!(
                        "Branch distance is too far away: next_ip: 0x{:08X} target: 0x{:08X}, diff = {}, diff must fit in an i16",
                        eip, self.immediate, diff4
                    ));
                }
                value = diff4 as u32;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
            }

            ImmSize::RipRelSize2_Target64 => {
                rip = self.current_rip.wrapping_add(2);
                diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
                if diff8 < i16::MIN as i64 || diff8 > i16::MAX as i64 {
                    self.set_error_message(format!(
                        "Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i16",
                        rip,
                        ((self.immediate_hi as u64) << 32) | (self.immediate as u64),
                        diff8
                    ));
                }
                value = diff8 as u32;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
            }

            ImmSize::RipRelSize4_Target32 => {
                eip = (self.current_rip as u32).wrapping_add(4);
                value = self.immediate.wrapping_sub(eip);
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(value >> 16);
                self.write_byte_internal(value >> 24);
            }

            ImmSize::RipRelSize4_Target64 => {
                rip = self.current_rip.wrapping_add(4);
                diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
                if diff8 < i32::MIN as i64 || diff8 > i32::MAX as i64 {
                    self.set_error_message(format!(
                        "Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i32",
                        rip,
                        ((self.immediate_hi as u64) << 32) | (self.immediate as u64),
                        diff8
                    ));
                }
                value = diff8 as u32;
                self.write_byte_internal(value);
                self.write_byte_internal(value >> 8);
                self.write_byte_internal(value >> 16);
                self.write_byte_internal(value >> 24);
            }
        }
    }

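    /// Writes the next byte to the output buffer. This is normally only needed when encoding
    /// "special" instructions whose handlers emit raw bytes directly.
    ///
    /// A minimal usage sketch (not compiled here; it only assumes this crate's public `Encoder` API):
    ///
    /// ```ignore
    /// let mut encoder = iced_x86::Encoder::new(64);
    /// encoder.write_u8(0x90); // raw NOP byte
    /// assert_eq!(encoder.take_buffer(), vec![0x90]);
    /// ```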
    #[inline]
    pub fn write_u8(&mut self, value: u8) {
        self.write_byte_internal(value as u32);
    }

    #[inline]
    pub(super) fn write_byte_internal(&mut self, value: u32) {
        self.buffer.push(value as u8);
        self.current_rip = self.current_rip.wrapping_add(1);
    }

    #[cfg(all(feature = "encoder", feature = "block_encoder"))]
    #[inline]
    pub(super) fn position(&self) -> usize {
        self.buffer.len()
    }

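    /// Returns the internal output buffer and replaces it with an empty one. Pass the buffer back
    /// with [`set_buffer()`] if the allocation should be reused.
    ///
    /// [`set_buffer()`]: #method.set_buffer
    ///
    /// A minimal usage sketch (not compiled here; it only assumes this crate's public `Encoder` API):
    ///
    /// ```ignore
    /// let mut encoder = iced_x86::Encoder::new(64);
    /// encoder.write_u8(0xCC);
    /// let mut buffer = encoder.take_buffer();
    /// assert_eq!(buffer, vec![0xCC]);
    /// buffer.clear();
    /// encoder.set_buffer(buffer); // give the allocation back to the encoder
    /// ```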
    #[must_use]
    #[inline]
    pub fn take_buffer(&mut self) -> Vec<u8> {
        mem::take(&mut self.buffer)
    }

    /// Sets the internal output buffer that encoded bytes get appended to (see [`take_buffer()`])
    ///
    /// [`take_buffer()`]: #method.take_buffer
    #[inline]
    pub fn set_buffer(&mut self, buffer: Vec<u8>) {
        self.buffer = buffer;
    }

    #[cfg(all(feature = "encoder", feature = "block_encoder"))]
    #[inline]
    pub(super) fn clear_buffer(&mut self) {
        self.buffer.clear()
    }

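    /// Returns the offsets and sizes of the displacement and immediate constants in the most
    /// recently encoded instruction, relative to the start of that instruction. The caller can
    /// use them to patch the encoded bytes after [`encode()`] returns.
    ///
    /// [`encode()`]: #method.encode
    ///
    /// A minimal usage sketch (not compiled here; it assumes this crate's `Instruction::with2()`
    /// and `MemoryOperand` constructors):
    ///
    /// ```ignore
    /// use iced_x86::{Code, Encoder, Instruction, MemoryOperand, Register};
    ///
    /// let mut encoder = Encoder::new(64);
    /// // mov rax, [rcx+0x12345678]
    /// let mem = MemoryOperand::with_base_displ(Register::RCX, 0x1234_5678);
    /// let instr = Instruction::with2(Code::Mov_r64_rm64, Register::RAX, mem).unwrap();
    /// let _len = encoder.encode(&instr, 0).unwrap();
    /// let offsets = encoder.get_constant_offsets();
    /// assert!(offsets.has_displacement());
    /// ```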
    #[must_use]
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn get_constant_offsets(&self) -> ConstantOffsets {
        let mut co = ConstantOffsets::default();

        match self.displ_size {
            DisplSize::None => {}

            DisplSize::Size1 => {
                co.displacement_size = 1;
                co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
            }

            DisplSize::Size2 => {
                co.displacement_size = 2;
                co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
            }

            DisplSize::Size4 | DisplSize::RipRelSize4_Target32 | DisplSize::RipRelSize4_Target64 => {
                co.displacement_size = 4;
                co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
            }

            DisplSize::Size8 => {
                co.displacement_size = 8;
                co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
            }
        }

        match self.imm_size {
            ImmSize::None | ImmSize::SizeIbReg | ImmSize::Size1OpCode => {}

            ImmSize::Size1 | ImmSize::RipRelSize1_Target16 | ImmSize::RipRelSize1_Target32 | ImmSize::RipRelSize1_Target64 => {
                co.immediate_size = 1;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
            }

            ImmSize::Size1_1 => {
                co.immediate_size = 1;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
                co.immediate_size2 = 1;
                co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(1) as u8;
            }

            ImmSize::Size2 | ImmSize::RipRelSize2_Target16 | ImmSize::RipRelSize2_Target32 | ImmSize::RipRelSize2_Target64 => {
                co.immediate_size = 2;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
            }

            ImmSize::Size2_1 => {
                co.immediate_size = 2;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
                co.immediate_size2 = 1;
                co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(2) as u8;
            }

            ImmSize::Size2_2 => {
                co.immediate_size = 2;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
                co.immediate_size2 = 2;
                co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(2) as u8;
            }

            ImmSize::Size4 | ImmSize::RipRelSize4_Target32 | ImmSize::RipRelSize4_Target64 => {
                co.immediate_size = 4;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
            }

            ImmSize::Size4_2 => {
                co.immediate_size = 4;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
                co.immediate_size2 = 2;
                co.immediate_offset2 = self.imm_addr.wrapping_sub(self.eip).wrapping_add(4) as u8;
            }

            ImmSize::Size8 => {
                co.immediate_size = 8;
                co.immediate_offset = self.imm_addr.wrapping_sub(self.eip) as u8;
            }
        }

        co
    }

    /// Gets the "prevent VEX2 prefix" option (see [`set_prevent_vex2()`])
    ///
    /// [`set_prevent_vex2()`]: #method.set_prevent_vex2
    #[must_use]
    #[inline]
    pub const fn prevent_vex2(&self) -> bool {
        self.prevent_vex2 != 0
    }

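    /// If `true`, the encoder doesn't use the 2-byte VEX prefix (`C5`) even when it would be
    /// valid; the 3-byte VEX prefix (`C4`) is used instead.
    ///
    /// A minimal usage sketch (not compiled here; it only assumes this crate's public `Encoder` API):
    ///
    /// ```ignore
    /// let mut encoder = iced_x86::Encoder::new(64);
    /// encoder.set_prevent_vex2(true);
    /// assert!(encoder.prevent_vex2());
    /// ```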
    #[inline]
    pub fn set_prevent_vex2(&mut self, new_value: bool) {
        self.prevent_vex2 = if new_value { u32::MAX } else { 0 };
    }

    /// Value of the `VEX.W` bit to use when the instruction ignores the `W` bit (WIG), 0 or 1
    #[must_use]
    #[inline]
    pub const fn vex_wig(&self) -> u32 {
        (self.internal_vex_wig_lig >> 7) & 1
    }

    /// Sets the `VEX.W` bit to use when the instruction ignores the `W` bit (WIG), 0 or 1
    #[inline]
    pub fn set_vex_wig(&mut self, new_value: u32) {
        self.internal_vex_wig_lig = (self.internal_vex_wig_lig & !0x80) | ((new_value & 1) << 7);
    }

    /// Value of the `VEX.L` bit to use when the instruction ignores the `L` bit (LIG), 0 or 1
    #[must_use]
    #[inline]
    pub const fn vex_lig(&self) -> u32 {
        (self.internal_vex_wig_lig >> 2) & 1
    }

    /// Sets the `VEX.L` bit to use when the instruction ignores the `L` bit (LIG), 0 or 1
    #[inline]
    pub fn set_vex_lig(&mut self, new_value: u32) {
        self.internal_vex_wig_lig = (self.internal_vex_wig_lig & !4) | ((new_value & 1) << 2);
        self.internal_vex_lig = (new_value & 1) << 2;
    }

    /// Value of the `EVEX.W` bit to use when the instruction ignores the `W` bit (WIG), 0 or 1
    #[must_use]
    #[inline]
    pub const fn evex_wig(&self) -> u32 {
        self.internal_evex_wig >> 7
    }

    /// Sets the `EVEX.W` bit to use when the instruction ignores the `W` bit (WIG), 0 or 1
    #[inline]
    pub fn set_evex_wig(&mut self, new_value: u32) {
        self.internal_evex_wig = (new_value & 1) << 7;
    }

    /// Value of the `EVEX.L'L` bits to use when the instruction ignores the `L'L` bits (LIG), 0..=3
    #[must_use]
    #[inline]
    pub const fn evex_lig(&self) -> u32 {
        self.internal_evex_lig >> 5
    }

    /// Sets the `EVEX.L'L` bits to use when the instruction ignores the `L'L` bits (LIG), 0..=3
    #[inline]
    pub fn set_evex_lig(&mut self, new_value: u32) {
        self.internal_evex_lig = (new_value & 3) << 5
    }

    /// Value of the `MVEX.W` bit to use when the instruction ignores the `W` bit (WIG), 0 or 1
    #[must_use]
    #[inline]
    #[cfg(feature = "mvex")]
    pub const fn mvex_wig(&self) -> u32 {
        self.internal_mvex_wig >> 7
    }

    /// Sets the `MVEX.W` bit to use when the instruction ignores the `W` bit (WIG), 0 or 1
    #[inline]
    #[cfg(feature = "mvex")]
    pub fn set_mvex_wig(&mut self, new_value: u32) {
        self.internal_mvex_wig = (new_value & 1) << 7;
    }

    /// Gets the bitness (16, 32 or 64) that was passed to the constructor
    #[must_use]
    #[inline]
    pub const fn bitness(&self) -> u32 {
        self.bitness
    }
}