mod enums;
mod handlers;
mod handlers_3dnow;
#[cfg(not(feature = "no_evex"))]
mod handlers_evex;
mod handlers_fpu;
mod handlers_legacy;
mod handlers_tables;
#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop")))]
mod handlers_vex;
mod table_de;
#[cfg(test)]
pub(crate) mod tests;
use self::handlers::OpCodeHandler;
use self::handlers_tables::TABLES;
use super::iced_constants::IcedConstants;
use super::tuple_type_tbl::get_disp8n;
use super::*;
#[cfg(has_fused_iterator)]
use core::iter::FusedIterator;
use core::{cmp, fmt, mem, ptr, u32};
// 256-bit lookup tables (8 × u32, one bit per byte value 0x00..=0xFF) used by the
// prefix-scanning loop: bit `b` is set iff byte `b` is an instruction prefix in
// that mode. Lookup: `(PREFIXES[b / 32] >> (b & 31)) & 1`.
// 16/32-bit modes: segment overrides (26/2E/36/3E/64/65), 66, 67, F0, F2, F3.
static PREFIXES1632: [u32; 8] = [0x0000_0000, 0x4040_4040, 0x0000_0000, 0x0000_00F0, 0x0000_0000, 0x0000_0000, 0x0000_0000, 0x000D_0000];
// 64-bit mode: same as above plus the REX prefixes 0x40..=0x4F (third word).
static PREFIXES64: [u32; 8] = [0x0000_0000, 0x4040_4040, 0x0000_FFFF, 0x0000_00F0, 0x0000_0000, 0x0000_0000, 0x0000_0000, 0x000D_0000];
// (base, index) register pair for each 16-bit addressing ModRM.rm value 0..=7:
// [BX+SI], [BX+DI], [BP+SI], [BP+DI], [SI], [DI], [BP] (pure disp16 when mod=0),
// [BX]. See `read_op_mem_16()`.
static MEM_REGS_16: [(Register, Register); 8] = [
	(Register::BX, Register::SI),
	(Register::BX, Register::DI),
	(Register::BP, Register::SI),
	(Register::BP, Register::DI),
	(Register::SI, Register::None),
	(Register::DI, Register::None),
	(Register::BP, Register::None),
	(Register::BX, Register::None),
];
/// Operand or address size (16/32/64 bits) tracked in the decoder state.
#[derive(Copy, Clone, Eq, PartialEq)]
#[allow(dead_code)]
pub(crate) enum OpSize {
	Size16,
	Size32,
	Size64,
}
// `OpSize` variant names indexed by discriminant; used by its `Debug` impl.
#[cfg_attr(feature = "cargo-fmt", rustfmt::skip)]
static GEN_DEBUG_OP_SIZE: [&str; 3] = [
	"Size16",
	"Size32",
	"Size64",
];
impl fmt::Debug for OpSize {
	/// Writes the variant name (e.g. `Size16`) via the lookup table.
	#[inline]
	fn fmt<'a>(&self, f: &mut fmt::Formatter<'a>) -> fmt::Result {
		f.write_str(GEN_DEBUG_OP_SIZE[*self as usize])
	}
}
impl Default for OpSize {
#[cfg_attr(has_must_use, must_use)]
#[inline]
fn default() -> Self {
OpSize::Size16
}
}
/// Error kind of the most recently decoded instruction, see `Decoder::last_error()`.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(all(not(feature = "exhaustive_enums"), has_non_exhaustive), non_exhaustive)]
pub enum DecoderError {
	/// No error: the last decoded instruction was valid.
	None = 0,
	/// The bytes did not encode a valid instruction.
	InvalidInstruction = 1,
	/// There weren't enough bytes left in the buffer to decode the instruction.
	NoMoreBytes = 2,
}
// `DecoderError` variant names indexed by discriminant; used by its `Debug` impl.
#[cfg_attr(feature = "cargo-fmt", rustfmt::skip)]
static GEN_DEBUG_DECODER_ERROR: [&str; 3] = [
	"None",
	"InvalidInstruction",
	"NoMoreBytes",
];
impl fmt::Debug for DecoderError {
	/// Writes the variant name (e.g. `NoMoreBytes`) via the lookup table.
	#[inline]
	fn fmt<'a>(&self, f: &mut fmt::Formatter<'a>) -> fmt::Result {
		f.write_str(GEN_DEBUG_DECODER_ERROR[*self as usize])
	}
}
impl Default for DecoderError {
#[cfg_attr(has_must_use, must_use)]
#[inline]
fn default() -> Self {
DecoderError::None
}
}
#[allow(missing_copy_implementations)]
#[allow(missing_debug_implementations)]
/// Decoder option flags, OR-ed together and passed to `Decoder::new()`.
///
/// Within this file only `NO_INVALID_CHECK` is interpreted directly (it clears
/// `invalid_check_mask`, disabling most invalid-encoding checks); the remaining
/// flags are read by the opcode handlers. Judging by their names they enable
/// decoding of vendor-specific, deprecated or undocumented instructions —
/// confirm against the handler tables.
pub struct DecoderOptions;
impl DecoderOptions {
	/// No option is enabled.
	pub const NONE: u32 = 0x0000_0000;
	/// Skip most invalid-encoding checks (see `Decoder::invalid_check_mask`).
	pub const NO_INVALID_CHECK: u32 = 0x0000_0001;
	pub const AMD: u32 = 0x0000_0002;
	/// Deprecated alias of `AMD` (same bit value).
	#[deprecated(since = "1.8.0", note = "Use AMD instead")]
	pub const AMD_BRANCHES: u32 = 0x0000_0002;
	pub const FORCE_RESERVED_NOP: u32 = 0x0000_0004;
	pub const UMOV: u32 = 0x0000_0008;
	pub const XBTS: u32 = 0x0000_0010;
	pub const CMPXCHG486A: u32 = 0x0000_0020;
	pub const OLD_FPU: u32 = 0x0000_0040;
	pub const PCOMMIT: u32 = 0x0000_0080;
	pub const LOADALL286: u32 = 0x0000_0100;
	pub const LOADALL386: u32 = 0x0000_0200;
	pub const CL1INVMB: u32 = 0x0000_0400;
	pub const MOV_TR: u32 = 0x0000_0800;
	pub const JMPE: u32 = 0x0000_1000;
	pub const NO_PAUSE: u32 = 0x0000_2000;
	pub const NO_WBNOINVD: u32 = 0x0000_4000;
	pub const NO_LOCK_MOV_CR: u32 = 0x0000_8000;
	/// Deprecated alias of `NO_LOCK_MOV_CR` (same bit value).
	#[deprecated(since = "1.9.0", note = "Use NO_LOCK_MOV_CR instead")]
	pub const NO_LOCK_MOV_CR0: u32 = 0x0000_8000;
	pub const NO_MPFX_0FBC: u32 = 0x0001_0000;
	pub const NO_MPFX_0FBD: u32 = 0x0002_0000;
	pub const NO_LAHF_SAHF_64: u32 = 0x0004_0000;
	pub const MPX: u32 = 0x0008_0000;
	pub const CYRIX: u32 = 0x0010_0000;
	pub const CYRIX_SMINT_0F7E: u32 = 0x0020_0000;
	pub const CYRIX_DMI: u32 = 0x0040_0000;
	pub const ALTINST: u32 = 0x0080_0000;
}
/// Per-opcode-handler flag bits describing how LOCK/XACQUIRE/XRELEASE prefixes
/// interact with the opcode; consumed by `Decoder::set_xacquire_xrelease()`.
pub(crate) struct HandlerFlags;
#[allow(dead_code)]
impl HandlerFlags {
	pub(crate) const NONE: u32 = 0x0000_0000;
	/// XACQUIRE (F2) may be reported for this instruction.
	pub(crate) const XACQUIRE: u32 = 0x0000_0001;
	/// XRELEASE (F3) may be reported for this instruction.
	pub(crate) const XRELEASE: u32 = 0x0000_0002;
	/// XACQUIRE/XRELEASE are recognized even without a LOCK prefix.
	pub(crate) const XACQUIRE_XRELEASE_NO_LOCK: u32 = 0x0000_0004;
	pub(crate) const LOCK: u32 = 0x0000_0008;
}
/// Bit flags kept in `State::flags` while decoding one instruction.
pub(crate) struct StateFlags;
#[allow(dead_code)]
impl StateFlags {
	/// Low 3 bits hold the `EncodingKind` (only written in debug builds, see `State::encoding()`).
	pub(crate) const ENCODING_MASK: u32 = 0x0000_0007;
	/// A REX prefix was consumed.
	pub(crate) const HAS_REX: u32 = 0x0000_0008;
	/// Copied from EVEX payload bit 0x10 (EVEX.b — broadcast/rounding per the SDM).
	pub(crate) const B: u32 = 0x0000_0010;
	/// EVEX zeroing-masking was requested.
	pub(crate) const Z: u32 = 0x0000_0020;
	/// The instruction is invalid.
	pub(crate) const IS_INVALID: u32 = 0x0000_0040;
	/// W bit; the value 0x80 deliberately matches the W bit position in the
	/// VEX/XOP/EVEX payload byte (see the `const_assert_eq!(0x80, StateFlags::W)` uses).
	pub(crate) const W: u32 = 0x0000_0080;
	pub(crate) const NO_IMM: u32 = 0x0000_0100;
	pub(crate) const ADDR64: u32 = 0x0000_0200;
	pub(crate) const BRANCH_IMM8: u32 = 0x0000_0400;
	pub(crate) const XBEGIN: u32 = 0x0000_0800;
	/// A LOCK (F0) prefix was consumed.
	pub(crate) const LOCK: u32 = 0x0000_1000;
	/// The decoded instruction permits a LOCK prefix (checked in `decode_out_ptr()`).
	pub(crate) const ALLOW_LOCK: u32 = 0x0000_2000;
	/// The instruction ran past the end of the input buffer.
	pub(crate) const NO_MORE_BYTES: u32 = 0x0000_4000;
	/// A 66 operand-size prefix was consumed.
	pub(crate) const HAS66: u32 = 0x0000_8000;
}
/// Mutable per-instruction decoder state; reset by `decode_out_ptr()` before
/// each instruction is decoded.
#[derive(Debug, Default)]
struct State {
	/// The full ModRM byte.
	modrm: u32,
	/// ModRM bits 6-7 (`modrm >> 6`).
	mod_: u32,
	/// ModRM bits 3-5.
	reg: u32,
	/// ModRM bits 0-2.
	rm: u32,
	/// 8 if REX.R / VEX.R / XOP.R / EVEX.R is set, else 0 (extends ModRM.reg).
	extra_register_base: u32,
	/// 8 if REX.X / VEX.X / ... is set, else 0 (extends the SIB index).
	extra_index_register_base: u32,
	/// 8 if REX.B / VEX.B / ... is set, else 0 (extends ModRM.rm / SIB base).
	extra_base_register_base: u32,
	/// 16 when inverted EVEX payload bit 3 (V') is clear — extends the VSIB
	/// index register; see `evex_mvex()`.
	extra_index_register_base_vsib: u32,
	/// `StateFlags::*` bits.
	flags: u32,
	/// Current `MandatoryPrefixByte` value (None/66/F3/F2) as u32.
	mandatory_prefix: u32,
	/// Decoded (un-inverted) VEX/XOP/EVEX vvvv register field, masked to 3 bits
	/// outside 64-bit mode.
	vvvv: u32,
	/// `vvvv` before the 3-bit mask; used by invalid-encoding checks.
	vvvv_invalid_check: u32,
	/// EVEX opmask register number (aaa field).
	aaa: u32,
	/// 0x10 when inverted EVEX P0 bit R' applies; see `evex_mvex()`.
	extra_register_base_evex: u32,
	/// Extra base-register bits (0..=0x18) from the inverted EVEX P0; see `evex_mvex()`.
	extra_base_register_base_evex: u32,
	/// `VectorLength` value as u32 (128/256/512/unknown).
	vector_length: u32,
	/// Effective operand size of the current instruction.
	operand_size: OpSize,
	/// Effective address size of the current instruction.
	address_size: OpSize,
}
impl State {
	/// Returns the `EncodingKind` stored in the low bits of `flags`.
	/// Debug builds only: the encoding is written into `ENCODING_MASK` under
	/// `cfg!(debug_assertions)` (see `vex2`/`vex3`/`xop`/`evex_mvex`).
	#[cfg_attr(has_must_use, must_use)]
	#[inline(always)]
	#[cfg(debug_assertions)]
	fn encoding(&self) -> EncodingKind {
		// SAFETY-note (review): relies on `EncodingKind` being a fieldless u8
		// enum whose variants cover every value the 3-bit mask can contain —
		// confirm against the enum definition.
		unsafe { mem::transmute((self.flags & StateFlags::ENCODING_MASK) as u8) }
	}
	/// Release builds: the encoding bits are never stored, so this stub always
	/// reports `Legacy`. Callers appear to use it only in debug assertions.
	#[cfg_attr(has_must_use, must_use)]
	#[inline(always)]
	#[cfg(not(debug_assertions))]
	#[cfg_attr(feature = "cargo-clippy", allow(clippy::unused_self))]
	fn encoding(&self) -> EncodingKind {
		EncodingKind::Legacy
	}
}
/// Decodes 16/32/64-bit x86 instructions from a byte slice.
#[allow(missing_debug_implementations)]
#[allow(dead_code)]
pub struct Decoder<'a> {
	/// IP of the next instruction to decode (updated after each instruction).
	ip: u64,
	/// Raw address (`data_ptr as usize`) where the last displacement was read.
	displ_index: usize,
	/// 256-bit prefix-byte bitmap: `PREFIXES64` or `PREFIXES1632`.
	prefixes: &'static [u32],
	/// The caller's input buffer.
	data: &'a [u8],
	/// Current read position inside `data`.
	data_ptr: *const u8,
	/// One past the last byte of `data`.
	data_ptr_end: *const u8,
	/// Read limit for the current instruction:
	/// `min(instr_start + MAX_INSTRUCTION_LENGTH, data_ptr_end)`.
	max_data_ptr: *const u8,
	/// Start of the instruction currently being decoded.
	instr_start_data_ptr: *const u8,
	/// 256-entry handler table for the first opcode byte.
	handlers_xx: *const &'static OpCodeHandler,
	// VEX/EVEX/XOP handler tables. Each field is a 256-entry table pointer when
	// the corresponding feature is enabled, or the unit type `()` when disabled,
	// so the struct layout compiles either way.
	#[cfg(not(feature = "no_vex"))]
	handlers_vex_0fxx: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_vex"))]
	handlers_vex_0f38xx: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_vex"))]
	handlers_vex_0f3axx: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_evex"))]
	handlers_evex_0fxx: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_evex"))]
	handlers_evex_0f38xx: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_evex"))]
	handlers_evex_0f3axx: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_xop"))]
	handlers_xop8: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_xop"))]
	handlers_xop9: *const &'static OpCodeHandler,
	#[cfg(not(feature = "no_xop"))]
	handlers_xopa: *const &'static OpCodeHandler,
	#[cfg(feature = "no_vex")]
	handlers_vex_0fxx: (),
	#[cfg(feature = "no_vex")]
	handlers_vex_0f38xx: (),
	#[cfg(feature = "no_vex")]
	handlers_vex_0f3axx: (),
	#[cfg(feature = "no_evex")]
	handlers_evex_0fxx: (),
	#[cfg(feature = "no_evex")]
	handlers_evex_0f38xx: (),
	#[cfg(feature = "no_evex")]
	handlers_evex_0f3axx: (),
	#[cfg(feature = "no_xop")]
	handlers_xop8: (),
	#[cfg(feature = "no_xop")]
	handlers_xop9: (),
	#[cfg(feature = "no_xop")]
	handlers_xopa: (),
	/// Per-instruction decode state.
	state: State,
	/// `DecoderOptions` flags passed to `new()`.
	options: u32,
	/// `u32::MAX` normally; 0 when `DecoderOptions::NO_INVALID_CHECK` is set.
	/// AND-ed into invalid-encoding checks to disable them cheaply.
	invalid_check_mask: u32,
	/// `StateFlags::W` in 64-bit mode, else 0.
	is64_mode_and_w: u32,
	/// 0xF in 64-bit mode (16 GPRs), else 0x7.
	reg15_mask: u32,
	bitness: u32,
	default_operand_size: OpSize,
	default_address_size: OpSize,
	/// Operand size selected when a 66 prefix is present.
	default_inverted_operand_size: OpSize,
	/// Address size selected when a 67 prefix is present.
	default_inverted_address_size: OpSize,
	is64_mode: bool,
	default_code_size: CodeSize,
}
impl<'a> Decoder<'a> {
#[cfg_attr(has_must_use, must_use)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::missing_inline_in_public_items))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::let_unit_value))]
#[allow(trivial_casts)]
/// Creates a decoder.
///
/// * `bitness`: 16, 32 or 64 — any other value panics
/// * `data`: bytes to decode
/// * `options`: `DecoderOptions::*` flags OR-ed together
pub fn new(bitness: u32, data: &'a [u8], options: u32) -> Decoder<'a> {
	let prefixes;
	let is64_mode;
	let default_code_size;
	let default_operand_size;
	let default_inverted_operand_size;
	let default_address_size;
	let default_inverted_address_size;
	// Pick the per-mode defaults; the "inverted" sizes are what a 66/67
	// prefix switches to.
	match bitness {
		64 => {
			is64_mode = true;
			default_code_size = CodeSize::Code64;
			default_operand_size = OpSize::Size32;
			default_inverted_operand_size = OpSize::Size16;
			default_address_size = OpSize::Size64;
			default_inverted_address_size = OpSize::Size32;
			prefixes = &PREFIXES64;
		}
		32 => {
			is64_mode = false;
			default_code_size = CodeSize::Code32;
			default_operand_size = OpSize::Size32;
			default_inverted_operand_size = OpSize::Size16;
			default_address_size = OpSize::Size32;
			default_inverted_address_size = OpSize::Size16;
			prefixes = &PREFIXES1632;
		}
		16 => {
			is64_mode = false;
			default_code_size = CodeSize::Code16;
			default_operand_size = OpSize::Size16;
			default_inverted_operand_size = OpSize::Size32;
			default_address_size = OpSize::Size16;
			default_inverted_address_size = OpSize::Size32;
			prefixes = &PREFIXES1632;
		}
		_ => panic!(),
	}
	// Every handler table must have exactly 256 entries (one per opcode byte).
	fn get_handlers(handlers: &'static [&'static OpCodeHandler]) -> *const &'static OpCodeHandler {
		debug_assert_eq!(0x100, handlers.len());
		handlers.as_ptr()
	}
	// BUGFIX: the previous code used `data.get_unchecked(data.len())`, which is
	// undefined behavior — `get_unchecked` with an out-of-bounds index (and
	// `len` IS out of bounds) is UB even if the resulting reference is never
	// read. Computing the one-past-the-end pointer with pointer arithmetic on
	// `as_ptr()` is well defined.
	// SAFETY: `data.len() as isize` can't overflow (asserted below), and
	// one-past-the-end of an allocation is a valid pointer target.
	let data_ptr_end: *const u8 = unsafe { data.as_ptr().offset(data.len() as isize) };
	assert!(data_ptr_end >= data.as_ptr());
	// Guards against pointer-arithmetic overflow in `decode_out_ptr()`, which
	// computes `data_ptr + MAX_INSTRUCTION_LENGTH`.
	assert!(unsafe {
		data.as_ptr().offset((data.len() as isize).checked_add(IcedConstants::MAX_INSTRUCTION_LENGTH as isize + 4).unwrap()) >= data.as_ptr()
	});
	let tables = &*TABLES;
	#[cfg(not(feature = "no_vex"))]
	let handlers_vex_0fxx = get_handlers(&tables.handlers_vex_0fxx);
	#[cfg(not(feature = "no_vex"))]
	let handlers_vex_0f38xx = get_handlers(&tables.handlers_vex_0f38xx);
	#[cfg(not(feature = "no_vex"))]
	let handlers_vex_0f3axx = get_handlers(&tables.handlers_vex_0f3axx);
	#[cfg(not(feature = "no_evex"))]
	let handlers_evex_0fxx = get_handlers(&tables.handlers_evex_0fxx);
	#[cfg(not(feature = "no_evex"))]
	let handlers_evex_0f38xx = get_handlers(&tables.handlers_evex_0f38xx);
	#[cfg(not(feature = "no_evex"))]
	let handlers_evex_0f3axx = get_handlers(&tables.handlers_evex_0f3axx);
	#[cfg(not(feature = "no_xop"))]
	let handlers_xop8 = get_handlers(&tables.handlers_xop8);
	#[cfg(not(feature = "no_xop"))]
	let handlers_xop9 = get_handlers(&tables.handlers_xop9);
	#[cfg(not(feature = "no_xop"))]
	let handlers_xopa = get_handlers(&tables.handlers_xopa);
	// Disabled encodings get unit placeholders so the struct literal below
	// compiles under every feature combination.
	#[cfg(feature = "no_vex")]
	let handlers_vex_0fxx = ();
	#[cfg(feature = "no_vex")]
	let handlers_vex_0f38xx = ();
	#[cfg(feature = "no_vex")]
	let handlers_vex_0f3axx = ();
	#[cfg(feature = "no_evex")]
	let handlers_evex_0fxx = ();
	#[cfg(feature = "no_evex")]
	let handlers_evex_0f38xx = ();
	#[cfg(feature = "no_evex")]
	let handlers_evex_0f3axx = ();
	#[cfg(feature = "no_xop")]
	let handlers_xop8 = ();
	#[cfg(feature = "no_xop")]
	let handlers_xop9 = ();
	#[cfg(feature = "no_xop")]
	let handlers_xopa = ();
	Decoder {
		ip: 0,
		displ_index: 0,
		prefixes,
		data,
		data_ptr: data.as_ptr(),
		data_ptr_end,
		max_data_ptr: data.as_ptr(),
		instr_start_data_ptr: data.as_ptr(),
		handlers_xx: get_handlers(&tables.handlers_xx),
		handlers_vex_0fxx,
		handlers_vex_0f38xx,
		handlers_vex_0f3axx,
		handlers_evex_0fxx,
		handlers_evex_0f38xx,
		handlers_evex_0f3axx,
		handlers_xop8,
		handlers_xop9,
		handlers_xopa,
		state: State::default(),
		options,
		invalid_check_mask: if (options & DecoderOptions::NO_INVALID_CHECK) == 0 { u32::MAX } else { 0 },
		is64_mode_and_w: if is64_mode { StateFlags::W } else { 0 },
		reg15_mask: if is64_mode { 0xF } else { 0x7 },
		default_code_size,
		default_operand_size,
		default_inverted_operand_size,
		default_address_size,
		default_inverted_address_size,
		is64_mode,
		bitness,
	}
}
/// Returns the IP of the next instruction to be decoded.
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub fn ip(&self) -> u64 {
	self.ip
}
/// Sets the IP of the next instruction to be decoded. Does not move the
/// read position; see `set_position()` for that.
#[inline]
pub fn set_ip(&mut self, new_value: u64) {
	self.ip = new_value;
}
/// Returns the bitness passed to `new()` (16, 32 or 64).
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub fn bitness(&self) -> u32 {
	self.bitness
}
/// Returns the maximum value `set_position()` accepts: the input length.
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub fn max_position(&self) -> usize {
	self.data.len()
}
/// Returns the current read offset into the input buffer (0..=`max_position()`).
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub fn position(&self) -> usize {
	let start = self.data.as_ptr() as usize;
	let current = self.data_ptr as usize;
	current - start
}
/// Sets the current read offset into the input buffer.
///
/// # Panics
/// Panics if `new_pos > max_position()`.
#[inline]
pub fn set_position(&mut self, new_pos: usize) {
	assert!(new_pos <= self.data.len());
	// BUGFIX: the previous `self.data.get_unchecked(new_pos)` was undefined
	// behavior when `new_pos == data.len()` — an out-of-bounds index is UB for
	// `get_unchecked` even though only the address was wanted. Pointer
	// arithmetic up to one-past-the-end is well defined.
	// SAFETY: `new_pos <= len` was asserted above.
	self.data_ptr = unsafe { self.data.as_ptr().offset(new_pos as isize) };
}
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub fn can_decode(&self) -> bool {
self.data_ptr != self.data_ptr_end
}
/// Returns an iterator that decodes the remaining instructions by borrowing
/// this decoder.
#[inline]
pub fn iter<'b>(&'b mut self) -> DecoderIter<'a, 'b> {
	DecoderIter { decoder: self }
}
/// Reads the next byte, or returns 0 and flags the instruction invalid when
/// the per-instruction limit (`max_data_ptr`) has been reached.
/// `NO_MORE_BYTES` is additionally set only when the whole buffer is exhausted
/// (as opposed to hitting the max-instruction-length cap).
#[cfg_attr(has_must_use, must_use)]
pub(self) fn read_u8(&mut self) -> usize {
	unsafe {
		let data_ptr = self.data_ptr;
		if data_ptr < self.max_data_ptr {
			let result = ptr::read(data_ptr) as usize;
			self.data_ptr = data_ptr.offset(1);
			result
		} else {
			if data_ptr == self.data_ptr_end {
				self.state.flags |= StateFlags::NO_MORE_BYTES;
			}
			self.state.flags |= StateFlags::IS_INVALID;
			0
		}
	}
}
/// Reads the next little-endian u16, or returns 0 and flags the instruction
/// invalid (and `NO_MORE_BYTES` when the buffer itself is exhausted) if fewer
/// than 2 readable bytes remain before `max_data_ptr`.
#[cfg_attr(has_must_use, must_use)]
pub(self) fn read_u16(&mut self) -> usize {
	unsafe {
		let data_ptr = self.data_ptr;
		// `data_ptr + 1 < max` means both bytes are below the limit.
		if data_ptr.offset(1) < self.max_data_ptr {
			let result = u16::from_le(ptr::read_unaligned(data_ptr as *const u16)) as usize;
			self.data_ptr = data_ptr.offset(2);
			result
		} else {
			if data_ptr.offset(1) >= self.data_ptr_end {
				self.state.flags |= StateFlags::NO_MORE_BYTES;
			}
			self.state.flags |= StateFlags::IS_INVALID;
			0
		}
	}
}
/// Reads the next little-endian u32, or returns 0 and flags the instruction
/// invalid (and `NO_MORE_BYTES` when the buffer itself is exhausted) if fewer
/// than 4 readable bytes remain before `max_data_ptr`.
#[cfg_attr(has_must_use, must_use)]
pub(self) fn read_u32(&mut self) -> usize {
	unsafe {
		let data_ptr = self.data_ptr;
		// `data_ptr + 3 < max` means all four bytes are below the limit.
		if data_ptr.offset(3) < self.max_data_ptr {
			let result = u32::from_le(ptr::read_unaligned(data_ptr as *const u32)) as usize;
			self.data_ptr = data_ptr.offset(4);
			result
		} else {
			if data_ptr.offset(3) >= self.data_ptr_end {
				self.state.flags |= StateFlags::NO_MORE_BYTES;
			}
			self.state.flags |= StateFlags::IS_INVALID;
			0
		}
	}
}
/// Returns `true` if the last decode failed because the input buffer ran out.
/// Deprecated: `last_error()` reports the same condition as `DecoderError::NoMoreBytes`.
#[cfg_attr(has_must_use, must_use)]
#[inline]
#[deprecated(since = "1.8.0", note = "Use last_error() instead")]
pub fn invalid_no_more_bytes(&self) -> bool {
	(self.state.flags & StateFlags::NO_MORE_BYTES) != 0
}
/// Returns the error of the most recently decoded instruction.
///
/// `NoMoreBytes` takes priority over `InvalidInstruction`: running out of
/// input also marks the instruction invalid, and the more specific cause
/// is the one reported.
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub fn last_error(&self) -> DecoderError {
	let flags = self.state.flags;
	if (flags & StateFlags::NO_MORE_BYTES) != 0 {
		DecoderError::NoMoreBytes
	} else if (flags & StateFlags::IS_INVALID) != 0 {
		DecoderError::InvalidInstruction
	} else {
		DecoderError::None
	}
}
/// Decodes and returns the next instruction.
#[cfg_attr(has_must_use, must_use)]
#[cfg(has_maybe_uninit)]
#[inline]
pub fn decode(&mut self) -> Instruction {
	let mut instruction = mem::MaybeUninit::uninit();
	// SAFETY-note (review): decode_out_ptr() starts with a ptr::write() of a
	// default Instruction, fully initializing the value before assume_init().
	unsafe {
		self.decode_out_ptr(instruction.as_mut_ptr());
		instruction.assume_init()
	}
}
/// Same as the variant above, for toolchains without `MaybeUninit`
/// (`mem::uninitialized()` is deprecated, hence `allow(deprecated_in_future)`).
#[cfg_attr(has_must_use, must_use)]
#[allow(deprecated_in_future)]
#[cfg(not(has_maybe_uninit))]
#[inline]
pub fn decode(&mut self) -> Instruction {
	unsafe {
		let mut instruction = mem::uninitialized();
		self.decode_out_ptr(&mut instruction);
		instruction
	}
}
/// Decodes the next instruction into an existing `Instruction`, overwriting it.
/// Avoids returning the (large) `Instruction` by value.
#[inline]
pub fn decode_out(&mut self, instruction: &mut Instruction) {
	unsafe {
		self.decode_out_ptr(instruction);
	}
}
/// Decodes one instruction into `*instruction`: scans legacy/REX prefixes,
/// dispatches on the first opcode byte, then applies the post-decode
/// invalid/LOCK checks and sets length/IP.
///
/// # Safety
/// `instruction` must be valid for writes; it is fully overwritten with
/// `Instruction::default()` before any field is set.
unsafe fn decode_out_ptr(&mut self, instruction: *mut Instruction) {
	ptr::write(instruction, Instruction::default());
	let instruction = &mut *instruction;
	// Reset the per-instruction state.
	self.state.extra_register_base = 0;
	self.state.extra_index_register_base = 0;
	self.state.extra_base_register_base = 0;
	self.state.extra_index_register_base_vsib = 0;
	self.state.flags = 0;
	self.state.mandatory_prefix = 0;
	self.state.operand_size = self.default_operand_size;
	self.state.address_size = self.default_address_size;
	let data_ptr = self.data_ptr;
	self.instr_start_data_ptr = data_ptr;
	// Cap reads at MAX_INSTRUCTION_LENGTH bytes (or the end of the buffer).
	self.max_data_ptr = cmp::min(data_ptr.offset(IcedConstants::MAX_INSTRUCTION_LENGTH as isize), self.data_ptr_end);
	let mut default_ds_segment = Register::DS;
	// Last REX byte seen (0 = none). A REX prefix only counts if it is the
	// final prefix, so every other prefix arm resets it to 0.
	let mut rex_prefix: usize = 0;
	let mut b;
	// Consume all prefix bytes (per the `prefixes` bitmap); on exit `b` holds
	// the first opcode byte.
	loop {
		b = self.read_u8();
		if (((*self.prefixes.get_unchecked(b / 32)) >> (b & 31)) & 1) == 0 {
			break;
		}
		match b {
			0x26 => {
				// ES override. The `< Register::FS` guard presumably relies on
				// FS/GS sorting after ES/CS/SS/DS in `Register`, so that in
				// 64-bit mode an ES/CS/SS/DS override never replaces an
				// already-seen FS/GS override — confirm the enum order.
				if !self.is64_mode || default_ds_segment < Register::FS {
					instruction.set_segment_prefix(Register::ES);
					default_ds_segment = Register::ES;
				}
				rex_prefix = 0;
			}
			0x2E => {
				// CS override (same 64-bit-mode rule as 0x26).
				if !self.is64_mode || default_ds_segment < Register::FS {
					instruction.set_segment_prefix(Register::CS);
					default_ds_segment = Register::CS;
				}
				rex_prefix = 0;
			}
			0x36 => {
				// SS override.
				if !self.is64_mode || default_ds_segment < Register::FS {
					instruction.set_segment_prefix(Register::SS);
					default_ds_segment = Register::SS;
				}
				rex_prefix = 0;
			}
			0x3E => {
				// DS override.
				if !self.is64_mode || default_ds_segment < Register::FS {
					instruction.set_segment_prefix(Register::DS);
					default_ds_segment = Register::DS;
				}
				rex_prefix = 0;
			}
			0x64 => {
				// FS override (always honored).
				instruction.set_segment_prefix(Register::FS);
				default_ds_segment = Register::FS;
				rex_prefix = 0;
			}
			0x65 => {
				// GS override (always honored).
				instruction.set_segment_prefix(Register::GS);
				default_ds_segment = Register::GS;
				rex_prefix = 0;
			}
			0x66 => {
				// Operand-size prefix; it only becomes the mandatory prefix if
				// no F2/F3 has already claimed that role.
				self.state.flags |= StateFlags::HAS66;
				self.state.operand_size = self.default_inverted_operand_size;
				if self.state.mandatory_prefix == MandatoryPrefixByte::None as u32 {
					self.state.mandatory_prefix = MandatoryPrefixByte::P66 as u32;
				}
				rex_prefix = 0;
			}
			0x67 => {
				// Address-size prefix.
				self.state.address_size = self.default_inverted_address_size;
				rex_prefix = 0;
			}
			0xF0 => {
				// LOCK prefix; validity is checked after decoding.
				super::instruction_internal::internal_set_has_lock_prefix(instruction);
				self.state.flags |= StateFlags::LOCK;
				rex_prefix = 0;
			}
			0xF2 => {
				// REPNE, also the F2 mandatory prefix.
				super::instruction_internal::internal_set_has_repne_prefix(instruction);
				self.state.mandatory_prefix = MandatoryPrefixByte::PF2 as u32;
				rex_prefix = 0;
			}
			0xF3 => {
				// REPE, also the F3 mandatory prefix.
				super::instruction_internal::internal_set_has_repe_prefix(instruction);
				self.state.mandatory_prefix = MandatoryPrefixByte::PF3 as u32;
				rex_prefix = 0;
			}
			_ => {
				// Only REX bytes (0x40-0x4F, 64-bit mode) remain in the bitmap.
				debug_assert!(self.is64_mode);
				debug_assert!(0x40 <= b && b <= 0x4F);
				rex_prefix = b;
			}
		}
	}
	if rex_prefix != 0 {
		// Apply REX bits: W(8) forces 64-bit operand size; R(4)/X(2)/B(1)
		// extend ModRM.reg / SIB.index / ModRM.rm|SIB.base by 8.
		if (rex_prefix & 8) != 0 {
			self.state.operand_size = OpSize::Size64;
			self.state.flags |= StateFlags::HAS_REX | StateFlags::W;
		} else {
			self.state.flags |= StateFlags::HAS_REX;
		}
		self.state.extra_register_base = (rex_prefix as u32 & 4) << 1;
		self.state.extra_index_register_base = (rex_prefix as u32 & 2) << 2;
		self.state.extra_base_register_base = (rex_prefix as u32 & 1) << 3;
	}
	// Dispatch on the first opcode byte.
	let tmp_handler = *self.handlers_xx.offset(b as isize);
	self.decode_table2(tmp_handler, instruction);
	let flags = self.state.flags;
	if (flags & (StateFlags::IS_INVALID | StateFlags::LOCK)) != 0 {
		// Invalid if a handler flagged it, or if LOCK was used on an
		// instruction without ALLOW_LOCK (suppressed by NO_INVALID_CHECK).
		if (flags & StateFlags::IS_INVALID) != 0
			|| (((flags & (StateFlags::LOCK | StateFlags::ALLOW_LOCK)) & self.invalid_check_mask) == StateFlags::LOCK)
		{
			// Wipe the partially decoded instruction; default Code is INVALID.
			*instruction = Instruction::default();
			const_assert_eq!(0, Code::INVALID as u32);
			if (flags & StateFlags::NO_MORE_BYTES) != 0 {
				// Consumed past the buffer: park the read pointer at the limit.
				self.data_ptr = self.max_data_ptr;
			}
			self.state.flags = flags | StateFlags::IS_INVALID;
		}
	}
	super::instruction_internal::internal_set_code_size(instruction, self.default_code_size);
	debug_assert_eq!(self.instr_start_data_ptr, data_ptr);
	let instr_len = self.data_ptr as u32 - data_ptr as u32;
	debug_assert!(instr_len <= IcedConstants::MAX_INSTRUCTION_LENGTH as u32);
	super::instruction_internal::internal_set_len(instruction, instr_len);
	let ip = self.ip.wrapping_add(instr_len as u64);
	self.ip = ip;
	instruction.set_next_ip(ip);
}
/// Returns the current read position expressed as a 32-bit IP: the IP of the
/// instruction being decoded plus the number of bytes consumed so far.
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub(self) fn current_ip32(&self) -> u32 {
	debug_assert!(self.instr_start_data_ptr <= self.data_ptr);
	debug_assert!(self.data_ptr as usize - self.instr_start_data_ptr as usize <= IcedConstants::MAX_INSTRUCTION_LENGTH);
	((self.data_ptr as usize - self.instr_start_data_ptr as usize) as u32).wrapping_add(self.ip as u32)
}
/// Returns the current read position expressed as a 64-bit IP: the IP of the
/// instruction being decoded plus the number of bytes consumed so far.
#[cfg_attr(has_must_use, must_use)]
#[inline]
pub(self) fn current_ip64(&self) -> u64 {
	debug_assert!(self.instr_start_data_ptr <= self.data_ptr);
	debug_assert!(self.data_ptr as usize - self.instr_start_data_ptr as usize <= IcedConstants::MAX_INSTRUCTION_LENGTH);
	((self.data_ptr as usize - self.instr_start_data_ptr as usize) as u64).wrapping_add(self.ip)
}
// `MandatoryPrefixByte` values as consts so they can appear in match patterns.
const PF3: u32 = MandatoryPrefixByte::PF3 as u32;
const PF2: u32 = MandatoryPrefixByte::PF2 as u32;
/// Clears the REPE/REPNE prefix flag that corresponds to the current mandatory
/// prefix — used when F2/F3 selects the opcode rather than acting as REP.
pub(self) fn clear_mandatory_prefix(&mut self, instruction: &mut Instruction) {
	debug_assert_eq!(EncodingKind::Legacy, self.state.encoding());
	match self.state.mandatory_prefix {
		Decoder::PF3 => instruction_internal::internal_clear_has_repe_prefix(instruction),
		Decoder::PF2 => instruction_internal::internal_clear_has_repne_prefix(instruction),
		_ => {}
	}
}
/// Converts an F2/F3 prefix into XACQUIRE/XRELEASE when the handler allows it.
/// Fast path: does nothing unless a LOCK prefix is present (or the handler
/// allows XACQUIRE/XRELEASE without LOCK).
#[inline(always)]
pub(self) fn set_xacquire_xrelease(&mut self, instruction: &mut Instruction, flags: u32) {
	if (flags & HandlerFlags::XACQUIRE_XRELEASE_NO_LOCK) != 0 || instruction.has_lock_prefix() {
		self.set_xacquire_xrelease_core(instruction, flags);
	}
}
/// Slow path of `set_xacquire_xrelease()`: re-labels F2 as XACQUIRE and F3 as
/// XRELEASE when the handler's flags permit, clearing the REP flag it replaced.
fn set_xacquire_xrelease_core(&mut self, instruction: &mut Instruction, flags: u32) {
	debug_assert!(!((flags & HandlerFlags::XACQUIRE_XRELEASE_NO_LOCK) == 0 && !instruction.has_lock_prefix()));
	match self.state.mandatory_prefix {
		Decoder::PF2 => {
			if (flags & HandlerFlags::XACQUIRE) != 0 {
				self.clear_mandatory_prefix_f2(instruction);
				super::instruction_internal::internal_set_has_xacquire_prefix(instruction);
			}
		}
		Decoder::PF3 => {
			if (flags & HandlerFlags::XRELEASE) != 0 {
				self.clear_mandatory_prefix_f3(instruction);
				super::instruction_internal::internal_set_has_xrelease_prefix(instruction);
			}
		}
		_ => {}
	}
}
/// Clears the REPE flag; caller must have verified the mandatory prefix is F3.
#[inline]
fn clear_mandatory_prefix_f3(&self, instruction: &mut Instruction) {
	debug_assert_eq!(EncodingKind::Legacy, self.state.encoding());
	debug_assert_eq!(MandatoryPrefixByte::PF3 as u32, self.state.mandatory_prefix);
	super::instruction_internal::internal_clear_has_repe_prefix(instruction);
}
/// Clears the REPNE flag; caller must have verified the mandatory prefix is F2.
#[inline]
fn clear_mandatory_prefix_f2(&self, instruction: &mut Instruction) {
	debug_assert_eq!(EncodingKind::Legacy, self.state.encoding());
	debug_assert_eq!(MandatoryPrefixByte::PF2 as u32, self.state.mandatory_prefix);
	super::instruction_internal::internal_clear_has_repne_prefix(instruction);
}
/// Marks the instruction being decoded as invalid; checked after decoding
/// completes (see `decode_out_ptr()`).
#[inline]
pub(self) fn set_invalid_instruction(&mut self) {
	self.state.flags |= StateFlags::IS_INVALID;
}
/// Reads the next opcode byte and dispatches to the matching handler in the
/// given 256-entry table.
#[inline(always)]
pub(self) fn decode_table(&mut self, table: *const &OpCodeHandler, instruction: &mut Instruction) {
	let b = self.read_u8();
	self.decode_table2(unsafe { *table.offset(b as isize) }, instruction);
}
/// Invokes a handler, first reading and splitting the ModRM byte if the
/// handler declares it needs one.
#[inline(always)]
fn decode_table2(&mut self, handler: &OpCodeHandler, instruction: &mut Instruction) {
	if handler.has_modrm {
		let m = self.read_u8() as u32;
		self.state.modrm = m;
		self.state.mod_ = m >> 6;
		self.state.reg = (m >> 3) & 7;
		self.state.rm = m & 7;
	}
	(handler.decode)(handler, self, instruction);
}
/// Reads the next byte as ModRM and splits it into `mod_`/`reg`/`rm`
/// (same splitting as in `decode_table2()`).
#[inline(always)]
pub(self) fn read_modrm(&mut self) {
	let m = self.read_u8() as u32;
	self.state.modrm = m;
	self.state.mod_ = m >> 6;
	self.state.reg = (m >> 3) & 7;
	self.state.rm = m & 7;
}
/// `no_vex` stub: a C5 VEX prefix can't be decoded, so flag invalid.
#[cfg(feature = "no_vex")]
pub(self) fn vex2(&mut self, _instruction: &mut Instruction) {
	self.set_invalid_instruction();
}
/// Decodes a 2-byte VEX prefix (C5 xx). `state.modrm` already holds the
/// payload byte; dispatches into the VEX 0F handler table.
#[cfg(not(feature = "no_vex"))]
pub(self) fn vex2(&mut self, instruction: &mut Instruction) {
	// VEX must not follow a REX or a 66/F2/F3 mandatory prefix; the check is
	// masked out when NO_INVALID_CHECK is set.
	if (((self.state.flags & StateFlags::HAS_REX) | self.state.mandatory_prefix) & self.invalid_check_mask) != 0 {
		self.set_invalid_instruction();
	}
	// 2-byte VEX carries no W/X/B bits: clear them.
	self.state.flags &= !StateFlags::W;
	self.state.extra_index_register_base = 0;
	self.state.extra_base_register_base = 0;
	if cfg!(debug_assertions) {
		self.state.flags |= EncodingKind::VEX as u32;
	}
	let mut b = self.state.modrm;
	// R is stored inverted in bit 7: clear bit means extra_register_base = 8.
	self.state.extra_register_base = ((b >> 4) ^ 8) & 8;
	const_assert_eq!(0, VectorLength::L128 as u32);
	const_assert_eq!(1, VectorLength::L256 as u32);
	self.state.vector_length = (b >> 2) & 1;
	const_assert_eq!(0, MandatoryPrefixByte::None as u32);
	const_assert_eq!(1, MandatoryPrefixByte::P66 as u32);
	const_assert_eq!(2, MandatoryPrefixByte::PF3 as u32);
	const_assert_eq!(3, MandatoryPrefixByte::PF2 as u32);
	self.state.mandatory_prefix = b & 3;
	// vvvv is stored inverted in bits 6-3.
	b = (!b >> 3) & 0x0F;
	self.state.vvvv = b;
	self.state.vvvv_invalid_check = b;
	let tmp_handlers = self.handlers_vex_0fxx;
	self.decode_table(tmp_handlers, instruction);
}
/// `no_vex` stub: a C4 VEX prefix can't be decoded, so flag invalid.
#[cfg(feature = "no_vex")]
pub(self) fn vex3(&mut self, _instruction: &mut Instruction) {
	self.set_invalid_instruction();
}
/// Decodes a 3-byte VEX prefix (C4 xx xx). `state.modrm` holds the first
/// payload byte (R/X/B + map-select); the second payload byte is read here.
#[cfg(not(feature = "no_vex"))]
pub(self) fn vex3(&mut self, instruction: &mut Instruction) {
	// VEX must not follow a REX or a 66/F2/F3 mandatory prefix.
	if (((self.state.flags & StateFlags::HAS_REX) | self.state.mandatory_prefix) & self.invalid_check_mask) != 0 {
		self.set_invalid_instruction();
	}
	self.state.flags &= !StateFlags::W;
	if cfg!(debug_assertions) {
		self.state.flags |= EncodingKind::VEX as u32;
	}
	let b1 = self.state.modrm;
	let mut b2 = self.read_u8() as u32;
	// W sits at bit 0x80 in the payload, matching StateFlags::W exactly.
	const_assert_eq!(0x80, StateFlags::W);
	self.state.flags |= b2 & 0x80;
	const_assert_eq!(0, VectorLength::L128 as u32);
	const_assert_eq!(1, VectorLength::L256 as u32);
	self.state.vector_length = (b2 >> 2) & 1;
	const_assert_eq!(0, MandatoryPrefixByte::None as u32);
	const_assert_eq!(1, MandatoryPrefixByte::P66 as u32);
	const_assert_eq!(2, MandatoryPrefixByte::PF3 as u32);
	const_assert_eq!(3, MandatoryPrefixByte::PF2 as u32);
	self.state.mandatory_prefix = b2 & 3;
	// vvvv is stored inverted in bits 6-3.
	b2 = (!b2 >> 3) & 0x0F;
	if self.is64_mode {
		self.state.vvvv = b2;
		self.state.vvvv_invalid_check = b2;
		// R/X/B are stored inverted in the first payload byte.
		let b1x = !b1;
		self.state.extra_register_base = (b1x >> 4) & 8;
		self.state.extra_index_register_base = (b1x >> 3) & 8;
		self.state.extra_base_register_base = (b1x >> 2) & 8;
	} else {
		// Outside 64-bit mode only 3 vvvv bits are usable; keep the raw value
		// for the invalid-encoding check.
		self.state.vvvv_invalid_check = b2;
		self.state.vvvv = b2 & 0x07;
	}
	// Map-select: 1 = 0F, 2 = 0F38, 3 = 0F3A; everything else is invalid.
	let table = match b1 & 0x1F {
		1 => self.handlers_vex_0fxx,
		2 => self.handlers_vex_0f38xx,
		3 => self.handlers_vex_0f3axx,
		_ => {
			self.set_invalid_instruction();
			return;
		}
	};
	self.decode_table(table, instruction);
}
/// `no_xop` stub: an XOP (8F) prefix can't be decoded, so flag invalid.
#[cfg(feature = "no_xop")]
pub(self) fn xop(&mut self, _instruction: &mut Instruction) {
	self.set_invalid_instruction();
}
/// Decodes an AMD XOP prefix (8F xx xx). Layout mirrors 3-byte VEX;
/// `state.modrm` holds the first payload byte, the second is read here.
#[cfg(not(feature = "no_xop"))]
pub(self) fn xop(&mut self, instruction: &mut Instruction) {
	// XOP must not follow a REX or a 66/F2/F3 mandatory prefix.
	if (((self.state.flags & StateFlags::HAS_REX) | self.state.mandatory_prefix) & self.invalid_check_mask) != 0 {
		self.set_invalid_instruction();
	}
	self.state.flags &= !StateFlags::W;
	if cfg!(debug_assertions) {
		self.state.flags |= EncodingKind::XOP as u32;
	}
	let b1 = self.state.modrm;
	let mut b2 = self.read_u8() as u32;
	// W sits at bit 0x80 in the payload, matching StateFlags::W exactly.
	const_assert_eq!(0x80, StateFlags::W);
	self.state.flags |= b2 & 0x80;
	const_assert_eq!(0, VectorLength::L128 as u32);
	const_assert_eq!(1, VectorLength::L256 as u32);
	self.state.vector_length = (b2 >> 2) & 1;
	const_assert_eq!(0, MandatoryPrefixByte::None as u32);
	const_assert_eq!(1, MandatoryPrefixByte::P66 as u32);
	const_assert_eq!(2, MandatoryPrefixByte::PF3 as u32);
	const_assert_eq!(3, MandatoryPrefixByte::PF2 as u32);
	self.state.mandatory_prefix = b2 & 3;
	// vvvv is stored inverted in bits 6-3.
	b2 = (!b2 >> 3) & 0x0F;
	if self.is64_mode {
		self.state.vvvv = b2;
		self.state.vvvv_invalid_check = b2;
		// R/X/B are stored inverted in the first payload byte.
		let b1x = !b1;
		self.state.extra_register_base = (b1x >> 4) & 8;
		self.state.extra_index_register_base = (b1x >> 3) & 8;
		self.state.extra_base_register_base = (b1x >> 2) & 8;
	} else {
		// Outside 64-bit mode only 3 vvvv bits are usable.
		self.state.vvvv_invalid_check = b2;
		self.state.vvvv = b2 & 0x07;
	}
	// XOP map-select: 8, 9 or 0xA; everything else is invalid.
	let table = match b1 & 0x1F {
		8 => self.handlers_xop8,
		9 => self.handlers_xop9,
		10 => self.handlers_xopa,
		_ => {
			self.set_invalid_instruction();
			return;
		}
	};
	self.decode_table(table, instruction);
}
/// `no_evex` stub: an EVEX (62) prefix can't be decoded, so flag invalid.
#[cfg(feature = "no_evex")]
pub(self) fn evex_mvex(&mut self, _instruction: &mut Instruction) {
	self.set_invalid_instruction();
}
/// Decodes an EVEX prefix (62 P0 P1 P2). `state.modrm` holds P0; P1 and P2
/// are read here. MVEX (the alternative meaning of the reserved bits) is not
/// supported and is flagged invalid.
#[cfg(not(feature = "no_evex"))]
pub(self) fn evex_mvex(&mut self, instruction: &mut Instruction) {
	// EVEX must not follow a REX or a 66/F2/F3 mandatory prefix.
	if (((self.state.flags & StateFlags::HAS_REX) | self.state.mandatory_prefix) & self.invalid_check_mask) != 0 {
		self.set_invalid_instruction();
	}
	self.state.flags &= !StateFlags::W;
	let p0 = self.state.modrm;
	// Read P1 and P2 in one u16 fetch.
	let mut p1 = self.read_u16() as u32;
	let p2 = p1 >> 8;
	p1 = p1 as u8 as u32;
	// P1 bit 2 must be 1 and P0 bits 2-3 must be 0 per the EVEX layout;
	// anything else (e.g. MVEX) is rejected.
	if (p1 & 4) != 0 {
		if (p0 & 0x0C) == 0 {
			if cfg!(debug_assertions) {
				self.state.flags |= EncodingKind::EVEX as u32;
			}
			const_assert_eq!(0, MandatoryPrefixByte::None as u32);
			const_assert_eq!(1, MandatoryPrefixByte::P66 as u32);
			const_assert_eq!(2, MandatoryPrefixByte::PF3 as u32);
			const_assert_eq!(3, MandatoryPrefixByte::PF2 as u32);
			self.state.mandatory_prefix = p1 & 3;
			// W sits at bit 0x80 of P1, matching StateFlags::W exactly.
			const_assert_eq!(0x80, StateFlags::W);
			self.state.flags |= p1 & 0x80;
			// Opmask register (aaa) from P2 bits 0-2.
			let aaa = p2 & 7;
			self.state.aaa = aaa;
			super::instruction_internal::internal_set_op_mask(instruction, aaa);
			if (p2 & 0x80) != 0 {
				// Zeroing-masking; invalid with no opmask register (aaa == 0)
				// unless NO_INVALID_CHECK is set.
				if (aaa ^ self.invalid_check_mask) == u32::MAX {
					self.set_invalid_instruction();
				}
				self.state.flags |= StateFlags::Z;
				super::instruction_internal::internal_set_zeroing_masking(instruction);
			}
			// b (broadcast/rounding) bit copied verbatim into the flags.
			const_assert_eq!(0x10, StateFlags::B);
			self.state.flags |= p2 & 0x10;
			const_assert_eq!(0, VectorLength::L128 as u32);
			const_assert_eq!(1, VectorLength::L256 as u32);
			const_assert_eq!(2, VectorLength::L512 as u32);
			const_assert_eq!(3, VectorLength::Unknown as u32);
			self.state.vector_length = (p2 >> 5) & 3;
			// vvvv is stored inverted in P1 bits 6-3.
			p1 = (!p1 >> 3) & 0x0F;
			if self.is64_mode {
				// V' (inverted, P2 bit 3) supplies vvvv bit 4 and the VSIB
				// index extension.
				let mut tmp = (!p2 & 8) << 1;
				self.state.extra_index_register_base_vsib = tmp;
				tmp += p1;
				self.state.vvvv = tmp;
				self.state.vvvv_invalid_check = tmp;
				// R/X/B/R' are stored inverted in P0.
				let mut p0x = !p0;
				self.state.extra_register_base = (p0x >> 4) & 8;
				self.state.extra_index_register_base = (p0x >> 3) & 8;
				self.state.extra_register_base_evex = p0x & 0x10;
				p0x >>= 2;
				self.state.extra_base_register_base_evex = p0x & 0x18;
				self.state.extra_base_register_base = p0x & 8;
			} else {
				self.state.vvvv_invalid_check = p1;
				self.state.vvvv = p1 & 0x07;
				// Outside 64-bit mode V' must be 1 (stored inverted): shifting
				// the inverted bit into IS_INVALID flags the encoding directly.
				const_assert_eq!(0x40, StateFlags::IS_INVALID);
				self.state.flags |= (!p2 & 8) << 3;
			}
			// Map-select from P0: 1 = 0F, 2 = 0F38, 3 = 0F3A.
			let table = match p0 & 3 {
				1 => self.handlers_evex_0fxx,
				2 => self.handlers_evex_0f38xx,
				3 => self.handlers_evex_0f3axx,
				_ => {
					self.set_invalid_instruction();
					return;
				}
			};
			let handler = unsafe { *table.offset(self.read_u8() as isize) };
			// Every EVEX handler needs ModRM; read and split it inline.
			debug_assert!(handler.has_modrm);
			let m = self.read_u8() as u32;
			self.state.modrm = m;
			self.state.mod_ = m >> 6;
			self.state.reg = (m >> 3) & 7;
			self.state.rm = m & 7;
			// b=0 with vector_length==3 (reserved LL) is invalid; the B flag
			// (0x10) can't collide with the 2-bit length, hence the assert.
			const_assert!(StateFlags::B > 3);
			if (((self.state.flags & StateFlags::B) | self.state.vector_length) & self.invalid_check_mask) == 3 {
				self.set_invalid_instruction();
			}
			(handler.decode)(handler, self, instruction);
		} else {
			self.set_invalid_instruction();
		}
	} else {
		self.set_invalid_instruction();
	}
}
/// Returns the segment register (`Register::ES + reg`) encoded in ModRM.reg,
/// or flags the instruction invalid and returns `Register::None` for the two
/// reserved values (6 and 7).
#[cfg_attr(has_must_use, must_use)]
#[inline(always)]
pub(self) fn read_op_seg_reg(&mut self) -> u32 {
	let reg = self.state.reg;
	if reg >= 6 {
		self.set_invalid_instruction();
		return Register::None as u32;
	}
	Register::ES as u32 + reg
}
/// Reads a ModRM memory operand for non-EVEX instructions (no compressed
/// disp8 scaling). The bool returned by `read_op_mem_32_or_64()` (used by the
/// `_sib`/`_vsib` variants) is irrelevant here and ignored.
#[inline(always)]
pub(self) fn read_op_mem(&mut self, instruction: &mut Instruction) {
	debug_assert_ne!(EncodingKind::EVEX, self.state.encoding());
	if self.state.address_size != OpSize::Size16 {
		let _ = self.read_op_mem_32_or_64(instruction);
	} else {
		self.read_op_mem_16(instruction, TupleType::N1);
	}
}
/// Reads a memory operand that must satisfy `read_op_mem_32_or_64()`'s
/// validity condition (presumably: a SIB byte is required — confirm against
/// that function). 16-bit addressing is always invalid here. Both checks are
/// skipped when NO_INVALID_CHECK is set.
#[inline(always)]
#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop")))]
pub(self) fn read_op_mem_sib(&mut self, instruction: &mut Instruction) {
	debug_assert_ne!(EncodingKind::EVEX, self.state.encoding());
	let is_valid = if self.state.address_size != OpSize::Size16 {
		self.read_op_mem_32_or_64(instruction)
	} else {
		self.read_op_mem_16(instruction, TupleType::N1);
		false
	};
	if self.invalid_check_mask != 0 && !is_valid {
		self.set_invalid_instruction();
	}
}
/// Reads a memory operand for MPX instructions: 64-bit mode always uses
/// 64-bit addressing (67 prefix ignored), and 16-bit addressing is invalid
/// (unless NO_INVALID_CHECK is set).
#[inline(always)]
pub(self) fn read_op_mem_mpx(&mut self, instruction: &mut Instruction) {
	debug_assert_ne!(EncodingKind::EVEX, self.state.encoding());
	if self.is64_mode {
		self.state.address_size = OpSize::Size64;
		let _ = self.read_op_mem_32_or_64(instruction);
	} else if self.state.address_size != OpSize::Size16 {
		let _ = self.read_op_mem_32_or_64(instruction);
	} else {
		self.read_op_mem_16(instruction, TupleType::N1);
		if self.invalid_check_mask != 0 {
			self.set_invalid_instruction();
		}
	}
}
/// Reads an EVEX memory operand; `tuple_type` drives the compressed-disp8
/// scaling (see `get_disp8n()` usage elsewhere in this module).
#[inline(always)]
#[cfg(not(feature = "no_evex"))]
pub(self) fn read_op_mem_tuple_type(&mut self, instruction: &mut Instruction, tuple_type: TupleType) {
	debug_assert_eq!(EncodingKind::EVEX, self.state.encoding());
	if self.state.address_size != OpSize::Size16 {
		// Non-VSIB path: the plain base register family for this address size.
		let index_reg = if self.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
		let _ = self.read_op_mem_32_or_64_vsib(instruction, index_reg, tuple_type, false);
	} else {
		self.read_op_mem_16(instruction, tuple_type);
	}
}
/// Reads a VSIB memory operand (gather/scatter index register family in
/// `vsib_index`). The operand must satisfy the VSIB validity condition of
/// `read_op_mem_32_or_64_vsib()`, and 16-bit addressing is always invalid;
/// both checks are skipped when NO_INVALID_CHECK is set.
#[inline(always)]
#[cfg(any(not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop")))]
pub(self) fn read_op_mem_vsib(&mut self, instruction: &mut Instruction, vsib_index: Register, tuple_type: TupleType) {
	let is_valid = if self.state.address_size != OpSize::Size16 {
		self.read_op_mem_32_or_64_vsib(instruction, vsib_index, tuple_type, true)
	} else {
		self.read_op_mem_16(instruction, tuple_type);
		false
	};
	if self.invalid_check_mask != 0 && !is_valid {
		self.set_invalid_instruction();
	}
}
/// Decodes a 16-bit-addressing ModRM memory operand ([BX+SI]-style forms,
/// see `MEM_REGS_16`). `tuple_type` scales 8-bit displacements for EVEX
/// compressed disp8. Marked cold: 16-bit addressing is the rare path.
#[inline(never)]
#[cold]
fn read_op_mem_16(&mut self, instruction: &mut Instruction, tuple_type: TupleType) {
	debug_assert!(self.state.address_size == OpSize::Size16);
	debug_assert!(self.state.rm <= 7);
	// SAFETY-note (review): rm <= 7 is asserted above; the table has 8 entries.
	let (mut base_reg, index_reg) = unsafe { *MEM_REGS_16.get_unchecked(self.state.rm as usize) };
	match self.state.mod_ {
		0 => {
			// mod=0, rm=6 is the special pure-disp16 form (no base register).
			if self.state.rm == 6 {
				super::instruction_internal::internal_set_memory_displ_size(instruction, 2);
				self.displ_index = self.data_ptr as usize;
				instruction.set_memory_displacement(self.read_u16() as u32);
				base_reg = Register::None;
				debug_assert_eq!(Register::None, index_reg);
			}
		}
		1 => {
			// mod=1: sign-extended 8-bit displacement, scaled by the tuple
			// type's disp8 factor for EVEX.
			super::instruction_internal::internal_set_memory_displ_size(instruction, 1);
			self.displ_index = self.data_ptr as usize;
			if tuple_type == TupleType::N1 {
				instruction.set_memory_displacement(self.read_u8() as i8 as u16 as u32);
			} else {
				instruction.set_memory_displacement(self.disp8n(tuple_type).wrapping_mul(self.read_u8() as i8 as u32) as u16 as u32);
			}
		}
		_ => {
			// mod=2: 16-bit displacement. (mod=3 is a register operand and
			// never reaches this function.)
			debug_assert_eq!(2, self.state.mod_);
			super::instruction_internal::internal_set_memory_displ_size(instruction, 2);
			self.displ_index = self.data_ptr as usize;
			instruction.set_memory_displacement(self.read_u16() as u32);
		}
	}
	super::instruction_internal::internal_set_memory_base(instruction, base_reg);
	super::instruction_internal::internal_set_memory_index(instruction, index_reg);
}
#[cfg_attr(has_must_use, must_use)]
// Reads a 32/64-bit addressing-form memory operand (mod != 3) and updates
// `instruction` with base/index/scale/displacement.
//
// Returns `true` iff the operand form included a SIB byte; callers that
// require (or forbid) a SIB-encoded operand use this as a validity flag.
fn read_op_mem_32_or_64(&mut self, instruction: &mut Instruction) -> bool {
debug_assert!(self.state.address_size == OpSize::Size32 || self.state.address_size == OpSize::Size64);
let sib: u32;
let displ_size_scale: u32;
let displ: u32;
// Register-bank base: RAX for 64-bit addressing, EAX for 32-bit.
let base_reg = if self.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
match self.state.mod_ {
0 => match self.state.rm {
4 => {
// mod=00 rm=100: SIB byte follows, no displacement.
sib = self.read_u8() as u32;
displ_size_scale = 0;
displ = 0;
}
5 => {
// mod=00 rm=101: disp32 only; RIP/EIP-relative in 64-bit mode.
// Displ-size code is 4 for 64-bit address size, else 3 (decoder-internal
// encoding — presumably 8- vs 4-byte displacement; see instruction_internal).
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_displ_size(instruction, 4);
} else {
super::instruction_internal::internal_set_memory_displ_size(instruction, 3);
}
// Record where the displacement starts before consuming it.
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u32() as u32);
if self.is64_mode {
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_base(instruction, Register::RIP);
} else {
super::instruction_internal::internal_set_memory_base(instruction, Register::EIP);
}
}
return false;
}
_ => {
// mod=00, remaining rm values: base register only, no displacement.
debug_assert!(self.state.rm <= 7 && self.state.rm != 4 && self.state.rm != 5);
super::instruction_internal::internal_set_memory_base_u32(
instruction,
self.state.extra_base_register_base + self.state.rm + base_reg as u32,
);
return false;
}
},
1 => {
// mod=01: 1-byte signed displacement.
if self.state.rm == 4 {
// SIB byte, then disp8.
sib = self.read_u8() as u32;
displ_size_scale = 1;
self.displ_index = self.data_ptr as usize;
displ = self.read_u8() as i8 as u32;
} else {
debug_assert!(self.state.rm <= 7 && self.state.rm != 4);
super::instruction_internal::internal_set_memory_displ_size(instruction, 1);
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u8() as i8 as u32);
super::instruction_internal::internal_set_memory_base_u32(
instruction,
self.state.extra_base_register_base + self.state.rm + base_reg as u32,
);
return false;
}
}
_ => {
// mod=10: 4-byte displacement.
debug_assert_eq!(2, self.state.mod_);
if self.state.rm == 4 {
// SIB byte, then disp32.
sib = self.read_u8() as u32;
displ_size_scale = if self.state.address_size == OpSize::Size64 { 4 } else { 3 };
self.displ_index = self.data_ptr as usize;
displ = self.read_u32() as u32;
} else {
debug_assert!(self.state.rm <= 7 && self.state.rm != 4);
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_displ_size(instruction, 4);
} else {
super::instruction_internal::internal_set_memory_displ_size(instruction, 3);
}
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u32() as u32);
super::instruction_internal::internal_set_memory_base_u32(
instruction,
self.state.extra_base_register_base + self.state.rm + base_reg as u32,
);
return false;
}
}
}
// SIB byte layout: ssiiibbb — scale in bits 6-7, index in bits 3-5, base in bits 0-2.
let index = ((sib >> 3) & 7) + self.state.extra_index_register_base;
let base = sib & 7;
super::instruction_internal::internal_set_memory_index_scale(instruction, sib >> 6);
// index=100 with no REX.X/VEX extension means "no index register";
// an extended index (e.g. R12) yields index != 4 and is kept.
if index != 4 {
super::instruction_internal::internal_set_memory_index_u32(instruction, index + base_reg as u32);
}
if base == 5 && self.state.mod_ == 0 {
// base=101 with mod=00: no base register; a disp32 follows the SIB byte.
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_displ_size(instruction, 4);
} else {
super::instruction_internal::internal_set_memory_displ_size(instruction, 3);
}
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u32() as u32);
} else {
super::instruction_internal::internal_set_memory_base_u32(instruction, base + self.state.extra_base_register_base + base_reg as u32);
super::instruction_internal::internal_set_memory_displ_size(instruction, displ_size_scale);
instruction.set_memory_displacement(displ);
}
true
}
#[cfg_attr(has_must_use, must_use)]
#[cfg(any(not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop")))]
// Reads a 32/64-bit addressing-form memory operand (mod != 3), supporting
// EVEX disp8*N compressed displacements (`tuple_type`) and VSIB indexes.
// `index_reg` is the base of the index register bank (vector regs for VSIB,
// EAX/RAX otherwise). When `is_vsib` is true, an index of 4 is a real vector
// register, not "no index".
//
// Returns `true` iff the operand form included a SIB byte; VSIB callers use
// this as a validity flag since VSIB requires a SIB byte.
fn read_op_mem_32_or_64_vsib(&mut self, instruction: &mut Instruction, index_reg: Register, tuple_type: TupleType, is_vsib: bool) -> bool {
debug_assert!(self.state.address_size == OpSize::Size32 || self.state.address_size == OpSize::Size64);
let sib: u32;
let displ_size_scale: u32;
let displ: u32;
// Register-bank base for the *base* register: RAX or EAX by address size.
let base_reg = if self.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
match self.state.mod_ {
0 => match self.state.rm {
4 => {
// mod=00 rm=100: SIB byte follows, no displacement.
sib = self.read_u8() as u32;
displ_size_scale = 0;
displ = 0;
}
5 => {
// mod=00 rm=101: disp32 only; RIP/EIP-relative in 64-bit mode.
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_displ_size(instruction, 4);
} else {
super::instruction_internal::internal_set_memory_displ_size(instruction, 3);
}
// Record where the displacement starts before consuming it.
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u32() as u32);
if self.is64_mode {
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_base(instruction, Register::RIP);
} else {
super::instruction_internal::internal_set_memory_base(instruction, Register::EIP);
}
}
return false;
}
_ => {
// mod=00, remaining rm values: base register only, no displacement.
debug_assert!(self.state.rm <= 7 && self.state.rm != 4 && self.state.rm != 5);
super::instruction_internal::internal_set_memory_base_u32(
instruction,
self.state.extra_base_register_base + self.state.rm + base_reg as u32,
);
return false;
}
},
1 => {
// mod=01: 1-byte signed displacement, scaled by disp8*N unless N1.
if self.state.rm == 4 {
// SIB byte, then disp8.
sib = self.read_u8() as u32;
displ_size_scale = 1;
self.displ_index = self.data_ptr as usize;
if tuple_type == TupleType::N1 {
displ = self.read_u8() as i8 as u32;
} else {
// EVEX compressed displacement: disp8 * N.
displ = self.disp8n(tuple_type).wrapping_mul(self.read_u8() as i8 as u32);
}
} else {
debug_assert!(self.state.rm <= 7 && self.state.rm != 4);
super::instruction_internal::internal_set_memory_displ_size(instruction, 1);
self.displ_index = self.data_ptr as usize;
if tuple_type == TupleType::N1 {
instruction.set_memory_displacement(self.read_u8() as i8 as u32);
} else {
instruction.set_memory_displacement(self.disp8n(tuple_type).wrapping_mul(self.read_u8() as i8 as u32));
}
super::instruction_internal::internal_set_memory_base_u32(
instruction,
self.state.extra_base_register_base + self.state.rm + base_reg as u32,
);
return false;
}
}
_ => {
// mod=10: 4-byte displacement (never disp8*N-scaled).
debug_assert_eq!(2, self.state.mod_);
if self.state.rm == 4 {
// SIB byte, then disp32.
sib = self.read_u8() as u32;
displ_size_scale = if self.state.address_size == OpSize::Size64 { 4 } else { 3 };
self.displ_index = self.data_ptr as usize;
displ = self.read_u32() as u32;
} else {
debug_assert!(self.state.rm <= 7 && self.state.rm != 4);
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_displ_size(instruction, 4);
} else {
super::instruction_internal::internal_set_memory_displ_size(instruction, 3);
}
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u32() as u32);
super::instruction_internal::internal_set_memory_base_u32(
instruction,
self.state.extra_base_register_base + self.state.rm + base_reg as u32,
);
return false;
}
}
}
// SIB byte layout: ssiiibbb — scale in bits 6-7, index in bits 3-5, base in bits 0-2.
let index = ((sib >> 3) & 7) + self.state.extra_index_register_base;
let base = sib & 7;
super::instruction_internal::internal_set_memory_index_scale(instruction, sib >> 6);
if !is_vsib {
// Non-VSIB: index=100 (unextended) means "no index register".
if index != 4 {
super::instruction_internal::internal_set_memory_index_u32(instruction, index + index_reg as u32);
}
} else {
// VSIB: every index value names a vector register; also fold in the
// VSIB-specific extra index bits (EVEX.V').
super::instruction_internal::internal_set_memory_index_u32(
instruction,
index + self.state.extra_index_register_base_vsib + index_reg as u32,
);
}
if base == 5 && self.state.mod_ == 0 {
// base=101 with mod=00: no base register; a disp32 follows the SIB byte.
if self.state.address_size == OpSize::Size64 {
super::instruction_internal::internal_set_memory_displ_size(instruction, 4);
} else {
super::instruction_internal::internal_set_memory_displ_size(instruction, 3);
}
self.displ_index = self.data_ptr as usize;
instruction.set_memory_displacement(self.read_u32() as u32);
} else {
super::instruction_internal::internal_set_memory_base_u32(instruction, base + self.state.extra_base_register_base + base_reg as u32);
super::instruction_internal::internal_set_memory_displ_size(instruction, displ_size_scale);
instruction.set_memory_displacement(displ);
}
true
}
#[cfg_attr(has_must_use, must_use)]
#[inline(always)]
// Returns the EVEX disp8*N scale factor for `tuple_type`, taking the
// broadcast bit (EVEX.b, StateFlags::B) into account.
fn disp8n(&self, tuple_type: TupleType) -> u32 {
let broadcast = (self.state.flags & StateFlags::B) != 0;
get_disp8n(tuple_type, broadcast)
}
#[cfg_attr(has_must_use, must_use)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::missing_inline_in_public_items))]
// Returns the offsets (relative to the start of the instruction) and encoded
// sizes of the displacement and immediate(s) of `instruction`, which must be
// the most recently decoded instruction (the method reads decoder state such
// as `displ_index` and `state.flags` left over from that decode).
pub fn get_constant_offsets(&self, instruction: &Instruction) -> ConstantOffsets {
let mut constant_offsets = ConstantOffsets::default();
let displ_size = instruction.memory_displ_size();
if displ_size != 0 {
// displ_index was recorded during decode as an absolute data pointer;
// make it relative to the instruction start.
constant_offsets.displacement_offset = (self.displ_index - self.instr_start_data_ptr as usize) as u8;
if displ_size == 8 && (self.state.flags & StateFlags::ADDR64) == 0 {
// 8-byte displacement value that was not encoded as a 64-bit address
// field — only 4 bytes exist in the encoding.
constant_offsets.displacement_size = 4;
} else {
constant_offsets.displacement_size = displ_size as u8;
}
}
if (self.state.flags & StateFlags::NO_IMM) == 0 {
// Immediates are at the end of the instruction; scan operands last to
// first so the trailing immediate is found first. `extra_imm_sub`
// accounts for a trailing Immediate8_2nd (e.g. ENTER) that sits after
// the main immediate.
let mut extra_imm_sub = 0;
for i in (0..instruction.op_count()).rev() {
match instruction.try_op_kind(i).unwrap_or(OpKind::Register) {
OpKind::Immediate8 | OpKind::Immediate8to16 | OpKind::Immediate8to32 | OpKind::Immediate8to64 => {
constant_offsets.immediate_offset = (instruction.len() - extra_imm_sub - 1) as u8;
constant_offsets.immediate_size = 1;
break;
}
OpKind::Immediate16 => {
constant_offsets.immediate_offset = (instruction.len() - extra_imm_sub - 2) as u8;
constant_offsets.immediate_size = 2;
break;
}
OpKind::Immediate32 | OpKind::Immediate32to64 => {
constant_offsets.immediate_offset = (instruction.len() - extra_imm_sub - 4) as u8;
constant_offsets.immediate_size = 4;
break;
}
OpKind::Immediate64 => {
constant_offsets.immediate_offset = (instruction.len() - extra_imm_sub - 8) as u8;
constant_offsets.immediate_size = 8;
break;
}
OpKind::Immediate8_2nd => {
// Second (trailing) immediate byte; keep scanning for the main one.
constant_offsets.immediate_offset2 = (instruction.len() - 1) as u8;
constant_offsets.immediate_size2 = 1;
extra_imm_sub = 1;
}
OpKind::NearBranch16 => {
if (self.state.flags & StateFlags::BRANCH_IMM8) != 0 {
// Short branch: 1-byte relative offset.
constant_offsets.immediate_offset = (instruction.len() - 1) as u8;
constant_offsets.immediate_size = 1;
} else if (self.state.flags & StateFlags::XBEGIN) == 0 {
constant_offsets.immediate_offset = (instruction.len() - 2) as u8;
constant_offsets.immediate_size = 2;
} else {
// XBEGIN: immediate size follows the operand size, not the target kind.
debug_assert!((self.state.flags & StateFlags::XBEGIN) != 0);
if self.state.operand_size != OpSize::Size16 {
constant_offsets.immediate_offset = (instruction.len() - 4) as u8;
constant_offsets.immediate_size = 4;
} else {
constant_offsets.immediate_offset = (instruction.len() - 2) as u8;
constant_offsets.immediate_size = 2;
}
}
}
OpKind::NearBranch32 | OpKind::NearBranch64 => {
if (self.state.flags & StateFlags::BRANCH_IMM8) != 0 {
// Short branch: 1-byte relative offset.
constant_offsets.immediate_offset = (instruction.len() - 1) as u8;
constant_offsets.immediate_size = 1;
} else if (self.state.flags & StateFlags::XBEGIN) == 0 {
constant_offsets.immediate_offset = (instruction.len() - 4) as u8;
constant_offsets.immediate_size = 4;
} else {
// XBEGIN: immediate size follows the operand size, not the target kind.
debug_assert!((self.state.flags & StateFlags::XBEGIN) != 0);
if self.state.operand_size != OpSize::Size16 {
constant_offsets.immediate_offset = (instruction.len() - 4) as u8;
constant_offsets.immediate_size = 4;
} else {
constant_offsets.immediate_offset = (instruction.len() - 2) as u8;
constant_offsets.immediate_size = 2;
}
}
}
OpKind::FarBranch16 => {
// Far branch: 2-byte offset followed by a 2-byte segment selector.
constant_offsets.immediate_offset = (instruction.len() - (2 + 2)) as u8;
constant_offsets.immediate_size = 2;
constant_offsets.immediate_offset2 = (instruction.len() - 2) as u8;
constant_offsets.immediate_size2 = 2;
}
OpKind::FarBranch32 => {
// Far branch: 4-byte offset followed by a 2-byte segment selector.
constant_offsets.immediate_offset = (instruction.len() - (4 + 2)) as u8;
constant_offsets.immediate_size = 4;
constant_offsets.immediate_offset2 = (instruction.len() - 2) as u8;
constant_offsets.immediate_size2 = 2;
}
_ => {}
}
}
}
constant_offsets
}
}
#[allow(missing_debug_implementations)]
/// Iterator over the remaining instructions of a mutably borrowed [`Decoder`],
/// created by `(&mut Decoder).into_iter()`.
pub struct DecoderIter<'a: 'b, 'b> {
decoder: &'b mut Decoder<'a>,
}
impl<'a, 'b> Iterator for DecoderIter<'a, 'b> {
type Item = Instruction;
#[inline]
// Decodes and yields the next instruction, or `None` when no more bytes
// can be decoded.
fn next(&mut self) -> Option<Self::Item> {
match self.decoder.can_decode() {
true => Some(self.decoder.decode()),
false => None,
}
}
}
#[cfg(has_fused_iterator)]
// Fused: assumes `can_decode()` never becomes true again after returning
// false, as `FusedIterator` requires — confirm against `Decoder::can_decode`.
impl<'a, 'b> FusedIterator for DecoderIter<'a, 'b> {}
#[allow(missing_debug_implementations)]
/// Iterator over the remaining instructions of an owned [`Decoder`],
/// created by `Decoder::into_iter()`.
pub struct DecoderIntoIter<'a> {
decoder: Decoder<'a>,
}
impl<'a> Iterator for DecoderIntoIter<'a> {
type Item = Instruction;
#[inline]
// Decodes and yields the next instruction, or `None` when no more bytes
// can be decoded.
fn next(&mut self) -> Option<Self::Item> {
match self.decoder.can_decode() {
true => Some(self.decoder.decode()),
false => None,
}
}
}
#[cfg(has_fused_iterator)]
// Fused: assumes `can_decode()` never becomes true again after returning
// false, as `FusedIterator` requires — confirm against `Decoder::can_decode`.
impl<'a> FusedIterator for DecoderIntoIter<'a> {}
impl<'a> IntoIterator for Decoder<'a> {
type Item = Instruction;
type IntoIter = DecoderIntoIter<'a>;
#[cfg_attr(has_must_use, must_use)]
#[inline]
// Consumes the decoder, returning an owning instruction iterator.
fn into_iter(self) -> Self::IntoIter {
let decoder = self;
DecoderIntoIter { decoder }
}
}
impl<'a: 'b, 'b> IntoIterator for &'b mut Decoder<'a> {
type Item = Instruction;
type IntoIter = DecoderIter<'a, 'b>;
#[inline]
// Borrows the decoder mutably for the lifetime of the instruction iterator.
fn into_iter(self) -> Self::IntoIter {
let decoder = self;
DecoderIter { decoder }
}
}