mod encoder_data;
mod enums;
pub(crate) mod handlers_table;
#[cfg(feature = "op_code_info")]
mod instruction_fmt;
mod mem_op;
#[cfg(feature = "op_code_info")]
mod mnemonic_str_tbl;
#[cfg(feature = "op_code_info")]
mod op_code;
#[cfg(feature = "op_code_info")]
mod op_code_data;
#[cfg(feature = "op_code_info")]
mod op_code_fmt;
mod op_code_handler;
#[cfg(feature = "op_code_info")]
pub(crate) mod op_code_tbl;
#[cfg(feature = "op_code_info")]
mod op_kind_tables;
mod ops;
mod ops_tables;
#[cfg(test)]
pub(crate) mod tests;
pub use crate::encoder::enums::*;
use crate::encoder::handlers_table::*;
pub use crate::encoder::mem_op::*;
#[cfg(feature = "op_code_info")]
pub use crate::encoder::op_code::*;
use crate::encoder::op_code_handler::OpCodeHandler;
use crate::iced_constants::IcedConstants;
use crate::iced_error::IcedError;
use crate::instruction_internal;
use crate::*;
use alloc::string::String;
use alloc::vec::Vec;
use core::mem;
#[rustfmt::skip]
// Number of immediate bytes written for each `ImmSize` variant, indexed by
// `ImmSize as usize` (see `write_mod_rm()` where it's used to find the next IP).
// NOTE(review): order must stay in sync with the `ImmSize` enum declaration.
static IMM_SIZES: [u32; 19] = [
0, 1, 2, 4, 8, 3, 2, 4, 6, 1, 1, 1, 2, 2, 2, 4, 4, 1, 1,];
#[allow(missing_debug_implementations)]
/// x86 instruction encoder: turns `Instruction` values into bytes appended to an internal buffer.
pub struct Encoder {
// Address of the next byte to be written; advanced by `write_byte_internal()`
current_rip: u64,
// Destination for the encoded bytes
buffer: Vec<u8>,
// One handler per `Code` enum value
handlers: &'static [&'static OpCodeHandler; IcedConstants::CODE_ENUM_COUNT],
// Handler of the instruction currently being encoded
handler: &'static OpCodeHandler,
// First error recorded while encoding; empty string == no error
error_message: String,
// 16, 32 or 64
bitness: u32,
eip: u32,
// Low 32 bits of the rip where the displacement / immediate starts
displ_addr: u32,
imm_addr: u32,
// Immediate value (low and high 32 bits)
immediate: u32,
immediate_hi: u32,
// Displacement value, or branch/RIP-rel target (low and high 32 bits)
displ: u32,
displ_hi: u32,
op_code: u32,
// VEX/EVEX/MVEX W/L encoding preferences (set via public options not shown in this chunk)
internal_vex_wig_lig: u32,
internal_vex_lig: u32,
internal_evex_wig: u32,
internal_evex_lig: u32,
#[cfg(feature = "mvex")]
internal_mvex_wig: u32,
#[cfg(not(feature = "mvex"))]
#[allow(dead_code)]
internal_mvex_wig: (),
prevent_vex2: u32,
// Precomputed flags (P66/P67 or 0) to OR in when a 16/32-bit operand or
// address size is requested in the current mode
opsize16_flags: u32,
opsize32_flags: u32,
adrsize16_flags: u32,
adrsize32_flags: u32,
// Per-instruction state, reset at the start of `encode()`
encoder_flags: u32,
displ_size: DisplSize,
imm_size: ImmSize,
mod_rm: u8,
sib: u8,
}
impl Encoder {
// Error messages for instructions restricted to a CPU mode (see `encode()`).
pub(super) const ERROR_ONLY_1632_BIT_MODE: &'static str = "The instruction can only be used in 16/32-bit mode";
pub(super) const ERROR_ONLY_64_BIT_MODE: &'static str = "The instruction can only be used in 64-bit mode";
#[must_use]
#[inline]
#[allow(clippy::unwrap_used)]
/// Creates a new encoder.
///
/// # Panics
/// Panics if `bitness` is not 16, 32 or 64.
pub fn new(bitness: u32) -> Self {
	let encoder = Self::try_new(bitness);
	encoder.unwrap()
}
#[inline]
/// Creates a new encoder with an empty output buffer.
///
/// # Errors
/// Fails if `bitness` is not 16, 32 or 64.
pub fn try_new(bitness: u32) -> Result<Self, IcedError> {
	const NO_CAPACITY: usize = 0;
	Self::try_with_capacity(bitness, NO_CAPACITY)
}
#[allow(clippy::missing_inline_in_public_items)]
/// Creates a new encoder whose output buffer starts with the given capacity.
///
/// # Errors
/// Fails if `bitness` is not 16, 32 or 64.
pub fn try_with_capacity(bitness: u32, capacity: usize) -> Result<Self, IcedError> {
	match bitness {
		16 | 32 | 64 => {}
		_ => return Err(IcedError::new("Invalid bitness")),
	}
	// A 66h (operand size) / 67h (address size) prefix is needed whenever the
	// requested size differs from the current mode's default size.
	let opsize16_flags = if bitness == 16 { 0 } else { EncoderFlags::P66 };
	let opsize32_flags = if bitness == 16 { EncoderFlags::P66 } else { 0 };
	let adrsize16_flags = if bitness == 16 { 0 } else { EncoderFlags::P67 };
	let adrsize32_flags = if bitness == 32 { 0 } else { EncoderFlags::P67 };
	let handlers = HANDLERS_TABLE.as_ref();
	#[cfg(feature = "mvex")]
	const INTERNAL_MVEX_WIG: u32 = 0;
	#[cfg(not(feature = "mvex"))]
	const INTERNAL_MVEX_WIG: () = ();
	let buffer = if capacity == 0 { Vec::new() } else { Vec::with_capacity(capacity) };
	Ok(Self {
		current_rip: 0,
		buffer,
		// Placeholder handler; `encode()` overwrites it per instruction
		handler: handlers[0],
		handlers,
		error_message: String::new(),
		bitness,
		eip: 0,
		displ_addr: 0,
		imm_addr: 0,
		immediate: 0,
		immediate_hi: 0,
		displ: 0,
		displ_hi: 0,
		op_code: 0,
		internal_vex_wig_lig: 0,
		internal_vex_lig: 0,
		internal_evex_wig: 0,
		internal_evex_lig: 0,
		internal_mvex_wig: INTERNAL_MVEX_WIG,
		prevent_vex2: 0,
		opsize16_flags,
		opsize32_flags,
		adrsize16_flags,
		adrsize32_flags,
		encoder_flags: 0,
		displ_size: DisplSize::default(),
		imm_size: ImmSize::default(),
		mod_rm: 0,
		sib: 0,
	})
}
#[allow(clippy::missing_inline_in_public_items)]
/// Encodes `instruction` assuming it's located at `rip`, appending the encoded
/// bytes to the internal buffer.
///
/// Returns the length of the encoded instruction in bytes, or an error message
/// if the instruction couldn't be encoded (the first recorded error wins).
pub fn encode(&mut self, instruction: &Instruction, rip: u64) -> Result<usize, IcedError> {
// Reset all per-instruction state
self.current_rip = rip;
self.eip = rip as u32;
self.encoder_flags = EncoderFlags::NONE;
self.displ_size = DisplSize::None;
self.imm_size = ImmSize::None;
self.mod_rm = 0;
self.sib = 0;
// Look up the handler for this `Code` value
let handler = self.handlers[instruction.code() as usize];
self.handler = handler;
self.op_code = handler.op_code;
// Group opcodes: group index is pre-loaded into the modrm reg field (bits 3-5)
let group_index = handler.group_index;
if group_index >= 0 {
self.encoder_flags |= EncoderFlags::MOD_RM;
self.mod_rm = (group_index as u8) << 3;
}
// rm-group opcodes: index goes into the rm field with mod=11b
let rm_group_index = handler.rm_group_index;
if rm_group_index >= 0 {
self.encoder_flags |= EncoderFlags::MOD_RM;
self.mod_rm |= (rm_group_index as u8) | 0xC0;
}
// Check instructions restricted to 16/32-bit or 64-bit mode
match handler.enc_flags3 & (EncFlags3::BIT16OR32 | EncFlags3::BIT64) {
EncFlags3::BIT16OR32 => {
if self.bitness == 64 {
self.set_error_message_str(Self::ERROR_ONLY_1632_BIT_MODE);
}
}
EncFlags3::BIT64 => {
if self.bitness != 64 {
self.set_error_message_str(Self::ERROR_ONLY_64_BIT_MODE);
}
}
_ => {}
}
// Operand size: add a 66h prefix (or REX.W for 64-bit) if it differs from the mode default
match handler.op_size {
CodeSize::Unknown => {}
CodeSize::Code16 => self.encoder_flags |= self.opsize16_flags,
CodeSize::Code32 => self.encoder_flags |= self.opsize32_flags,
CodeSize::Code64 => {
// Instructions that default to 64-bit operand size don't need REX.W
if (handler.enc_flags3 & EncFlags3::DEFAULT_OP_SIZE64) == 0 {
self.encoder_flags |= EncoderFlags::W
}
}
}
// Address size: add a 67h prefix if it differs from the mode default
match handler.addr_size {
CodeSize::Unknown | CodeSize::Code64 => {}
CodeSize::Code16 => self.encoder_flags |= self.adrsize16_flags,
CodeSize::Code32 => self.encoder_flags |= self.adrsize32_flags,
}
if !handler.is_special_instr {
// Encode all operands; this updates encoder_flags, mod_rm, sib,
// displ and immediate state but writes no bytes yet
let ops = &*handler.operands;
for (i, op) in ops.iter().copied().enumerate() {
op.encode(self, instruction, i as u32);
}
// FWAIT byte (9Bh) is written before everything else
if (handler.enc_flags3 & EncFlags3::FWAIT) != 0 {
self.write_byte_internal(0x9B);
}
// Handler-specific encoding (prefixes, escape bytes, etc. — see op_code_handler)
(handler.encode)(handler, self, instruction);
let op_code = self.op_code;
if !handler.is_2byte_opcode {
self.write_byte_internal(op_code);
} else {
// 2-byte opcode: high byte first
self.write_byte_internal(op_code >> 8);
self.write_byte_internal(op_code);
}
if (self.encoder_flags & (EncoderFlags::MOD_RM | EncoderFlags::DISPL)) != 0 {
self.write_mod_rm();
}
if self.imm_size != ImmSize::None {
self.write_immediate();
}
} else {
// Special pseudo-instructions encode all their bytes themselves
(handler.encode)(handler, self, instruction);
}
// write_byte_internal() advances current_rip, so the diff is the encoded length
let instr_len = (self.current_rip as usize).wrapping_sub(rip as usize);
if instr_len > IcedConstants::MAX_INSTRUCTION_LENGTH && !handler.is_special_instr {
self.set_error_message(format!("Instruction length > {} bytes", IcedConstants::MAX_INSTRUCTION_LENGTH));
}
if !self.error_message.is_empty() {
Err(IcedError::with_string(mem::take(&mut self.error_message)))
} else {
Ok(instr_len)
}
}
#[inline]
/// Records `message` as the encoding error unless an error was already
/// recorded — the first error wins.
pub(super) fn set_error_message(&mut self, message: String) {
	if !self.error_message.is_empty() {
		return;
	}
	self.error_message = message;
}
#[inline]
/// Records `message` as the encoding error unless an error was already
/// recorded — the first error wins.
pub(super) fn set_error_message_str(&mut self, message: &str) {
	if !self.error_message.is_empty() {
		return;
	}
	self.error_message.push_str(message);
}
#[must_use]
#[inline]
/// Returns `true` if `actual == expected`, otherwise records an error message
/// for `operand` and returns `false`.
pub(super) fn verify_op_kind(&mut self, operand: u32, expected: OpKind, actual: OpKind) -> bool {
	if expected == actual {
		return true;
	}
	// Debug builds can show variant names; release builds only raw enum values
	let message = if cfg!(debug_assertions) {
		format!("Operand {}: Expected: {:?}, actual: {:?}", operand, expected, actual)
	} else {
		format!(
			"Operand {}: Expected: OpKind value {}, actual: OpKind value {}",
			operand, expected as u32, actual as u32
		)
	};
	self.set_error_message(message);
	false
}
#[must_use]
#[inline]
/// Returns `true` if `actual == expected`, otherwise records an error message
/// for `operand` and returns `false`.
pub(super) fn verify_register(&mut self, operand: u32, expected: Register, actual: Register) -> bool {
	if expected == actual {
		return true;
	}
	// Debug builds can show variant names; release builds only raw enum values
	let message = if cfg!(debug_assertions) {
		format!("Operand {}: Expected: {:?}, actual: {:?}", operand, expected, actual)
	} else {
		format!(
			"Operand {}: Expected: Register value {}, actual: Register value {}",
			operand, expected as u32, actual as u32
		)
	};
	self.set_error_message(message);
	false
}
#[must_use]
#[inline]
/// Returns `true` if `register` is within `[reg_lo, reg_hi]`, otherwise
/// records an error and returns `false`. Outside 64-bit mode only the first
/// 8 registers of a larger range are encodable, so `reg_hi` is clamped to
/// `reg_lo + 7` before the check.
pub(super) fn verify_register_range(&mut self, operand: u32, register: Register, reg_lo: Register, mut reg_hi: Register) -> bool {
if self.bitness != 64 && reg_hi as u32 > (reg_lo as u32).wrapping_add(7) {
// SAFETY: this branch only runs when reg_lo + 7 < reg_hi, and reg_hi is a
// valid Register, so reg_lo + 7 is also a valid Register discriminant.
reg_hi = unsafe { mem::transmute((reg_lo as RegisterUnderlyingType).wrapping_add(7)) };
}
if reg_lo <= register && register <= reg_hi {
true
} else {
// Debug builds can show variant names; release builds only raw enum values
if cfg!(debug_assertions) {
self.set_error_message(format!(
"Operand {}: Register {:?} is not between {:?} and {:?} (inclusive)",
operand, register, reg_lo, reg_hi
));
} else {
self.set_error_message(format!(
"Operand {}: Register {} is not between {} and {} (inclusive)",
operand, register as u32, reg_lo as u32, reg_hi as u32
));
}
false
}
}
/// Encodes a near-branch operand. `imm_size` is the encoded displacement size
/// in bytes (1, 2 or 4). The branch *target* address is stored in `immediate`
/// (+ `immediate_hi` for 64-bit targets); the `RipRelSizeN_TargetNN` imm sizes
/// mark it for conversion to a relative displacement when the immediate is written.
pub(super) fn add_branch(&mut self, op_kind: OpKind, imm_size: u32, instruction: &Instruction, operand: u32) {
if !self.verify_op_kind(operand, op_kind, instruction.op_kind(operand)) {
return;
}
let target;
match imm_size {
1 => match op_kind {
OpKind::NearBranch16 => {
// 16-bit operand size: 66h prefix unless we're in 16-bit mode
self.encoder_flags |= self.opsize16_flags;
self.imm_size = ImmSize::RipRelSize1_Target16;
self.immediate = instruction.near_branch16() as u32;
}
OpKind::NearBranch32 => {
self.encoder_flags |= self.opsize32_flags;
self.imm_size = ImmSize::RipRelSize1_Target32;
self.immediate = instruction.near_branch32();
}
OpKind::NearBranch64 => {
self.imm_size = ImmSize::RipRelSize1_Target64;
target = instruction.near_branch64();
self.immediate = target as u32;
self.immediate_hi = (target >> 32) as u32;
}
_ => unreachable!(),
},
2 => match op_kind {
OpKind::NearBranch16 => {
self.encoder_flags |= self.opsize16_flags;
self.imm_size = ImmSize::RipRelSize2_Target16;
self.immediate = instruction.near_branch16() as u32;
}
_ => unreachable!(),
},
4 => match op_kind {
OpKind::NearBranch32 => {
self.encoder_flags |= self.opsize32_flags;
self.imm_size = ImmSize::RipRelSize4_Target32;
self.immediate = instruction.near_branch32();
}
OpKind::NearBranch64 => {
self.imm_size = ImmSize::RipRelSize4_Target64;
target = instruction.near_branch64();
self.immediate = target as u32;
self.immediate_hi = (target >> 32) as u32;
}
_ => unreachable!(),
},
_ => unreachable!(),
}
}
/// Encodes a near-branch operand whose displacement size (2 or 4 bytes) follows
/// the operand size: the op kind must be `NearBranch64` in 64-bit mode and
/// `NearBranch32` in 16/32-bit mode.
pub(super) fn add_branch_x(&mut self, imm_size: u32, instruction: &Instruction, operand: u32) {
if self.bitness == 64 {
if !self.verify_op_kind(operand, OpKind::NearBranch64, instruction.op_kind(operand)) {
return;
}
let target = instruction.near_branch64();
match imm_size {
2 => {
// 16-bit displ in 64-bit mode always needs a 66h prefix
self.encoder_flags |= EncoderFlags::P66;
self.imm_size = ImmSize::RipRelSize2_Target64;
self.immediate = target as u32;
self.immediate_hi = (target >> 32) as u32;
}
4 => {
self.imm_size = ImmSize::RipRelSize4_Target64;
self.immediate = target as u32;
self.immediate_hi = (target >> 32) as u32;
}
_ => unreachable!(),
}
} else {
if !self.verify_op_kind(operand, OpKind::NearBranch32, instruction.op_kind(operand)) {
return;
}
match imm_size {
2 => {
const _: () = assert!(EncoderFlags::P66 == 0x80);
// bitness == 32 -> 0x20 << 2 == 0x80 == P66 (66h prefix for a 16-bit
// displ in 32-bit mode); bitness == 16 -> 0 (no prefix)
self.encoder_flags |= (self.bitness & 0x20) << 2;
self.imm_size = ImmSize::RipRelSize2_Target32;
self.immediate = instruction.near_branch32();
}
4 => {
const _: () = assert!(EncoderFlags::P66 == 0x80);
// bitness == 16 -> 0x10 << 3 == 0x80 == P66; bitness == 32 -> 0
self.encoder_flags |= (self.bitness & 0x10) << 3;
self.imm_size = ImmSize::RipRelSize4_Target32;
self.immediate = instruction.near_branch32();
}
_ => unreachable!(),
}
}
}
/// Encodes a branch operand whose displacement is stored as a plain 2- or
/// 4-byte immediate (no RIP-relative conversion).
pub(super) fn add_branch_disp(&mut self, displ_size: u32, instruction: &Instruction, operand: u32) {
	debug_assert!(displ_size == 2 || displ_size == 4);
	let op_kind = match displ_size {
		2 => {
			self.imm_size = ImmSize::Size2;
			self.immediate = instruction.near_branch16() as u32;
			OpKind::NearBranch16
		}
		4 => {
			self.imm_size = ImmSize::Size4;
			self.immediate = instruction.near_branch32();
			OpKind::NearBranch32
		}
		_ => unreachable!(),
	};
	let _ = self.verify_op_kind(operand, op_kind, instruction.op_kind(operand));
}
/// Encodes a far branch operand: the offset goes in `immediate` and the
/// segment selector in `immediate_hi`. `size` is the offset size (2 or 4).
pub(super) fn add_far_branch(&mut self, instruction: &Instruction, operand: u32, size: u32) {
	match size {
		2 => {
			if !self.verify_op_kind(operand, OpKind::FarBranch16, instruction.op_kind(operand)) {
				return;
			}
			self.imm_size = ImmSize::Size2_2;
			self.immediate = instruction.far_branch16() as u32;
			self.immediate_hi = instruction.far_branch_selector() as u32;
		}
		_ => {
			debug_assert_eq!(size, 4);
			if !self.verify_op_kind(operand, OpKind::FarBranch32, instruction.op_kind(operand)) {
				return;
			}
			self.imm_size = ImmSize::Size4_2;
			self.immediate = instruction.far_branch32();
			self.immediate_hi = instruction.far_branch_selector() as u32;
		}
	}
	// Operand size prefix when the offset size doesn't match the current mode
	if self.bitness != size.wrapping_mul(8) {
		self.encoder_flags |= EncoderFlags::P66;
	}
}
/// Validates a register-implied address size (`reg_size` in bytes: 2, 4 or 8)
/// against the current mode and adds a 67h prefix when needed.
pub(super) fn set_addr_size(&mut self, reg_size: u32) {
	debug_assert!(reg_size == 2 || reg_size == 4 || reg_size == 8);
	if self.bitness == 64 {
		// 64-bit mode: 16-bit addressing is invalid, 32-bit needs a 67h prefix
		match reg_size {
			2 => self.set_error_message(format!("Invalid register size: {}, must be 32-bit or 64-bit", reg_size.wrapping_mul(8))),
			4 => self.encoder_flags |= EncoderFlags::P67,
			_ => {}
		}
	} else if reg_size == 8 {
		// 64-bit addressing is impossible outside 64-bit mode
		self.set_error_message(format!("Invalid register size: {}, must be 16-bit or 32-bit", reg_size.wrapping_mul(8)));
	} else if self.bitness == 16 {
		if reg_size == 4 {
			self.encoder_flags |= EncoderFlags::P67;
		}
	} else {
		debug_assert_eq!(self.bitness, 32);
		if reg_size == 2 {
			self.encoder_flags |= EncoderFlags::P67;
		}
	}
}
/// Encodes an absolute memory operand: no base or index register, only a
/// displacement whose size comes from `Instruction::memory_displ_size()`
/// (2, 4 or 8 bytes).
pub(super) fn add_abs_mem(&mut self, instruction: &Instruction, operand: u32) {
self.encoder_flags |= EncoderFlags::DISPL;
let op_kind = instruction.op_kind(operand);
if op_kind == OpKind::Memory {
// Absolute addressing can't be combined with base/index regs or a scale
if instruction.memory_base() != Register::None || instruction.memory_index() != Register::None {
self.set_error_message(format!("Operand {}: Absolute addresses can't have base and/or index regs", operand));
return;
}
if instruction.memory_index_scale() != 1 {
self.set_error_message(format!("Operand {}: Absolute addresses must have scale == *1", operand));
return;
}
match instruction.memory_displ_size() {
2 => {
// 16-bit address: invalid in 64-bit mode, needs a 67h prefix in 32-bit mode
if self.bitness == 64 {
self.set_error_message(format!("Operand {}: 16-bit abs addresses can't be used in 64-bit mode", operand));
return;
}
if self.bitness == 32 {
self.encoder_flags |= EncoderFlags::P67;
}
self.displ_size = DisplSize::Size2;
if instruction.memory_displacement64() > u16::MAX as u64 {
self.set_error_message(format!("Operand {}: Displacement must fit in a u16", operand));
return;
}
self.displ = instruction.memory_displacement32();
}
4 => {
// 32-bit address: 67h prefix unless 32-bit addressing is the default
self.encoder_flags |= self.adrsize32_flags;
self.displ_size = DisplSize::Size4;
if instruction.memory_displacement64() > u32::MAX as u64 {
self.set_error_message(format!("Operand {}: Displacement must fit in a u32", operand));
return;
}
self.displ = instruction.memory_displacement32();
}
8 => {
// 64-bit address: only valid in 64-bit mode
if self.bitness != 64 {
self.set_error_message(format!("Operand {}: 64-bit abs address is only available in 64-bit mode", operand));
return;
}
self.displ_size = DisplSize::Size8;
let addr = instruction.memory_displacement64();
self.displ = addr as u32;
self.displ_hi = (addr >> 32) as u32;
}
_ => self.set_error_message(format!(
"Operand {}: Instruction::memory_displ_size() must be initialized to 2 (16-bit), 4 (32-bit) or 8 (64-bit)",
operand
)),
}
} else {
if cfg!(debug_assertions) {
self.set_error_message(format!("Operand {}: Expected OpKind::Memory, actual: {:?}", operand, op_kind));
} else {
self.set_error_message(format!("Operand {}: Expected OpKind::Memory, actual: OpKind value {}", operand, op_kind as u32));
}
}
}
#[allow(clippy::too_many_arguments)]
/// Encodes a register operand into the modrm `reg` field (bits 3-5), with the
/// register number's extension bits going into the R/R2 encoder flags.
pub(super) fn add_mod_rm_register(&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register) {
if !self.verify_op_kind(operand, OpKind::Register, instruction.op_kind(operand)) {
return;
}
let reg = instruction.op_register(operand);
if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
return;
}
let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
if reg_lo == Register::AL {
// 8-bit GPRs: SPL+ follow the 4 high-byte regs (AH..BH) in the Register
// enum, so their number is adjusted down by 4; SPL/BPL/SIL/DIL also
// require a REX prefix, while AH..BH can't be used with one.
if reg >= Register::SPL {
reg_num -= 4;
self.encoder_flags |= EncoderFlags::REX;
} else if reg >= Register::AH {
self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
}
}
debug_assert!(reg_num <= 31);
self.mod_rm |= ((reg_num & 7) << 3) as u8;
self.encoder_flags |= EncoderFlags::MOD_RM;
const _: () = assert!(EncoderFlags::R == 4);
// Register number bit 3 -> R flag (REX.R etc.)
self.encoder_flags |= (reg_num & 8) >> 1;
const _: () = assert!(EncoderFlags::R2 == 0x200);
// Register number bit 4 -> R2 flag (EVEX.R')
self.encoder_flags |= (reg_num & 0x10) << (9 - 4);
}
/// Encodes a register operand into the low 3 bits of the opcode byte (the
/// `+r` opcode form); register number bit 3 goes into the B encoder flag.
pub(super) fn add_reg(&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register) {
if !self.verify_op_kind(operand, OpKind::Register, instruction.op_kind(operand)) {
return;
}
let reg = instruction.op_register(operand);
if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
return;
}
let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
if reg_lo == Register::AL {
// 8-bit GPRs: SPL+ follow AH..BH in the Register enum, so adjust the
// number down by 4; SPL/BPL/SIL/DIL require a REX prefix and AH..BH
// can't be encoded together with one.
if reg >= Register::SPL {
reg_num -= 4;
self.encoder_flags |= EncoderFlags::REX;
} else if reg >= Register::AH {
self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
}
}
debug_assert!(reg_num <= 15);
self.op_code |= reg_num & 7;
const _: () = assert!(EncoderFlags::B == 1);
debug_assert!(reg_num <= 15);
// Register number bit 3 -> B flag (REX.B)
self.encoder_flags |= reg_num >> 3;
}
#[inline]
/// Convenience wrapper for `add_reg_or_mem_full()` for operands that can't
/// use VSIB index registers.
pub(super) fn add_reg_or_mem(
&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register, allow_mem_op: bool, allow_reg_op: bool,
) {
self.add_reg_or_mem_full(instruction, operand, reg_lo, reg_hi, Register::None, Register::None, allow_mem_op, allow_reg_op);
}
#[allow(clippy::too_many_arguments)]
/// Encodes a register-or-memory operand into the modrm `rm` field.
/// `vsib_index_reg_lo/hi` give the valid VSIB index register range, or
/// `Register::None` when this isn't a VSIB operand; `allow_mem_op` /
/// `allow_reg_op` select which operand kinds are legal here.
pub(super) fn add_reg_or_mem_full(
&mut self, instruction: &Instruction, operand: u32, reg_lo: Register, reg_hi: Register, vsib_index_reg_lo: Register,
vsib_index_reg_hi: Register, allow_mem_op: bool, allow_reg_op: bool,
) {
let op_kind = instruction.op_kind(operand);
self.encoder_flags |= EncoderFlags::MOD_RM;
if op_kind == OpKind::Register {
if !allow_reg_op {
self.set_error_message(format!("Operand {}: register operand is not allowed", operand));
return;
}
let reg = instruction.op_register(operand);
if !self.verify_register_range(operand, reg, reg_lo, reg_hi) {
return;
}
let mut reg_num = (reg as u32).wrapping_sub(reg_lo as u32);
if reg_lo == Register::AL {
// 8-bit GPRs: the 4 high-byte regs (AH..BH) sit between BL and SPL in
// the Register enum, so SPL+ and R8L+ numbers are adjusted down by 4.
if reg >= Register::R8L {
reg_num -= 4;
} else if reg >= Register::SPL {
reg_num -= 4;
// SPL/BPL/SIL/DIL are only encodable with a REX prefix
self.encoder_flags |= EncoderFlags::REX;
} else if reg >= Register::AH {
// AH..BH can't be encoded together with a REX prefix
self.encoder_flags |= EncoderFlags::HIGH_LEGACY_8_BIT_REGS;
}
}
// mod=11b (register direct) + low 3 bits of the register number
self.mod_rm |= (reg_num & 7) as u8;
self.mod_rm |= 0xC0;
const _: () = assert!(EncoderFlags::B == 1);
const _: () = assert!(EncoderFlags::X == 2);
// Register number bits 3-4 -> B and X extension flags
self.encoder_flags |= (reg_num >> 3) & 3;
debug_assert!(reg_num <= 31);
} else if op_kind == OpKind::Memory {
if !allow_mem_op {
self.set_error_message(format!("Operand {}: memory operand is not allowed", operand));
return;
}
if instruction.memory_size().is_broadcast() {
self.encoder_flags |= EncoderFlags::BROADCAST;
}
// If the instruction doesn't know its code size, assume the encoder's bitness
let mut code_size = instruction.code_size();
if code_size == CodeSize::Unknown {
code_size = if self.bitness == 64 {
CodeSize::Code64
} else if self.bitness == 32 {
CodeSize::Code32
} else {
debug_assert_eq!(self.bitness, 16);
CodeSize::Code16
};
}
let addr_size = instruction_internal::get_address_size_in_bytes(
instruction.memory_base(),
instruction.memory_index(),
instruction.memory_displ_size(),
code_size,
)
.wrapping_mul(8);
// 67h prefix whenever the effective address size differs from the mode default
if addr_size != self.bitness {
self.encoder_flags |= EncoderFlags::P67;
}
// Some instructions require the (GPR) register operand size to match the address size
if (self.encoder_flags & EncoderFlags::REG_IS_MEMORY) != 0 {
let reg_size = Encoder::get_register_op_size(instruction);
if reg_size != addr_size {
self.set_error_message(format!("Operand {}: Register operand size must equal memory addressing mode (16/32/64)", operand));
return;
}
}
if addr_size == 16 {
if vsib_index_reg_lo != Register::None {
self.set_error_message(format!(
"Operand {}: VSIB operands can't use 16-bit addressing. It must be 32-bit or 64-bit addressing",
operand
));
return;
}
self.add_mem_op16(instruction, operand);
} else {
self.add_mem_op(instruction, operand, addr_size, vsib_index_reg_lo, vsib_index_reg_hi);
}
} else {
if cfg!(debug_assertions) {
self.set_error_message(format!("Operand {}: Expected a register or memory operand, but op_kind is {:?}", operand, op_kind));
} else {
self.set_error_message(format!("Operand {}: Expected a register or memory operand, but op_kind is {}", operand, op_kind as u32));
}
}
}
#[must_use]
/// Returns the size in bits (16/32/64) of the instruction's first operand if
/// it's a GPR, otherwise 0.
fn get_register_op_size(instruction: &Instruction) -> u32 {
	debug_assert_eq!(instruction.op0_kind(), OpKind::Register);
	if instruction.op0_kind() != OpKind::Register {
		return 0;
	}
	let reg = instruction.op0_register();
	if reg.is_gpr64() {
		64
	} else if reg.is_gpr32() {
		32
	} else if reg.is_gpr16() {
		16
	} else {
		0
	}
}
#[must_use]
/// Tries to express `displ` as a 1-byte displacement. A handler may supply its
/// own conversion (compressed disp8 forms); otherwise any value fitting in an
/// `i8` is accepted.
fn try_convert_to_disp8n(&mut self, instruction: &Instruction, displ: i32) -> Option<i8> {
	match self.handler.try_convert_to_disp8n {
		Some(convert) => convert(self.handler, self, instruction, displ),
		None => {
			if (i8::MIN as i32..=i8::MAX as i32).contains(&displ) {
				Some(displ as i8)
			} else {
				None
			}
		}
	}
}
#[allow(clippy::needless_return)]
/// Encodes a 16-bit addressing mode memory operand (modrm only, no SIB byte).
fn add_mem_op16(&mut self, instruction: &Instruction, operand: u32) {
if self.bitness == 64 {
self.set_error_message(format!("Operand {}: 16-bit addressing can't be used by 64-bit code", operand));
return;
}
let base = instruction.memory_base();
let index = instruction.memory_index();
let mut displ_size = instruction.memory_displ_size();
// 16-bit modrm rm values:
// 0=[bx+si] 1=[bx+di] 2=[bp+si] 3=[bp+di] 4=[si] 5=[di] 6=[bp] (disp16 if mod=00) 7=[bx]
if base == Register::BX && index == Register::SI {
// rm == 0, nothing to OR in
} else if base == Register::BX && index == Register::DI {
self.mod_rm |= 1;
} else if base == Register::BP && index == Register::SI {
self.mod_rm |= 2;
} else if base == Register::BP && index == Register::DI {
self.mod_rm |= 3;
} else if base == Register::SI && index == Register::None {
self.mod_rm |= 4;
} else if base == Register::DI && index == Register::None {
self.mod_rm |= 5;
} else if base == Register::BP && index == Register::None {
self.mod_rm |= 6;
} else if base == Register::BX && index == Register::None {
self.mod_rm |= 7;
} else if base == Register::None && index == Register::None {
// No regs: rm=6 with mod=00 is a plain disp16
self.mod_rm |= 6;
self.displ_size = DisplSize::Size2;
if instruction.memory_displacement64() > u16::MAX as u64 {
self.set_error_message(format!("Operand {}: Displacement must fit in a u16", operand));
return;
}
self.displ = instruction.memory_displacement32();
} else {
if cfg!(debug_assertions) {
self.set_error_message(format!("Operand {}: Invalid 16-bit base + index registers: base={:?}, index={:?}", operand, base, index));
} else {
self.set_error_message(format!(
"Operand {}: Invalid 16-bit base + index registers: base={}, index={}",
operand, base as u32, index as u32
));
}
return;
}
if base != Register::None || index != Register::None {
if (instruction.memory_displacement64() as i64) < i16::MIN as i64 || (instruction.memory_displacement64() as i64) > u16::MAX as i64 {
self.set_error_message(format!("Operand {}: Displacement must fit in an i16 or a u16", operand));
return;
}
self.displ = instruction.memory_displacement32();
// rm=6 with mod=00 means disp16, so plain [bp] must be encoded with a 1-byte displ of 0
if displ_size == 0 && base == Register::BP && index == Register::None {
displ_size = 1;
if self.displ != 0 {
self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
return;
}
}
// A 1-byte displ may be compressible (disp8N — see try_convert_to_disp8n);
// otherwise widen it to 2 bytes
if displ_size == 1 {
if let Some(compressed_value) = self.try_convert_to_disp8n(instruction, self.displ as i16 as i32) {
self.displ = compressed_value as u32;
} else {
displ_size = 2;
}
}
// Pick the mod bits from the final displacement size
if displ_size == 0 {
if self.displ != 0 {
self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
return;
}
} else if displ_size == 1 {
if (self.displ as i32) < i8::MIN as i32 || (self.displ as i32) > i8::MAX as i32 {
self.set_error_message(format!("Operand {}: Displacement must fit in an i8", operand));
return;
}
self.mod_rm |= 0x40;
self.displ_size = DisplSize::Size1;
} else if displ_size == 2 {
self.mod_rm |= 0x80;
self.displ_size = DisplSize::Size2;
} else {
self.set_error_message(format!("Operand {}: Invalid displacement size: {}, must be 0, 1, or 2", operand, displ_size));
}
}
}
/// Encodes a 32/64-bit addressing mode memory operand (modrm plus an optional
/// SIB byte). `vsib_index_reg_lo/hi` restrict the index register range for
/// VSIB operands (`Register::None` == not VSIB).
fn add_mem_op(&mut self, instruction: &Instruction, operand: u32, addr_size: u32, vsib_index_reg_lo: Register, vsib_index_reg_hi: Register) {
debug_assert!(addr_size == 32 || addr_size == 64);
if self.bitness != 64 && addr_size == 64 {
self.set_error_message(format!("Operand {}: 64-bit addressing can only be used in 64-bit mode", operand));
return;
}
let base = instruction.memory_base();
let index = instruction.memory_index();
let mut displ_size = instruction.memory_displ_size();
// Valid base/index register ranges for this address size
let base_lo;
let base_hi;
let index_lo;
let index_hi;
if addr_size == 64 {
base_lo = Register::RAX;
base_hi = Register::R15;
} else {
debug_assert_eq!(addr_size, 32);
base_lo = Register::EAX;
base_hi = Register::R15D;
}
if vsib_index_reg_lo != Register::None {
index_lo = vsib_index_reg_lo;
index_hi = vsib_index_reg_hi;
} else {
index_lo = base_lo;
index_hi = base_hi;
}
if base != Register::None && base != Register::RIP && base != Register::EIP && !self.verify_register_range(operand, base, base_lo, base_hi) {
return;
}
if index != Register::None && !self.verify_register_range(operand, index, index_lo, index_hi) {
return;
}
if displ_size != 0 && displ_size != 1 && displ_size != 4 && displ_size != 8 {
self.set_error_message(format!("Operand {}: Invalid displ size: {}, must be 0, 1, 4, 8", operand, displ_size));
return;
}
// RIP/EIP relative addressing: mod=00 rm=101b with a disp32 relative to the next instruction
if base == Register::RIP || base == Register::EIP {
if index != Register::None {
self.set_error_message(format!("Operand {}: RIP relative addressing can't use an index register", operand));
return;
}
if instruction_internal::internal_get_memory_index_scale(instruction) != 0 {
self.set_error_message(format!("Operand {}: RIP relative addressing must use scale *1", operand));
return;
}
if self.bitness != 64 {
self.set_error_message(format!("Operand {}: RIP/EIP relative addressing is only available in 64-bit mode", operand));
return;
}
if (self.encoder_flags & EncoderFlags::MUST_USE_SIB) != 0 {
self.set_error_message(format!("Operand {}: RIP/EIP relative addressing isn't supported", operand));
return;
}
self.mod_rm |= 5;
// displ/displ_hi hold the *target*; write_mod_rm() converts it to a relative displ
let target = instruction.memory_displacement64();
if base == Register::RIP {
self.displ_size = DisplSize::RipRelSize4_Target64;
self.displ = target as u32;
self.displ_hi = (target >> 32) as u32;
} else {
self.displ_size = DisplSize::RipRelSize4_Target32;
if target > u32::MAX as u64 {
self.set_error_message(format!("Operand {}: Target address doesn't fit in 32 bits: 0x{:X}", operand, target));
return;
}
self.displ = target as u32;
}
return;
}
// scale is log2 of the displayed scale (0 == *1, used directly in SIB bits 6-7)
let scale = instruction_internal::internal_get_memory_index_scale(instruction);
self.displ = instruction.memory_displacement32();
if addr_size == 64 {
if (instruction.memory_displacement64() as i64) < i32::MIN as i64 || (instruction.memory_displacement64() as i64) > i32::MAX as i64 {
self.set_error_message(format!("Operand {}: Displacement must fit in an i32", operand));
return;
}
} else {
debug_assert_eq!(addr_size, 32);
if (instruction.memory_displacement64() as i64) < i32::MIN as i64 || (instruction.memory_displacement64() as i64) > u32::MAX as i64 {
self.set_error_message(format!("Operand {}: Displacement must fit in an i32 or a u32", operand));
return;
}
}
// Offset-only operand ([disp32])
if base == Register::None && index == Register::None {
if vsib_index_reg_lo != Register::None {
self.set_error_message(format!("Operand {}: VSIB addressing can't use an offset-only address", operand));
return;
}
if self.bitness == 64 || scale != 0 || (self.encoder_flags & EncoderFlags::MUST_USE_SIB) != 0 {
// In 64-bit mode mod=00 rm=101b means RIP-relative, so an absolute
// disp32 requires a SIB byte (base=101b, index=100b == none)
self.mod_rm |= 4;
self.displ_size = DisplSize::Size4;
self.encoder_flags |= EncoderFlags::SIB;
self.sib = (0x25 | (scale << 6)) as u8;
return;
} else {
// 16/32-bit mode: mod=00 rm=101b is a plain disp32
self.mod_rm |= 5;
self.displ_size = DisplSize::Size4;
return;
}
}
let base_num = if base == Register::None { -1 } else { (base as i32).wrapping_sub(base_lo as i32) };
let index_num = if index == Register::None { -1 } else { (index as i32).wrapping_sub(index_lo as i32) };
// Base reg 101b (xBP/R13) with mod=00 means "no base", so force a 1-byte displ of 0
if displ_size == 0 && (base_num & 7) == 5 {
displ_size = 1;
if self.displ != 0 {
self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
return;
}
}
// Try a 1-byte (possibly disp8N-compressed) displ; fall back to full size
if displ_size == 1 {
if let Some(compressed_value) = self.try_convert_to_disp8n(instruction, self.displ as i32) {
self.displ = compressed_value as u32;
} else {
displ_size = addr_size / 8;
}
}
// Pick the mod bits from the final displacement size
if base == Register::None {
// Index-only ([index*scale+disp32]): mod=00 + SIB with base=101b
debug_assert!(index != Register::None);
self.displ_size = DisplSize::Size4;
} else if displ_size == 1 {
if (self.displ as i32) < i8::MIN as i32 || (self.displ as i32) > i8::MAX as i32 {
self.set_error_message(format!("Operand {}: Displacement must fit in an i8", operand));
return;
}
self.mod_rm |= 0x40;
self.displ_size = DisplSize::Size1;
} else if displ_size == addr_size / 8 {
self.mod_rm |= 0x80;
self.displ_size = DisplSize::Size4;
} else if displ_size == 0 {
if self.displ != 0 {
self.set_error_message(format!("Operand {}: Displacement must be 0 if displ_size == 0", operand));
return;
}
} else {
self.set_error_message_str("Invalid memory_displ_size() value");
}
// rm=100b means a SIB byte follows, so [xSP/R12], any indexed/scaled form,
// and forced-SIB encodings all need a SIB byte
if index == Register::None && (base_num & 7) != 4 && scale == 0 && (self.encoder_flags & EncoderFlags::MUST_USE_SIB) == 0 {
debug_assert!(base != Register::None);
self.mod_rm |= (base_num & 7) as u8;
} else {
self.encoder_flags |= EncoderFlags::SIB;
self.sib = (scale << 6) as u8;
self.mod_rm |= 4;
if index == Register::RSP || index == Register::ESP {
self.set_error_message(format!("Operand {}: ESP/RSP can't be used as an index register", operand));
return;
}
if base_num < 0 {
// No base: SIB base=101b (valid with mod=00 + disp32)
self.sib |= 5;
} else {
self.sib |= (base_num & 7) as u8;
}
if index_num < 0 {
// No index: SIB index=100b
self.sib |= 0x20;
} else {
self.sib |= ((index_num & 7) << 3) as u8;
}
}
// Register number extension bits -> encoder flags
if base_num >= 0 {
const _: () = assert!(EncoderFlags::B == 1);
debug_assert!(base_num <= 15);
// Base bit 3 -> B flag (REX.B)
self.encoder_flags |= (base_num as u32) >> 3;
}
if index_num >= 0 {
const _: () = assert!(EncoderFlags::X == 2);
// Index bit 3 -> X flag; index bit 4 -> V' position (VSIB regs go up to 31)
self.encoder_flags |= ((index_num as u32) >> 2) & 2;
self.encoder_flags |= ((index_num as u32) & 0x10) << EncoderFlags::VVVVV_SHIFT;
debug_assert!(index_num <= 31);
}
}
/// Writes the legacy prefix bytes in order: segment override, lock (F0h),
/// operand size (66h), address size (67h), repe (F3h), repne (F2h).
fn write_prefixes(&mut self, instruction: &Instruction, can_write_f3: bool) {
debug_assert_eq!(self.handler.is_special_instr, false);
let seg = instruction.segment_prefix();
if seg != Register::None {
// Prefix bytes for ES, CS, SS, DS, FS, GS, indexed by `seg - ES`
static SEGMENT_OVERRIDES: [u8; 6] = [0x26, 0x2E, 0x36, 0x3E, 0x64, 0x65];
debug_assert!((seg as usize).wrapping_sub(Register::ES as usize) < SEGMENT_OVERRIDES.len());
// The index computation relies on the segment registers being contiguous
const _: () = assert!(Register::ES as u32 + 1 == Register::CS as u32);
const _: () = assert!(Register::ES as u32 + 2 == Register::SS as u32);
const _: () = assert!(Register::ES as u32 + 3 == Register::DS as u32);
const _: () = assert!(Register::ES as u32 + 4 == Register::FS as u32);
const _: () = assert!(Register::ES as u32 + 5 == Register::GS as u32);
// SAFETY: the const asserts above show ES..=GS map to indexes 0..=5 and
// the debug_assert checks the bound; assumes `seg` is always a segment
// register here — NOTE(review): verify segment_prefix() can't return others
self.write_byte_internal(unsafe { *SEGMENT_OVERRIDES.get_unchecked((seg as usize).wrapping_sub(Register::ES as usize)) } as u32);
}
if (self.encoder_flags & EncoderFlags::PF0) != 0 || instruction.has_lock_prefix() {
self.write_byte_internal(0xF0);
}
if (self.encoder_flags & EncoderFlags::P66) != 0 {
self.write_byte_internal(0x66);
}
if (self.encoder_flags & EncoderFlags::P67) != 0 {
self.write_byte_internal(0x67);
}
if can_write_f3 && instruction.has_repe_prefix() {
self.write_byte_internal(0xF3);
}
if instruction.has_repne_prefix() {
self.write_byte_internal(0xF2);
}
}
/// Writes the modrm byte, the optional SIB byte, and any displacement bytes
/// (little-endian) to the output buffer.
fn write_mod_rm(&mut self) {
debug_assert_eq!(self.handler.is_special_instr, false);
debug_assert!((self.encoder_flags & (EncoderFlags::MOD_RM | EncoderFlags::DISPL)) != 0);
if (self.encoder_flags & EncoderFlags::MOD_RM) != 0 {
self.write_byte_internal(self.mod_rm as u32);
if (self.encoder_flags & EncoderFlags::SIB) != 0 {
self.write_byte_internal(self.sib as u32);
}
}
let mut diff4;
// Record where the displacement starts (low 32 bits of rip)
self.displ_addr = self.current_rip as u32;
match self.displ_size {
DisplSize::None => {}
DisplSize::Size1 => self.write_byte_internal(self.displ),
DisplSize::Size2 => {
diff4 = self.displ;
self.write_byte_internal(diff4);
self.write_byte_internal(diff4 >> 8);
}
DisplSize::Size4 => {
diff4 = self.displ;
self.write_byte_internal(diff4);
self.write_byte_internal(diff4 >> 8);
self.write_byte_internal(diff4 >> 16);
self.write_byte_internal(diff4 >> 24);
}
DisplSize::Size8 => {
// Low 32 bits, then high 32 bits
diff4 = self.displ;
self.write_byte_internal(diff4);
self.write_byte_internal(diff4 >> 8);
self.write_byte_internal(diff4 >> 16);
self.write_byte_internal(diff4 >> 24);
diff4 = self.displ_hi;
self.write_byte_internal(diff4);
self.write_byte_internal(diff4 >> 8);
self.write_byte_internal(diff4 >> 16);
self.write_byte_internal(diff4 >> 24);
}
DisplSize::RipRelSize4_Target32 => {
// displ holds the target; subtract the address of the next instruction
// (this 4-byte displ plus any immediate bytes that follow it)
let eip = (self.current_rip as u32).wrapping_add(4).wrapping_add(IMM_SIZES[self.imm_size as usize]);
diff4 = self.displ.wrapping_sub(eip);
self.write_byte_internal(diff4);
self.write_byte_internal(diff4 >> 8);
self.write_byte_internal(diff4 >> 16);
self.write_byte_internal(diff4 >> 24);
}
DisplSize::RipRelSize4_Target64 => {
// 64-bit target: the signed distance from the next instruction must fit in an i32
let rip = self.current_rip.wrapping_add(4).wrapping_add(IMM_SIZES[self.imm_size as usize] as u64);
let diff8 = ((((self.displ_hi as u64) << 32) | self.displ as u64).wrapping_sub(rip)) as i64;
if diff8 < i32::MIN as i64 || diff8 > i32::MAX as i64 {
self.set_error_message(format!(
"RIP relative distance is too far away: next_ip: 0x{:016X} target: 0x{:08X}, diff = {}, diff must fit in an i32",
rip,
((self.displ_hi as u64) << 32) | self.displ as u64,
diff8
));
}
diff4 = diff8 as u32;
self.write_byte_internal(diff4);
self.write_byte_internal(diff4 >> 8);
self.write_byte_internal(diff4 >> 16);
self.write_byte_internal(diff4 >> 24);
}
}
}
// Writes the immediate value(s), if any. Branch targets are converted to relative
// displacements (target - next instruction address) and range-checked; out-of-range
// branch distances report an error via set_error_message() but a (truncated) value
// is still written so the output length stays consistent.
fn write_immediate(&mut self) {
debug_assert_eq!(self.handler.is_special_instr, false);
let ip;
let eip;
let rip;
let diff2;
let diff4;
let diff8;
let mut value;
// Remember where the immediate starts; reported by get_constant_offsets()
self.imm_addr = self.current_rip as u32;
match self.imm_size {
ImmSize::None => {}
// Single immediate byte (also used for opcode-embedded and register-in-ib forms)
ImmSize::Size1 | ImmSize::SizeIbReg | ImmSize::Size1OpCode => {
self.write_byte_internal(self.immediate);
}
// All multi-byte immediates below are emitted little-endian
ImmSize::Size2 => {
value = self.immediate;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
}
ImmSize::Size4 => {
value = self.immediate;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(value >> 16);
self.write_byte_internal(value >> 24);
}
ImmSize::Size8 => {
// 64-bit immediate: low 32 bits in `immediate`, high 32 bits in `immediate_hi`
value = self.immediate;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(value >> 16);
self.write_byte_internal(value >> 24);
value = self.immediate_hi;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(value >> 16);
self.write_byte_internal(value >> 24);
}
// SizeA_B variants: two immediates back to back; the second one is in `immediate_hi`
ImmSize::Size2_1 => {
value = self.immediate;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(self.immediate_hi);
}
ImmSize::Size1_1 => {
self.write_byte_internal(self.immediate);
self.write_byte_internal(self.immediate_hi);
}
ImmSize::Size2_2 => {
value = self.immediate;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
value = self.immediate_hi;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
}
ImmSize::Size4_2 => {
value = self.immediate;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(value >> 16);
self.write_byte_internal(value >> 24);
value = self.immediate_hi;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
}
// 8-bit branch displacement, 16-bit target: distance must fit in an i8
ImmSize::RipRelSize1_Target16 => {
ip = (self.current_rip as u32).wrapping_add(1) as u16;
diff2 = (self.immediate as i16).wrapping_sub(ip as i16);
if diff2 < i8::MIN as i16 || diff2 > i8::MAX as i16 {
self.set_error_message(format!(
"Branch distance is too far away: next_ip: 0x{:04X} target: 0x{:04X}, diff = {}, diff must fit in an i8",
ip, self.immediate as u16, diff2
));
}
self.write_byte_internal(diff2 as u32);
}
// 8-bit branch displacement, 32-bit target: distance must fit in an i8
ImmSize::RipRelSize1_Target32 => {
eip = (self.current_rip as u32).wrapping_add(1);
diff4 = self.immediate.wrapping_sub(eip) as i32;
if diff4 < i8::MIN as i32 || diff4 > i8::MAX as i32 {
self.set_error_message(format!(
"Branch distance is too far away: next_ip: 0x{:08X} target: 0x{:08X}, diff = {}, diff must fit in an i8",
eip, self.immediate, diff4
));
}
self.write_byte_internal(diff4 as u32);
}
// 8-bit branch displacement, 64-bit target (target split across immediate/immediate_hi)
ImmSize::RipRelSize1_Target64 => {
rip = self.current_rip.wrapping_add(1);
diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
if diff8 < i8::MIN as i64 || diff8 > i8::MAX as i64 {
self.set_error_message(format!(
"Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i8",
rip,
((self.immediate_hi as u64) << 32) | (self.immediate as u64),
diff8
));
}
self.write_byte_internal(diff8 as u32);
}
// 16-bit branch displacement, 16-bit target: truncation is inherent, no range check needed
ImmSize::RipRelSize2_Target16 => {
eip = (self.current_rip as u32).wrapping_add(2);
value = self.immediate.wrapping_sub(eip);
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
}
// 16-bit branch displacement, 32-bit target: distance must fit in an i16
ImmSize::RipRelSize2_Target32 => {
eip = (self.current_rip as u32).wrapping_add(2);
diff4 = self.immediate.wrapping_sub(eip) as i32;
if diff4 < i16::MIN as i32 || diff4 > i16::MAX as i32 {
self.set_error_message(format!(
"Branch distance is too far away: next_ip: 0x{:08X} target: 0x{:08X}, diff = {}, diff must fit in an i16",
eip, self.immediate, diff4
));
}
value = diff4 as u32;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
}
// 16-bit branch displacement, 64-bit target: distance must fit in an i16
ImmSize::RipRelSize2_Target64 => {
rip = self.current_rip.wrapping_add(2);
diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
if diff8 < i16::MIN as i64 || diff8 > i16::MAX as i64 {
self.set_error_message(format!(
"Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i16",
rip,
((self.immediate_hi as u64) << 32) | (self.immediate as u64),
diff8
));
}
value = diff8 as u32;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
}
// 32-bit branch displacement, 32-bit target: truncation is inherent, no range check needed
ImmSize::RipRelSize4_Target32 => {
eip = (self.current_rip as u32).wrapping_add(4);
value = self.immediate.wrapping_sub(eip);
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(value >> 16);
self.write_byte_internal(value >> 24);
}
// 32-bit branch displacement, 64-bit target: distance must fit in an i32
ImmSize::RipRelSize4_Target64 => {
rip = self.current_rip.wrapping_add(4);
diff8 = (((self.immediate_hi as u64) << 32) | (self.immediate as u64)).wrapping_sub(rip) as i64;
if diff8 < i32::MIN as i64 || diff8 > i32::MAX as i64 {
self.set_error_message(format!(
"Branch distance is too far away: next_ip: 0x{:016X} target: 0x{:016X}, diff = {}, diff must fit in an i32",
rip,
((self.immediate_hi as u64) << 32) | (self.immediate as u64),
diff8
));
}
value = diff8 as u32;
self.write_byte_internal(value);
self.write_byte_internal(value >> 8);
self.write_byte_internal(value >> 16);
self.write_byte_internal(value >> 24);
}
}
}
/// Writes the next byte to the output buffer.
///
/// # Arguments
///
/// * `value`: The byte to append
#[inline]
pub fn write_u8(&mut self, value: u8) {
	self.write_byte_internal(u32::from(value));
}
/// Appends the low 8 bits of `value` to the output buffer and advances the
/// current RIP by one (wrapping).
#[inline]
pub(super) fn write_byte_internal(&mut self, value: u32) {
	self.current_rip = self.current_rip.wrapping_add(1);
	self.buffer.push(value as u8);
}
// Current length of the output buffer, i.e. the offset where the next byte will be
// written. Only compiled when the `block_encoder` feature is enabled.
#[cfg(all(feature = "encoder", feature = "block_encoder"))]
#[inline]
pub(super) fn position(&self) -> usize {
self.buffer.len()
}
/// Returns the buffer holding the encoded bytes and leaves a fresh empty
/// buffer in its place.
#[must_use]
#[inline]
pub fn take_buffer(&mut self) -> Vec<u8> {
	mem::replace(&mut self.buffer, Vec::new())
}
/// Sets the internal output buffer that receives the encoded bytes, replacing the
/// current one (and dropping whatever it contained).
#[inline]
pub fn set_buffer(&mut self, buffer: Vec<u8>) {
self.buffer = buffer;
}
// Clears the output buffer but keeps its allocation so it can be reused.
// Only compiled when the `block_encoder` feature is enabled.
#[cfg(all(feature = "encoder", feature = "block_encoder"))]
#[inline]
pub(super) fn clear_buffer(&mut self) {
self.buffer.clear()
}
/// Returns the offsets and sizes of the displacement and immediate constant(s) in the
/// most recently encoded instruction. Offsets are relative to the start of the
/// instruction (`self.eip`); zero sizes mean the constant is absent.
#[must_use]
#[allow(clippy::missing_inline_in_public_items)]
pub fn get_constant_offsets(&self) -> ConstantOffsets {
	let mut co = ConstantOffsets::default();

	// Number of displacement bytes that were encoded for each DisplSize variant
	let displ_size: u8 = match self.displ_size {
		DisplSize::None => 0,
		DisplSize::Size1 => 1,
		DisplSize::Size2 => 2,
		DisplSize::Size4 | DisplSize::RipRelSize4_Target32 | DisplSize::RipRelSize4_Target64 => 4,
		DisplSize::Size8 => 8,
	};
	if displ_size != 0 {
		co.displacement_size = displ_size;
		co.displacement_offset = self.displ_addr.wrapping_sub(self.eip) as u8;
	}

	// (first immediate size, second immediate size) for each ImmSize variant.
	// SizeIbReg/Size1OpCode bytes are not reported as patchable constants.
	let (imm_size, imm_size2): (u8, u8) = match self.imm_size {
		ImmSize::None | ImmSize::SizeIbReg | ImmSize::Size1OpCode => (0, 0),
		ImmSize::Size1 | ImmSize::RipRelSize1_Target16 | ImmSize::RipRelSize1_Target32 | ImmSize::RipRelSize1_Target64 => (1, 0),
		ImmSize::Size1_1 => (1, 1),
		ImmSize::Size2 | ImmSize::RipRelSize2_Target16 | ImmSize::RipRelSize2_Target32 | ImmSize::RipRelSize2_Target64 => (2, 0),
		ImmSize::Size2_1 => (2, 1),
		ImmSize::Size2_2 => (2, 2),
		ImmSize::Size4 | ImmSize::RipRelSize4_Target32 | ImmSize::RipRelSize4_Target64 => (4, 0),
		ImmSize::Size4_2 => (4, 2),
		ImmSize::Size8 => (8, 0),
	};
	if imm_size != 0 {
		let rel = self.imm_addr.wrapping_sub(self.eip);
		co.immediate_size = imm_size;
		co.immediate_offset = rel as u8;
		if imm_size2 != 0 {
			// The second immediate directly follows the first one
			co.immediate_size2 = imm_size2;
			co.immediate_offset2 = rel.wrapping_add(imm_size as u32) as u8;
		}
	}

	co
}
/// `true` if the 2-byte VEX prefix form is prevented. Stored internally as a
/// full-width mask (0 or `u32::MAX`), see `set_prevent_vex2()`.
#[must_use]
#[inline]
pub const fn prevent_vex2(&self) -> bool {
self.prevent_vex2 != 0
}
/// Enables/disables prevention of the 2-byte VEX prefix form. The flag is kept as
/// a full-width mask: all bits set when `true`, zero when `false`.
#[inline]
pub fn set_prevent_vex2(&mut self, new_value: bool) {
	self.prevent_vex2 = if new_value { !0 } else { 0 };
}
/// Value of the `VEX.W` bit (0 or 1) stored in bit 7 of the combined WIG/LIG state,
/// see `set_vex_wig()`.
#[must_use]
#[inline]
pub const fn vex_wig(&self) -> u32 {
	(self.internal_vex_wig_lig & 0x80) >> 7
}
/// Stores the `VEX.W` value (masked to 1 bit) into bit 7 of the combined WIG/LIG state.
#[inline]
pub fn set_vex_wig(&mut self, new_value: u32) {
	let wig = (new_value & 1) << 7;
	self.internal_vex_wig_lig = (self.internal_vex_wig_lig & !0x80) | wig;
}
/// Value of the `VEX.L` bit (0 or 1) stored in bit 2 of the combined WIG/LIG state,
/// see `set_vex_lig()`.
#[must_use]
#[inline]
pub const fn vex_lig(&self) -> u32 {
	(self.internal_vex_wig_lig & 4) >> 2
}
/// Stores the `VEX.L` value (masked to 1 bit) into bit 2 of both the combined
/// WIG/LIG state and the standalone LIG state.
#[inline]
pub fn set_vex_lig(&mut self, new_value: u32) {
	let lig = (new_value & 1) << 2;
	self.internal_vex_wig_lig = (self.internal_vex_wig_lig & !4) | lig;
	self.internal_vex_lig = lig;
}
/// Value of the `EVEX.W` bit (0 or 1). The value is stored pre-shifted in bit 7,
/// see `set_evex_wig()`.
#[must_use]
#[inline]
pub const fn evex_wig(&self) -> u32 {
self.internal_evex_wig >> 7
}
/// Stores the `EVEX.W` value (masked to 1 bit) pre-shifted into bit 7.
#[inline]
pub fn set_evex_wig(&mut self, new_value: u32) {
self.internal_evex_wig = (new_value & 1) << 7;
}
/// Value of the `EVEX.L'L` bits (0..=3). The value is stored pre-shifted in
/// bits 5..=6, see `set_evex_lig()`.
#[must_use]
#[inline]
pub const fn evex_lig(&self) -> u32 {
self.internal_evex_lig >> 5
}
/// Stores the `EVEX.L'L` value (masked to 2 bits) pre-shifted into bits 5..=6.
#[inline]
pub fn set_evex_lig(&mut self, new_value: u32) {
self.internal_evex_lig = (new_value & 3) << 5
}
/// Value of the `MVEX.W` bit (0 or 1). The value is stored pre-shifted in bit 7,
/// see `set_mvex_wig()`. Only compiled with the `mvex` feature.
#[must_use]
#[inline]
#[cfg(feature = "mvex")]
pub const fn mvex_wig(&self) -> u32 {
self.internal_mvex_wig >> 7
}
/// Stores the `MVEX.W` value (masked to 1 bit) pre-shifted into bit 7.
/// Only compiled with the `mvex` feature.
#[inline]
#[cfg(feature = "mvex")]
pub fn set_mvex_wig(&mut self, new_value: u32) {
self.internal_mvex_wig = (new_value & 1) << 7;
}
/// Gets the encoder's bitness (16, 32 or 64).
#[must_use]
#[inline]
pub const fn bitness(&self) -> u32 {
self.bitness
}
}