pub struct X86Assembler {
pub formatter: X86InstructionFormatter,
pub index_of_last_watchpoint: usize,
pub index_of_tail_of_last_watchpoint: usize,
}

Fields

formatter: X86InstructionFormatter
index_of_last_watchpoint: usize
index_of_tail_of_last_watchpoint: usize

Implementations

impl X86Assembler
pub fn code_size(&self) -> usize
pub fn buffer(&self) -> &AssemblerBuffer
pub fn buffer_mut(&mut self) -> &mut AssemblerBuffer
pub fn new() -> Self
pub const fn fpr_name(reg: u8) -> &'static str
pub const fn gpr_name(reg: u8) -> &'static str
pub const fn spr_name(reg: u8) -> &'static str
pub const fn first_fp_register() -> u8
pub const fn last_fp_register() -> u8
pub const fn number_of_fp_registers() -> usize
pub const fn first_sp_register() -> u8
pub const fn last_sp_register() -> u8
pub const fn number_of_sp_registers() -> usize
pub const fn first_register() -> u8
pub const fn last_register() -> u8
pub const fn number_of_registers() -> usize
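The const helpers above expose the register file as plain u8 indices with printable names. A minimal sketch using only the signatures listed here (nothing beyond them is assumed):

    // List every general-purpose register index alongside its mnemonic.
    for reg in X86Assembler::first_register()..=X86Assembler::last_register() {
        println!("gpr {}: {}", reg, X86Assembler::gpr_name(reg));
    }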
pub fn push_r(&mut self, reg: u8)
pub fn pop_r(&mut self, reg: u8)
pub fn push_i32(&mut self, imm: i32)
pub fn push_m(&mut self, offset: i32, base: u8)
pub fn pop_m(&mut self, offset: i32, base: u8)
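Together with ret() further down, the stack helpers cover a conventional prologue/epilogue. A hedged sketch — the register numbers (4 = rsp, 5 = rbp, the standard x86-64 encoding) are an assumption, not something this listing states:

    let mut asm = X86Assembler::new();
    asm.push_r(5);       // push rbp
    asm.movq_rr(4, 5);   // mov rbp, rsp (movq_rr takes src, then dst)
    // ... function body ...
    asm.pop_r(5);        // pop rbp
    asm.ret();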
pub fn addl_rr(&mut self, src: u8, dst: u8)
pub fn addl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn addl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn addl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn addl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn addb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn addw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addl_ir(&mut self, imm: i32, dst: u8)
pub fn addl_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn addl_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn addb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn addw_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addq_rr(&mut self, src: u8, dst: u8)
pub fn addq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn addq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn addq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn addq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn addq_ir(&mut self, imm: i32, dst: u8)
pub fn addq_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn addq_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
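The arithmetic emitters all follow one naming scheme, readable off the parameter lists above: a size letter (b/w/l/q for 8/16/32/64-bit) plus an operand-form suffix — _rr register to register, _mr memory to register, _rm register to memory, _ir immediate to register, _im immediate to memory — with _scaled variants adding an index*scale term to the address. Continuing the sketch above (register numbering again assumed to be the standard encoding, 0 = rax, 3 = rbx):

    asm.addq_rr(3, 0);     // add rax, rbx
    asm.addl_im(1, 8, 3);  // add dword ptr [rbx + 8], 1
    asm.addq_ir(16, 4);    // add rsp, 16 (assuming 4 = rsp)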
pub fn andl_rr(&mut self, src: u8, dst: u8)
pub fn andl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn andl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn andw_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn andw_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn andl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn andl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn andw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn andw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn andb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn andb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn andl_ir(&mut self, imm: i32, dst: u8)
pub fn andl_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn andl_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn andw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn andb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn andb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn andq_rr(&mut self, src: u8, dst: u8)
pub fn andq_ir(&mut self, imm: i32, dst: u8)
pub fn andq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn andq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn andq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn andq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn andq_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn andq_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn dec_r(&mut self, dst: u8)
pub fn decq_r(&mut self, dst: u8)
pub fn illegal_instruction(&mut self)
pub fn inc_r(&mut self, dst: u8)
pub fn incq_r(&mut self, dst: u8)
pub fn incq_m(&mut self, offset: i32, base: u8)
pub fn incq_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn negl_r(&mut self, dst: u8)
pub fn negq_r(&mut self, dst: u8)
pub fn negq_m(&mut self, offset: i32, base: u8)
pub fn negq_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn negl_m(&mut self, offset: i32, base: u8)
pub fn negl_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn negw_m(&mut self, offset: i32, base: u8)
pub fn negw_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn negb_m(&mut self, offset: i32, base: u8)
pub fn negb_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn notl_r(&mut self, dst: u8)
pub fn notl_m(&mut self, offset: i32, base: u8)
pub fn notl_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn notw_m(&mut self, offset: i32, base: u8)
pub fn notw_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn notb_m(&mut self, offset: i32, base: u8)
pub fn notb_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn notq_r(&mut self, dst: u8)
pub fn notq_m(&mut self, offset: i32, base: u8)
pub fn notq_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn orl_rr(&mut self, src: u8, dst: u8)
pub fn orl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn orl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn orl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn orl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn orw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn orb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orl_ir(&mut self, imm: i32, dst: u8)
pub fn orl_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn orl_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn orw_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn orb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orq_rr(&mut self, src: u8, dst: u8)
pub fn orq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn orq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn orq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn orq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orq_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn orq_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn orq_ir(&mut self, imm: i32, dst: u8)
pub fn subl_rr(&mut self, src: u8, dst: u8)
pub fn subl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn subl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn subl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn subl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn subw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn subb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subl_ir(&mut self, imm: i32, dst: u8)
pub fn subl_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn subl_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn subw_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn subb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subq_rr(&mut self, src: u8, dst: u8)
pub fn subq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn subq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn subq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn subq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn subq_ir(&mut self, imm: i32, dst: u8)
pub fn subq_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn subq_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorl_rr(&mut self, src: u8, dst: u8)
pub fn xorl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn xorl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn xorl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xorl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorl_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn xorl_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xorw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn xorb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xorb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn xorb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorl_ir(&mut self, imm: i32, dst: u8)
pub fn xorq_rr(&mut self, src: u8, dst: u8)
pub fn xorq_ir(&mut self, imm: i32, dst: u8)
pub fn xorq_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn xorq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xorq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xorq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn xorq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn lzcnt_rr(&mut self, src: u8, dst: u8)
pub fn lzcnt_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn lzcntq_rr(&mut self, src: u8, dst: u8)
pub fn lzcntq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn bsr_rr(&mut self, src: u8, dst: u8)
pub fn bsr_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn bsrq_rr(&mut self, src: u8, dst: u8)
pub fn bsrq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn bswapl_r(&mut self, dst: u8)
pub fn bswapq_r(&mut self, dst: u8)
pub fn tzcnt_rr(&mut self, src: u8, dst: u8)
pub fn tzcntq_rr(&mut self, src: u8, dst: u8)
pub fn bsf_rr(&mut self, src: u8, dst: u8)
pub fn bsfq_rr(&mut self, src: u8, dst: u8)
pub fn btrq_rr(&mut self, src: u8, dst: u8)
pub fn popcnt_rr(&mut self, src: u8, dst: u8)
pub fn popcnt_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn popcntq_rr(&mut self, src: u8, dst: u8)
pub fn popcntq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn sarl_i8r(&mut self, imm: i32, dst: u8)
pub fn sarl_clr(&mut self, dst: u8)
pub fn shrl_i8r(&mut self, imm: i32, dst: u8)
pub fn shrl_clr(&mut self, dst: u8)
pub fn shll_i8r(&mut self, imm: i32, dst: u8)
pub fn shll_clr(&mut self, dst: u8)
pub fn rorl_i8r(&mut self, imm: i32, dst: u8)
pub fn rorl_clr(&mut self, dst: u8)
pub fn roll_i8r(&mut self, imm: i32, dst: u8)
pub fn roll_clr(&mut self, dst: u8)
pub fn rolw_i8r(&mut self, imm: i32, dst: u8)
pub fn sarq_i8r(&mut self, imm: i32, dst: u8)
pub fn sarq_clr(&mut self, dst: u8)
pub fn shrq_i8r(&mut self, imm: i32, dst: u8)
pub fn shrq_clr(&mut self, dst: u8)
pub fn shlq_i8r(&mut self, imm: i32, dst: u8)
pub fn shlq_clr(&mut self, dst: u8)
pub fn rorq_i8r(&mut self, imm: i32, dst: u8)
pub fn rorq_clr(&mut self, dst: u8)
pub fn rolq_i8r(&mut self, imm: i32, dst: u8)
pub fn rolq_clr(&mut self, dst: u8)
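The shift and rotate emitters come in two forms: _i8r takes an immediate count, while _clr uses the count in CL, the x86 register for variable shifts. Continuing the same sketch:

    asm.shll_i8r(4, 0);  // shl eax, 4
    asm.sarq_clr(0);     // sar rax, cl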
pub fn imull_rr(&mut self, src: u8, dst: u8)
pub fn imulq_rr(&mut self, src: u8, dst: u8)
pub fn imull_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn imull_i32r(&mut self, src: u8, imm: i32, dst: u8)
pub fn divl_r(&mut self, dst: u8)
pub fn idivl_r(&mut self, dst: u8)
pub fn idivq_r(&mut self, dst: u8)
pub fn divq_r(&mut self, dst: u8)
pub fn cmpl_rr(&mut self, src: u8, dst: u8)
pub fn cmpl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn cmpl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cmpl_ir(&mut self, imm: i32, dst: u8)
pub fn cmpl_ir_force32(&mut self, imm: i32, dst: u8)
pub fn cmpl_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn cmpb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn cmpb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpl_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpl_im_force32(&mut self, imm: i32, offset: i32, base: u8)
pub fn cmpq_rr(&mut self, src: u8, dst: u8)
pub fn cmpq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn cmpq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cmpq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpq_ir(&mut self, imm: i32, dst: u8)
pub fn cmpq_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn cmpq_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpw_ir(&mut self, imm: i32, dst: u8)
pub fn cmpw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn cmpw_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn testl_rr(&mut self, src: u8, dst: u8)
pub fn testl_i32r(&mut self, imm: i32, dst: u8)
pub fn testl_i32m(&mut self, imm: i32, offset: i32, base: u8)
pub fn testb_rr(&mut self, src: u8, dst: u8)
pub fn testb_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn testb_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn testl_i32m_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn testq_rr(&mut self, src: u8, dst: u8)
pub fn testq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn testq_i32r(&mut self, imm: i32, dst: u8)
pub fn testq_i32m(&mut self, imm: i32, offset: i32, base: u8)
pub fn testq_i32m_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn testw_rr(&mut self, src: u8, dst: u8)
pub fn testw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn testw_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn testb_i8r(&mut self, imm: i32, dst: u8)
pub fn bt_ir(&mut self, bit_offset: i32, test_value: u8)
pub fn bt_im(&mut self, bit_offset: i32, offset: i32, base: u8)
pub fn bt_rr(&mut self, bit_offset: u8, test_value: u8)
pub fn bit_rm(&mut self, bit_offset: u8, offset: i32, base: u8)
pub fn btw_ir(&mut self, bit_offset: i32, test_value: u8)
pub fn btw_im(&mut self, bit_offset: i32, offset: i32, base: u8)
pub fn btw_rr(&mut self, bit_offset: u8, test_value: u8)
pub fn btw_rm(&mut self, bit_offset: u8, offset: i32, base: u8)
pub fn setcc_r(&mut self, cc: Condition, dst: u8)
pub fn sete_r(&mut self, dst: u8)
pub fn setz_r(&mut self, dst: u8)
pub fn setne_r(&mut self, dst: u8)
pub fn setnz_r(&mut self, dst: u8)
pub fn setnp_r(&mut self, dst: u8)
pub fn setp_r(&mut self, dst: u8)
pub fn cdq(&mut self)
pub fn cqo(&mut self)
pub fn fstps(&mut self, offset: i32, base: u8)
pub fn fstpl(&mut self, offset: i32, base: u8)
pub fn xchgl_rr(&mut self, src: u8, dst: u8)
pub fn xchgb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xchgb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xchgw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xchgw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xchgl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xchgl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xchgq_rr(&mut self, src: u8, dst: u8)
pub fn xchgq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xchgq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn pinsrb_i8r(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pinsrw_i8r(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pinsrd_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pinsrq_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn insertps_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn unpcklpd_rr(&mut self, rn: u8, vd: u8)
pub fn pextrb_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pextrw_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pextrd_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pextrq_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pshufd_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn pshufb_rr(&mut self, rn: u8, vd: u8)
pub fn pshuflw_i8rr(&mut self, control_bits: u8, rn: u8, vd: u8)
pub fn pshufhw_rr(&mut self, control_bits: u8, rn: u8, vd: u8)
pub fn punpcklqdq_rr(&mut self, rn: u8, vd: u8)
pub fn shufps_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn shufpd_i8rr(&mut self, lane_index: u8, rn: u8, vd: u8)
pub fn paddsb_rr(&mut self, rn: u8, vd: u8)
pub fn paddusb_rr(&mut self, rn: u8, vd: u8)
pub fn paddusw_rr(&mut self, rn: u8, vd: u8)
pub fn psubsb_rr(&mut self, rn: u8, vd: u8)
pub fn psubusb_rr(&mut self, rn: u8, vd: u8)
pub fn psubusw_rr(&mut self, rn: u8, vd: u8)
pub fn psubsw_rr(&mut self, rn: u8, vd: u8)
pub fn pmaxsb_rr(&mut self, rn: u8, vd: u8)
pub fn pmaxsw_rr(&mut self, rn: u8, vd: u8)
pub fn pmaxsd_rr(&mut self, rn: u8, vd: u8)
pub fn pmaxub_rr(&mut self, rn: u8, vd: u8)
pub fn pmaxuw_rr(&mut self, rn: u8, vd: u8)
pub fn pmaxud_rr(&mut self, rn: u8, vd: u8)
pub fn pminsb_rr(&mut self, rn: u8, vd: u8)
pub fn pminsw_rr(&mut self, rn: u8, vd: u8)
pub fn pminsd_rr(&mut self, rn: u8, vd: u8)
pub fn pminub_rr(&mut self, rn: u8, vd: u8)
pub fn pminuw_rr(&mut self, rn: u8, vd: u8)
pub fn pminud_rr(&mut self, rn: u8, vd: u8)
pub fn pavgb_rr(&mut self, rn: u8, vd: u8)
pub fn pavgw_rr(&mut self, rn: u8, vd: u8)
pub fn pabsb_rr(&mut self, rn: u8, vd: u8)
pub fn pabsw_rr(&mut self, rn: u8, vd: u8)
pub fn pabsd_rr(&mut self, rn: u8, vd: u8)
pub fn pxor_rr(&mut self, rn: u8, vd: u8)
pub fn pblendw_i8rr(&mut self, imm8: u8, rn: u8, vd: u8)
pub fn addps_rr(&mut self, rn: u8, vd: u8)
pub fn psubd_rr(&mut self, rn: u8, vd: u8)
pub fn cvtdq2ps_rr(&mut self, vn: u8, vd: u8)
pub fn cvtdq2pd_rr(&mut self, vn: u8, vd: u8)
pub fn packsswb_rr(&mut self, xmm_2: u8, xmm_1: u8)
pub fn packuswb_rr(&mut self, upper: u8, dest: u8)
pub fn packssdw_r(&mut self, xmm_2: u8, xmm_1: u8)
pub fn packusdw_rr(&mut self, xmm_2: u8, xmm_1: u8)
pub fn pmovsxbw(&mut self, xmm_2: u8, xmm_1: u8)
pub fn pmovzxbw(&mut self, xmm_2: u8, xmm_1: u8)
pub fn pmovzxwd(&mut self, xmm_2: u8, xmm_1: u8)
pub fn pmovsxwd(&mut self, xmm_2: u8, xmm_1: u8)
pub fn pmovsxdq(&mut self, xmm_2: u8, xmm_1: u8)
pub fn pmovzxdq(&mut self, xmm_2: u8, xmm_1: u8)
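The packed-integer (SSE) emitters take (rn, vd), with vd apparently serving as both destination and first operand in the usual two-operand SSE style. A sketch, with xmm indices assumed to be plain 0-based values:

    asm.pxor_rr(0, 0);     // pxor xmm0, xmm0 — the common zeroing idiom
    asm.paddusb_rr(1, 0);  // paddusb xmm0, xmm1 (saturating unsigned byte add)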
pub fn movl_rr(&mut self, src: u8, dst: u8)
pub fn movl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn movl_rm_disp32(&mut self, src: u8, offset: i32, base: u8)
pub fn movl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movl_meax(&mut self, addr: *const u8)
pub fn movl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movl_mr_addr(&mut self, offset: u32, dst: u8)
pub fn movl_rm_addr(&mut self, src: u8, offset: u32)
pub fn movl_mr_disp32(&mut self, offset: i32, base: u8, dst: u8)
pub fn movl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movl_mr_disp8(&mut self, offset: i32, base: u8, dst: u8)
pub fn movl_i32r(&mut self, imm: i32, dst: u8)
pub fn movl_i32m(&mut self, imm: i32, offset: i32, base: u8)
pub fn movl_i32m_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movb_i8m(&mut self, imm: i32, offset: i32, base: u8)
pub fn movb_i8m_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn movb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn movw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movw_im(&mut self, imm: i32, offset: i32, base: u8)
pub fn movw_im_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movl_eaxm(&mut self, addr: *const u8)
pub fn movq_rr(&mut self, src: u8, dst: u8)
pub fn movq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn movq_rm_disp32(&mut self, src: u8, offset: i32, base: u8)
pub fn movq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movq_rm_addr(&mut self, src: u8, offset: i32)
pub fn movq_meax(&mut self, addr: *const u8)
pub fn movq_eaxm(&mut self, addr: *const u8)
pub fn movq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movq_mr_addr(&mut self, offset: i32, dst: u8)
pub fn movq_mr_disp32(&mut self, offset: i32, base: u8, dst: u8)
pub fn movq_mr_disp8(&mut self, offset: i32, base: u8, dst: u8)
pub fn movq_i32m(&mut self, imm: i32, offset: i32, base: u8)
pub fn movq_i32m_scaled( &mut self, imm: i32, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movq_i64r(&mut self, imm: i64, dst: u8)
pub fn mov_i32r(&mut self, imm: i32, dst: u8)
pub fn movsxd_rr(&mut self, src: u8, dst: u8)
pub fn movzwl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movzwl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movswl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movswl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movzbl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movzbl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movsbl_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movsbl_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movzbl_rr(&mut self, src: u8, dst: u8)
pub fn movsbl_rr(&mut self, src: u8, dst: u8)
pub fn movsbq_rr(&mut self, src: u8, dst: u8)
pub fn movzwl_rr(&mut self, src: u8, dst: u8)
pub fn movswl_rr(&mut self, src: u8, dst: u8)
pub fn movswq_rr(&mut self, src: u8, dst: u8)
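The movz*/movs* families are the zero- and sign-extending moves (movzx/movsx): the first letter pair names the source width, the trailing letter the destination width. Continuing the sketch:

    asm.movzbl_mr(0, 3, 0);  // movzx eax, byte ptr [rbx]
    asm.movswq_rr(1, 0);     // movsx rax, cx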
pub fn cmovl_rr(&mut self, cond: Condition, src: u8, dst: u8)
pub fn cmovl_mr(&mut self, cond: Condition, offset: i32, base: u8, dst: u8)
pub fn cmovl_mr_scaled( &mut self, cond: Condition, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn cmovel_rr(&mut self, src: u8, dst: u8)
pub fn cmovnel_rr(&mut self, src: u8, dst: u8)
pub fn cmovpl_rr(&mut self, src: u8, dst: u8)
pub fn cmovnpl_rr(&mut self, src: u8, dst: u8)
pub fn cmovq_rr(&mut self, cond: Condition, src: u8, dst: u8)
pub fn cmovq_mr(&mut self, cond: Condition, offset: i32, base: u8, dst: u8)
pub fn cmovq_mr_scaled( &mut self, cond: Condition, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn cmoveq_rr(&mut self, src: u8, dst: u8)
pub fn cmoveq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cmovneq_rr(&mut self, src: u8, dst: u8)
pub fn cmovneq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cmovpq_rr(&mut self, src: u8, dst: u8)
pub fn cmovnpq_rr(&mut self, src: u8, dst: u8)
pub fn leal_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn leal_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn leaq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn leaq_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn call(&mut self) -> AssemblerLabel
pub fn call_r(&mut self, dst: u8) -> AssemblerLabel
pub fn call_m(&mut self, offset: i32, base: u8)
pub fn jmp(&mut self) -> AssemblerLabel
pub fn jmp_r(&mut self, dst: u8) -> AssemblerLabel
pub fn jmp_m(&mut self, offset: i32, base: u8)
pub fn jmp_m_scaled(&mut self, offset: i32, base: u8, index: u8, scale: u8)
pub fn jne(&mut self) -> AssemblerLabel
pub fn jnz(&mut self) -> AssemblerLabel
pub fn je(&mut self) -> AssemblerLabel
pub fn jz(&mut self) -> AssemblerLabel
pub fn jl(&mut self) -> AssemblerLabel
pub fn jb(&mut self) -> AssemblerLabel
pub fn jle(&mut self) -> AssemblerLabel
pub fn jbe(&mut self) -> AssemblerLabel
pub fn jge(&mut self) -> AssemblerLabel
pub fn jg(&mut self) -> AssemblerLabel
pub fn ja(&mut self) -> AssemblerLabel
pub fn jae(&mut self) -> AssemblerLabel
pub fn jo(&mut self) -> AssemblerLabel
pub fn jnp(&mut self) -> AssemblerLabel
pub fn jp(&mut self) -> AssemblerLabel
pub fn js(&mut self) -> AssemblerLabel
pub fn jcc(&mut self, condition: Condition) -> AssemblerLabel
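Each jump emitter returns an AssemblerLabel standing for a target that may not exist yet; a forward branch is resolved later with link_jump. A minimal sketch of the pattern:

    asm.cmpl_ir(0, 0);            // cmp eax, 0
    let skip = asm.jne();         // forward branch, target still unknown
    asm.addl_ir(1, 1);            // add ecx, 1
    let target = asm.label();
    asm.link_jump(skip, target);  // patch the jne to land at `target`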
pub fn addsd_rr(&mut self, src: u8, dst: u8)
pub fn addsd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn addsd_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn addss_rr(&mut self, src: u8, dst: u8)
pub fn addss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn addss_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn cvtsi2sd_rr(&mut self, src: u8, dst: u8)
pub fn cvtsi2ss_rr(&mut self, src: u8, dst: u8)
pub fn cvtsi2sdq_rr(&mut self, src: u8, dst: u8)
pub fn cvtsi2ssq_rr(&mut self, src: u8, dst: u8)
pub fn cvtsi2sdq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cvtsi2ssq_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cvtsi2sd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cvtsi2ss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cvttsd2si_rr(&mut self, src: u8, dst: u8)
pub fn cvttss2si_rr(&mut self, src: u8, dst: u8)
pub fn cvtss2siq_rr(&mut self, src: u8, dst: u8)
pub fn cvtsd2ss_rr(&mut self, src: u8, dst: u8)
pub fn cvtsd2ss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cvtss2sd_rr(&mut self, src: u8, dst: u8)
pub fn cvtss2sd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn cvttsd2siq_rr(&mut self, src: u8, dst: u8)
pub fn movd_f2r(&mut self, src: u8, dst: u8)
pub fn movd_r2f(&mut self, src: u8, dst: u8)
pub fn movddup_rr(&mut self, src: u8, dst: u8)
pub fn movmskpd_rr(&mut self, src: u8, dst: u8)
pub fn movq_f2r(&mut self, src: u8, dst: u8)
pub fn movq_r2f(&mut self, src: u8, dst: u8)
pub fn movapd_rr(&mut self, src: u8, dst: u8)
pub fn movaps_rr(&mut self, src: u8, dst: u8)
pub fn movhlps_rr(&mut self, src: u8, dst: u8)
pub fn movsd_rr(&mut self, src: u8, dst: u8)
pub fn movsd_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn movsd_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movss_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn movss_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn movsd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movsd_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn movss_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn movshdup_rr(&mut self, src: u8, dst: u8)
pub fn movsldup_rr(&mut self, src: u8, dst: u8)
pub fn mulsd_rr(&mut self, src: u8, dst: u8)
pub fn mulsd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn mulsd_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn mulss_rr(&mut self, src: u8, dst: u8)
pub fn mulss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn mulss_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn pextrw_irr(&mut self, which_word: i32, src: u8, dst: u8)
pub fn psllq_i8r(&mut self, shift: i32, dst: u8)
pub fn psrld_i8r(&mut self, shift: i32, dst: u8)
pub fn prslq_i8r(&mut self, shift: i32, dst: u8)
pub fn por_rr(&mut self, src: u8, dst: u8)
pub fn subsd_rr(&mut self, src: u8, dst: u8)
pub fn subsd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn subsd_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn subss_rr(&mut self, src: u8, dst: u8)
pub fn subss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn subss_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn ucomisd_rr(&mut self, src: u8, dst: u8)
pub fn ucomisd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn ucomisd_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn ucomiss_rr(&mut self, src: u8, dst: u8)
pub fn ucomiss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn ucomiss_mr_scaled( &mut self, offset: i32, base: u8, index: u8, scale: u8, dst: u8, )
pub fn divsd_rr(&mut self, src: u8, dst: u8)
pub fn divsd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn divss_rr(&mut self, src: u8, dst: u8)
pub fn divss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn andps_rr(&mut self, src: u8, dst: u8)
pub fn orps_rr(&mut self, src: u8, dst: u8)
pub fn xorps_rr(&mut self, src: u8, dst: u8)
pub fn xorpd_rr(&mut self, src: u8, dst: u8)
pub fn andnpd_rr(&mut self, src: u8, dst: u8)
pub fn sqrtsd_rr(&mut self, src: u8, dst: u8)
pub fn sqrtsd_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn sqrtss_rr(&mut self, src: u8, dst: u8)
pub fn sqrtss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn roundss_rr(&mut self, src: u8, dst: u8, rounding: RoundingType)
pub fn roundss_mr( &mut self, offset: i32, base: u8, dst: u8, rounding: RoundingType, )
pub fn roundsd_rr(&mut self, src: u8, dst: u8, rounding: RoundingType)
pub fn roundsd_mr( &mut self, offset: i32, base: u8, dst: u8, rounding: RoundingType, )
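Scalar float math follows the same suffix scheme, with sd for double and ss for single precision. A load/compute/store sketch (xmm0, xmm1 and 3 = rbx assumed as before):

    asm.movsd_mr(0, 3, 0);  // movsd xmm0, qword ptr [rbx]
    asm.addsd_rr(1, 0);     // addsd xmm0, xmm1
    asm.movsd_rm(0, 8, 3);  // movsd qword ptr [rbx + 8], xmm0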
pub fn int3(&mut self)
pub fn is_int3(addr: &u8) -> bool
pub fn ret(&mut self)
pub fn predict_not_taken(&mut self)
pub fn lock(&mut self)
pub fn gs(&mut self)
pub fn cmpxchgb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn cmpxchgb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpxchgw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn cmpxchgw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpxchgl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn cmpxchgl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn cmpxchgq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn cmpxchgq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xaddb_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xaddb_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xaddw_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xaddw_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xaddl_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xaddl_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
pub fn xaddq_rm(&mut self, src: u8, offset: i32, base: u8)
pub fn xaddq_rm_scaled( &mut self, src: u8, offset: i32, base: u8, index: u8, scale: u8, )
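lock() emits a bare LOCK prefix, so an atomic read-modify-write is plausibly written as two calls, the prefix immediately followed by the instruction it guards — an assumption consistent with a standalone prefix emitter, not something this listing spells out:

    asm.lock();
    asm.xaddl_rm(1, 0, 3);  // lock xadd dword ptr [rbx], ecx — atomic fetch-add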
pub fn lfence(&mut self)
pub fn mfence(&mut self)
pub fn sfence(&mut self)
pub fn rdtsc(&mut self)
pub fn pause(&mut self)
pub fn cpuid(&mut self)
pub fn vzeroupper(&mut self)
pub fn vpinsrb_i8mrr( &mut self, lane_index: u8, offset: i32, base: u8, xmm_2: u8, xmm_1: u8, )
pub fn vpinsrb_i8rrr(&mut self, lane_index: u8, rm: u8, xmm_2: u8, xmm_1: u8)
pub fn vpinsrw_i8mrr( &mut self, lane_index: u8, offset: i32, base: u8, xmm_2: u8, xmm_1: u8, )
pub fn vpinsrw_i8rrr(&mut self, lane_index: u8, rm: u8, xmm_2: u8, xmm_1: u8)
pub fn vpinsrd_i8mrr( &mut self, lane_index: u8, offset: i32, base: u8, xmm_2: u8, xmm_1: u8, )
pub fn vpinsrd_i8rrr(&mut self, lane_index: u8, rm: u8, xmm_2: u8, xmm_1: u8)
pub fn vpinsrq_i8mrr( &mut self, lane_index: u8, offset: i32, base: u8, xmm_2: u8, xmm_1: u8, )
pub fn vpinsrq_i8rrr(&mut self, lane_index: u8, rm: u8, xmm_2: u8, xmm_1: u8)
pub fn vinsertps_i8rrr(&mut self, lane_index: u8, rm: u8, xmm_2: u8, xmm_1: u8)
pub fn vmovddup_rr(&mut self, src: u8, dst: u8)
pub fn vmovddup_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn vmovapd_rr(&mut self, src: u8, dst: u8)
pub fn vbroadcastss_mr(&mut self, offset: i32, base: u8, dst: u8)
pub fn vpunpcklbw_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vpunpckhbw_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vpunpcklqdq_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vunpcklps_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vunpcklpd_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vpextrb_i8rr(&mut self, lane_index: u8, vn: u8, rd: u8)
pub fn vpextrb_i8rm(&mut self, lane_index: u8, src: u8, offset: i32, base: u8)
pub fn vpextrw_i8rr(&mut self, lane_index: u8, vn: u8, rd: u8)
pub fn vpextrw_i8rm(&mut self, lane_index: u8, src: u8, offset: i32, base: u8)
pub fn vpextrd_i8rr(&mut self, lane_index: u8, vn: u8, rd: u8)
pub fn vpextrd_i8rm(&mut self, lane_index: u8, src: u8, offset: i32, base: u8)
pub fn vpextrq_i8rr(&mut self, lane_index: u8, vn: u8, rd: u8)
pub fn vpshufb_rrr(&mut self, vm: u8, vn: u8, vd: u8)
pub fn vshufps_i8rrr( &mut self, control_bits: u8, xmm_3: u8, xmm_2: u8, xmm_1: u8, )
pub fn vshufpd_i8rrr( &mut self, control_bits: u8, xmm_3: u8, xmm_2: u8, xmm_1: u8, )
pub fn vpshuflw_i8rr(&mut self, control_bits: u8, xmm_2: u8, xmm_1: u8)
pub fn vpshufd_i8rr(&mut self, control_bits: u8, xmm_2: u8, xmm_1: u8)
pub fn vpaddsb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpaddusb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpaddsw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpaddusw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpsubsb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpsubusb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpsubsw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpsubusw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpmaxsb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpmaxsw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpmaxsd_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpmaxub_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpmaxuw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpmaxud_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpminsb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpminsw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpminsd_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpminub_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpminuw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpminud_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vmaxps_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vmaxpd_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vminps_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vminpd_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vpavgb_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpavgw_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpabsb_rr(&mut self, vn: u8, vd: u8)
pub fn vpabsw_rr(&mut self, vn: u8, vd: u8)
pub fn vpabsd_rr(&mut self, vn: u8, vd: u8)
pub fn vpxor_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vpsubq_rrr(&mut self, right: u8, left: u8, vd: u8)
pub fn vblendvpd_rrrr(&mut self, xmm_4: u8, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vpmulhrsw_rrr(&mut self, xmm_3: u8, xmm_2: u8, xmm_1: u8)
pub fn vaddps_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vaddpd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpaddb_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpaddw_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpaddd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpaddq_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vsubps_mrr(&mut self, offset: i32, base: u8, left: u8, dest: u8)
pub fn vsubps_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vsubpd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpsubb_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpsubw_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpsubd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vmulps_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vmulpd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpmullw_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vpmulld_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vdivps_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vdivpd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vdivsd_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vdivsd_mrr(&mut self, offset: i32, base: u8, left: u8, dest: u8)
pub fn vdivss_rrr(&mut self, left: u8, right: u8, dest: u8)
pub fn vdivss_mrr(&mut self, offset: i32, base: u8, left: u8, dest: u8)
pub fn vroundsd_i8rrr( &mut self, rounding: RoundingType, src1: u8, src2: u8, dest: u8, )
pub fn vroundsd_i8mrr( &mut self, rounding: RoundingType, offset: i32, base: u8, src: u8, dest: u8, )
pub fn vroundss_i8rrr( &mut self, rounding: RoundingType, src1: u8, src2: u8, dest: u8, )
pub fn vroundss_i8mrr( &mut self, rounding: RoundingType, offset: i32, base: u8, src: u8, dest: u8, )
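The v-prefixed emitters are the AVX forms: three operands, so the destination no longer has to alias a source. The (left, right, dest) roles are read off the signatures above; how left and right map onto the encoded operands is not pinned down here, so this sketch sticks to a commutative operation:

    asm.vaddps_rrr(1, 2, 0);  // xmm0 = packed single-precision sum of xmm1 and xmm2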
pub fn label_for_watchpoint(&mut self) -> AssemblerLabel
pub fn label_ignoring_watchpoints(&mut self) -> AssemblerLabel
pub fn label(&mut self) -> AssemblerLabel
pub fn align(&mut self, alignment: usize) -> AssemblerLabel
pub unsafe fn set_pointer(where_: *mut u8, value: *mut u8)
pub unsafe fn set_int32(where_: *mut u8, value: i32)
pub unsafe fn set_rel32(from: *mut u8, to: *mut u8)
pub fn nop(&mut self)
pub fn debug_offset(&self) -> usize
pub unsafe fn replace_with_jump(instruction_start: *mut u8, to: *mut u8)
pub unsafe fn replace_with_hlt(instruction_start: *mut u8)
pub unsafe fn read_pointer(instruction_start: *mut u8) -> *mut u8
pub unsafe fn repatch_pointer(where_: *mut u8, to: *mut u8)
pub unsafe fn repatch_int32(where_: *mut u8, value: i32)
pub unsafe fn relink_jump(from: *mut u8, to: *mut u8)
pub unsafe fn relink_tail_call(from: *mut u8, to: *mut u8)
pub unsafe fn relink_call(from: *mut u8, to: *mut u8)
pub unsafe fn link_pointer(code: *mut u8, where_: AssemblerLabel, to: *mut u8)
pub unsafe fn link_call(code: *mut u8, where_: AssemblerLabel, to: *mut u8)
pub unsafe fn link_tail_call(code: *mut u8, where_: AssemblerLabel, to: *mut u8)
pub unsafe fn link_jump_(code: *mut u8, where_: AssemblerLabel, to: *mut u8)
pub fn link_jump(&mut self, from: AssemblerLabel, to: AssemblerLabel)
pub unsafe fn revert_jump_to_cmpl_im_force32( instruction_start: *mut u8, imm: i32, _offset: i32, dst: u8, )
pub unsafe fn revert_jump_to_cmpl_ir_force32( instruction_start: *mut u8, imm: i32, dst: u8, )
pub unsafe fn revert_jump_to_movq_i64r( instruction_start: *mut u8, imm: i64, dst: u8, )
pub unsafe fn revert_jump_to_movq_i32r( instruction_start: *mut u8, imm: i64, dst: u8, )
pub unsafe fn fill_nops(base: *mut u8, size: usize)
pub fn get_difference_between_labels( a: AssemblerLabel, b: AssemblerLabel, ) -> i32
pub unsafe fn get_relocate_address( code: *mut u8, label: AssemblerLabel, ) -> *mut u8
pub fn get_call_return_offset(call: AssemblerLabel) -> usize
pub unsafe fn replace_with_address_computation(instruction_start: *mut u8)
pub unsafe fn replace_with_load(instruction_start: *mut u8)
pub fn max_jump_replacement_size() -> usize
pub fn patchable_jump_size() -> usize
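Everything from label_for_watchpoint down is the label, linking and patching surface: labels mark byte offsets in the buffer, while the unsafe set_*/repatch_*/relink_* functions rewrite already-emitted machine code in place. A safe-side sketch using only the label helpers (the sign of the returned difference depends on argument order, which this listing does not pin down):

    let a = asm.label();
    asm.nop();
    let b = asm.label();
    let _delta = X86Assembler::get_difference_between_labels(a, b);  // byte distance between the two labels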
Auto Trait Implementations
impl Freeze for X86Assembler
impl RefUnwindSafe for X86Assembler
impl Send for X86Assembler
impl Sync for X86Assembler
impl Unpin for X86Assembler
impl UnwindSafe for X86Assembler
Blanket Implementations

impl<T> BorrowMut<T> for T
where
    T: ?Sized,

    fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.