use std::collections::HashMap;
/// 64-bit general-purpose registers. Discriminants are the hardware
/// register numbers: the low 3 bits go in the ModRM/SIB fields, bit 3 is
/// carried by a REX prefix extension bit.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Reg64 {
RAX = 0,
RCX = 1,
RDX = 2,
RBX = 3,
RSP = 4,
RBP = 5,
RSI = 6,
RDI = 7,
R8 = 8,
R9 = 9,
R10 = 10,
R11 = 11,
R12 = 12,
R13 = 13,
R14 = 14,
R15 = 15,
}
impl Reg64 {
/// Low 3 bits used in the ModRM/SIB register fields.
pub fn encoding(self) -> u8 {
(self as u8) & 0x7
}
/// True for R8-R15, which need a REX extension bit (R/X/B).
pub fn requires_rex(self) -> bool {
(self as u8) >= 8
}
/// RSP/R12 encode as 0b100 in ModRM.rm, which already means "SIB
/// follows", so a SIB byte is mandatory when they are a base.
pub fn requires_sib(self) -> bool {
matches!(self, Reg64::RSP | Reg64::R12)
}
/// RBP/R13 with mod=00 mean "disp32, no base", so a displacement byte
/// must be forced even when the displacement is zero.
pub fn requires_disp(self) -> bool {
matches!(self, Reg64::RBP | Reg64::R13)
}
/// The 32-bit register with the same hardware number.
pub fn as_32(self) -> Reg32 {
// SAFETY: Reg64 and Reg32 are both repr(u8) with identical
// discriminants 0..=15, so every Reg64 value is a valid Reg32.
unsafe { std::mem::transmute(self as u8) }
}
}
/// 32-bit general-purpose registers; discriminants mirror [`Reg64`]
/// hardware numbers (writes to these zero the upper 32 bits on x86-64).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Reg32 {
EAX = 0,
ECX = 1,
EDX = 2,
EBX = 3,
ESP = 4,
EBP = 5,
ESI = 6,
EDI = 7,
R8D = 8,
R9D = 9,
R10D = 10,
R11D = 11,
R12D = 12,
R13D = 13,
R14D = 14,
R15D = 15,
}
impl Reg32 {
/// Low 3 bits used in the ModRM/SIB register fields.
pub fn encoding(self) -> u8 {
(self as u8) & 0x7
}
/// True for R8D-R15D, which need a REX extension bit.
pub fn requires_rex(self) -> bool {
(self as u8) >= 8
}
}
/// 16-bit general-purpose registers; discriminants mirror [`Reg64`]
/// hardware numbers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Reg16 {
AX = 0,
CX = 1,
DX = 2,
BX = 3,
SP = 4,
BP = 5,
SI = 6,
DI = 7,
R8W = 8,
R9W = 9,
R10W = 10,
R11W = 11,
R12W = 12,
R13W = 13,
R14W = 14,
R15W = 15,
}
impl Reg16 {
/// Low 3 bits used in the ModRM/SIB register fields.
pub fn encoding(self) -> u8 {
(self as u8) & 0x7
}
/// True for R8W-R15W, which need a REX extension bit.
pub fn requires_rex(self) -> bool {
(self as u8) >= 8
}
}
/// 8-bit registers. AL..BH and R8B..R15B carry their hardware numbers;
/// SPL..DIL get pseudo-discriminants 16..=19 because they share ModRM
/// encodings 4..=7 with AH..BH — the presence of a REX prefix is what
/// selects the SPL..DIL meaning at the hardware level.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Reg8 {
AL = 0,
CL = 1,
DL = 2,
BL = 3,
AH = 4,
CH = 5,
DH = 6,
BH = 7,
R8B = 8,
R9B = 9,
R10B = 10,
R11B = 11,
R12B = 12,
R13B = 13,
R14B = 14,
R15B = 15,
SPL = 16,
BPL = 17,
SIL = 18,
DIL = 19,
}
impl Reg8 {
    /// 3-bit ModRM/SIB register field. AH..BH share encodings 4..=7 with
    /// SPL..DIL; a REX prefix on the instruction selects the SPL..DIL
    /// meaning, so the caller must also emit the right prefix.
    pub fn encoding(self) -> u8 {
        match self {
            Reg8::AL | Reg8::R8B => 0,
            Reg8::CL | Reg8::R9B => 1,
            Reg8::DL | Reg8::R10B => 2,
            Reg8::BL | Reg8::R11B => 3,
            Reg8::AH | Reg8::SPL | Reg8::R12B => 4,
            Reg8::CH | Reg8::BPL | Reg8::R13B => 5,
            Reg8::DH | Reg8::SIL | Reg8::R14B => 6,
            Reg8::BH | Reg8::DIL | Reg8::R15B => 7,
        }
    }
    /// True when the REX.B/R extension *bit* must be set — R8B..R15B only.
    /// The previous version also returned true for SPL..DIL (pseudo ids
    /// 16..=19); setting the extension bit for those registers re-encodes
    /// them as R12B..R15B, which was a latent mis-encoding.
    pub fn requires_rex(self) -> bool {
        matches!(self as u8, 8..=15)
    }
    /// True when a REX prefix byte must be *present* (even a bare 0x40):
    /// R8B..R15B need the extension bit, and SPL..DIL need the prefix so
    /// encodings 4..=7 are not read back as AH..BH.
    pub fn needs_rex_byte(self) -> bool {
        (self as u8) >= 8
    }
    /// AH/CH/DH/BH cannot appear in any instruction that carries a REX
    /// prefix; callers combining them with R8-R15 cannot encode at all.
    pub fn is_high_byte(self) -> bool {
        matches!(self, Reg8::AH | Reg8::CH | Reg8::DH | Reg8::BH)
    }
}
/// SSE vector registers XMM0-XMM15; discriminants are hardware numbers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum RegXmm {
XMM0 = 0,
XMM1 = 1,
XMM2 = 2,
XMM3 = 3,
XMM4 = 4,
XMM5 = 5,
XMM6 = 6,
XMM7 = 7,
XMM8 = 8,
XMM9 = 9,
XMM10 = 10,
XMM11 = 11,
XMM12 = 12,
XMM13 = 13,
XMM14 = 14,
XMM15 = 15,
}
impl RegXmm {
/// Low 3 bits used in the ModRM register fields.
pub fn encoding(self) -> u8 {
(self as u8) & 0x7
}
/// True for XMM8-XMM15, which need a REX extension bit.
pub fn requires_rex(self) -> bool {
(self as u8) >= 8
}
}
/// SIB scale field: the index register is multiplied by 1/2/4/8, and the
/// discriminant is the 2-bit shift amount stored in bits 7-6 of the SIB.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Scale {
X1 = 0,
X2 = 1,
X4 = 2,
X8 = 3,
}
/// A memory operand: `[base + index*scale + disp]`, `[rip + disp]`, or an
/// absolute `[disp32]` when both `base` and `rip_relative` are absent.
#[derive(Debug, Clone, Copy)]
pub struct Mem {
/// Base register, if any.
pub base: Option<Reg64>,
/// Index register, if any (scaled by `scale`).
pub index: Option<Reg64>,
/// Multiplier applied to `index`; ignored when `index` is `None`.
pub scale: Scale,
/// Signed 32-bit displacement.
pub disp: i32,
/// When set, `disp` is relative to the next instruction (RIP-relative).
pub rip_relative: bool,
}
impl Mem {
    /// `[base + disp]`
    pub fn base_disp(base: Reg64, disp: i32) -> Self {
        Self {
            base: Some(base),
            index: None,
            scale: Scale::X1,
            disp,
            rip_relative: false,
        }
    }
    /// `[base]`
    pub fn base(base: Reg64) -> Self {
        Self::base_disp(base, 0)
    }
    /// `[base + index*scale + disp]`
    pub fn base_index_scale_disp(base: Reg64, index: Reg64, scale: Scale, disp: i32) -> Self {
        Self {
            base: Some(base),
            index: Some(index),
            scale,
            disp,
            rip_relative: false,
        }
    }
    /// `[rip + disp]` — RIP-relative addressing.
    pub fn rip_relative(disp: i32) -> Self {
        Self {
            base: None,
            index: None,
            scale: Scale::X1,
            disp,
            rip_relative: true,
        }
    }
    /// Absolute `[disp32]` (encoded via a SIB byte with no base/index).
    pub fn absolute(addr: i32) -> Self {
        Self {
            base: None,
            index: None,
            scale: Scale::X1,
            disp: addr,
            rip_relative: false,
        }
    }
    /// A SIB byte is required when there is an index register, when the
    /// base is RSP/R12 (rm=100 already means "SIB follows"), or for the
    /// absolute no-base form.
    fn needs_sib(&self) -> bool {
        self.index.is_some()
            || self.base.map_or(false, |b| b.requires_sib())
            || (self.base.is_none() && !self.rip_relative)
    }
    /// ModRM.mod field: 00 = no displacement (also RIP-relative and the
    /// absolute no-base form), 01 = disp8, 10 = disp32.
    fn get_mod(&self) -> u8 {
        match self.base {
            // RIP-relative and absolute both use mod=00; the original code
            // had an identical-on-both-sides `if rip_relative` here.
            None => 0b00,
            // RBP/R13 as base force a displacement even when it is zero,
            // because mod=00 with rm=101 means "no base, disp32".
            Some(b) if self.disp == 0 && !b.requires_disp() => 0b00,
            _ if (-128..=127).contains(&self.disp) => 0b01,
            _ => 0b10,
        }
    }
}
/// A general instruction operand: a register of any width, a memory
/// operand, or an immediate of any width.
#[derive(Debug, Clone, Copy)]
pub enum Operand {
Reg64(Reg64),
Reg32(Reg32),
Reg16(Reg16),
Reg8(Reg8),
Xmm(RegXmm),
Mem(Mem),
Imm8(i8),
Imm16(i16),
Imm32(i32),
Imm64(i64),
}
/// x86 condition codes — the low nibble of the Jcc (0F 8x), SETcc (0F 9x)
/// and CMOVcc (0F 4x) opcodes. Inverse condition pairs differ only in the
/// lowest bit (e.g. E=0x4 / NE=0x5).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Cond {
O = 0x0,
NO = 0x1,
B = 0x2,
AE = 0x3,
E = 0x4,
NE = 0x5,
BE = 0x6,
A = 0x7,
S = 0x8,
NS = 0x9,
P = 0xA,
NP = 0xB,
L = 0xC,
GE = 0xD,
LE = 0xE,
G = 0xF,
}
impl Cond {
/// Logical negation of the condition (E <-> NE, L <-> GE, ...).
pub fn invert(self) -> Self {
// SAFETY: discriminants cover exactly 0x0..=0xF and flipping bit 0
// stays in that range; x86 pairs inverse conditions at even/odd codes.
unsafe { std::mem::transmute((self as u8) ^ 1) }
}
}
/// The four flag bits of a REX prefix (0100WRXB): W = 64-bit operand
/// size, R extends ModRM.reg, X extends SIB.index, B extends ModRM.rm /
/// SIB.base / the opcode register field.
#[derive(Debug, Clone, Copy, Default)]
pub struct Rex {
pub w: bool,
pub r: bool,
pub x: bool,
pub b: bool,
}
impl Rex {
    /// The prefix byte only has to be emitted when at least one flag is set.
    pub fn is_needed(&self) -> bool {
        [self.w, self.r, self.x, self.b].iter().any(|&flag| flag)
    }
    /// Pack the flags into the 0100WRXB prefix byte.
    pub fn encode(&self) -> u8 {
        let mut byte = 0x40u8;
        if self.w {
            byte |= 0b1000;
        }
        if self.r {
            byte |= 0b0100;
        }
        if self.x {
            byte |= 0b0010;
        }
        if self.b {
            byte |= 0b0001;
        }
        byte
    }
}
/// Pack a ModRM byte: `mod` in bits 7-6, `reg` in bits 5-3, `rm` in 2-0.
pub fn encode_modrm(mod_: u8, reg: u8, rm: u8) -> u8 {
    let hi = (mod_ & 0b11) << 6;
    let mid = (reg & 0b111) << 3;
    let lo = rm & 0b111;
    hi | mid | lo
}
/// Pack a SIB byte: `scale` in bits 7-6, `index` in bits 5-3, `base` in 2-0.
pub fn encode_sib(scale: u8, index: u8, base: u8) -> u8 {
    let hi = (scale & 0b11) << 6;
    let mid = (index & 0b111) << 3;
    let lo = base & 0b111;
    hi | mid | lo
}
/// A pending fixup against an external symbol: `offset` is the byte
/// position in the code buffer where the value is to be patched in.
#[derive(Debug, Clone)]
pub struct Relocation {
pub offset: usize,
pub symbol: String,
pub kind: RelocKind,
pub addend: i64,
}
/// Relocation flavors; names mirror the usual ELF x86-64 kinds
/// (PC-relative 32-bit, absolute 64/32-bit, GOT- and PLT-relative).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RelocKind {
Rel32,
Abs64,
Abs32,
GotPcRel,
PltRel,
}
/// A forward reference to a not-yet-defined label: `offset` is where the
/// placeholder displacement starts and `size` is its width in bytes (1 or 4).
#[derive(Debug, Clone)]
pub struct LabelRef {
pub offset: usize,
pub label: u32,
pub size: u8,
}
/// Incremental x86-64 machine-code emitter: appends encoded instructions
/// to `code`, tracking label definitions, unresolved label references and
/// external-symbol relocations along the way.
pub struct X86_64Encoder {
pub code: Vec<u8>,
pub labels: HashMap<u32, usize>,
pub label_refs: Vec<LabelRef>,
pub relocations: Vec<Relocation>,
next_label: u32,
}
impl X86_64Encoder {
/// Fresh encoder with an empty code buffer.
pub fn new() -> Self {
Self {
code: Vec::new(),
labels: HashMap::new(),
label_refs: Vec::new(),
relocations: Vec::new(),
next_label: 0,
}
}
/// Current length of the emitted code — the offset of the next byte.
pub fn position(&self) -> usize {
self.code.len()
}
/// Allocate a fresh label id; bind it later with `define_label`.
pub fn new_label(&mut self) -> u32 {
let id = self.next_label;
self.next_label += 1;
id
}
/// Bind `label` to the current position (re-binding overwrites).
pub fn define_label(&mut self, label: u32) {
self.labels.insert(label, self.code.len());
}
/// Append one raw byte.
pub fn emit_u8(&mut self, b: u8) {
self.code.push(b);
}
/// Append a 16-bit value, little-endian.
pub fn emit_u16(&mut self, v: u16) {
self.code.extend_from_slice(&v.to_le_bytes());
}
/// Append a 32-bit value, little-endian.
pub fn emit_u32(&mut self, v: u32) {
self.code.extend_from_slice(&v.to_le_bytes());
}
/// Append a 64-bit value, little-endian.
pub fn emit_u64(&mut self, v: u64) {
self.code.extend_from_slice(&v.to_le_bytes());
}
/// Emit the REX byte only when at least one flag is set.
fn emit_rex(&mut self, rex: Rex) {
if rex.is_needed() {
self.emit_u8(rex.encode());
}
}
/// REX for a reg-reg ModRM. Contract: the first argument feeds REX.R
/// (the register that goes in the ModRM *reg* field), the second feeds
/// REX.B (the *rm* field). Callers must pass them in that order.
fn emit_rex_rr(&mut self, dst: Reg64, src: Reg64, w: bool) {
let rex = Rex {
w,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
};
self.emit_rex(rex);
}
/// REX for a reg-mem ModRM: `reg` feeds REX.R, the memory operand's
/// index feeds REX.X and its base feeds REX.B.
fn emit_rex_rm(&mut self, reg: Reg64, mem: &Mem, w: bool) {
let rex = Rex {
w,
r: reg.requires_rex(),
x: mem.index.map_or(false, |i| i.requires_rex()),
b: mem.base.map_or(false, |b| b.requires_rex()),
};
self.emit_rex(rex);
}
/// Register-direct ModRM (mod=11): first argument in the reg field,
/// second in the rm field.
fn emit_modrm_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
/// ModRM (+ optional SIB + displacement) for a memory operand, with
/// `reg` in the ModRM reg field.
fn emit_modrm_rm(&mut self, reg: Reg64, mem: &Mem) {
let mod_ = mem.get_mod();
// RIP-relative: mod=00, rm=101, always followed by a disp32.
if mem.rip_relative {
self.emit_u8(encode_modrm(0b00, reg.encoding(), 0b101));
self.emit_u32(mem.disp as u32);
return;
}
if mem.needs_sib() {
// rm=100 signals "SIB follows". A missing base encodes as 101
// (valid only with mod=00, meaning disp32 and no base); a missing
// index encodes as 100 ("none").
self.emit_u8(encode_modrm(mod_, reg.encoding(), 0b100));
let base_enc = mem.base.map_or(0b101, |b| b.encoding());
let index_enc = mem.index.map_or(0b100, |i| i.encoding());
self.emit_u8(encode_sib(mem.scale as u8, index_enc, base_enc));
} else {
// needs_sib() is false only when a base register exists, so this
// unwrap cannot panic.
let base = mem.base.unwrap();
self.emit_u8(encode_modrm(mod_, reg.encoding(), base.encoding()));
}
// Displacement: mod=01 -> disp8, mod=10 -> disp32; mod=00 carries a
// disp32 only in the no-base (absolute) SIB form.
match mod_ {
0b00 if mem.base.is_none() => {
self.emit_u32(mem.disp as u32);
}
0b01 => {
self.emit_u8(mem.disp as u8);
}
0b10 => {
self.emit_u32(mem.disp as u32);
}
_ => {}
}
}
/// MOV dst, src (64-bit; opcode 0x89 /r, which places `src` in the ModRM
/// reg field and `dst` in the rm field).
pub fn mov_rr(&mut self, dst: Reg64, src: Reg64) {
    // REX.R must extend the ModRM reg field (src) and REX.B the rm field
    // (dst); the previous call passed (dst, src), swapping the bits and
    // mis-encoding mixed pairs such as mov_rr(RAX, R9). This matches the
    // (reg-field, rm-field) order used by add_rr/sub_rr.
    self.emit_rex_rr(src, dst, true);
    self.emit_u8(0x89);
    self.emit_modrm_rr(src, dst);
}
/// MOVABS dst, imm64 (REX.W + B8+r io): full 64-bit immediate load.
pub fn mov_ri64(&mut self, dst: Reg64, imm: i64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xB8 + dst.encoding());
self.emit_u64(imm as u64);
}
/// MOV dst, imm32 (REX.W + C7 /0): the 32-bit immediate is sign-extended
/// to 64 bits by the CPU.
pub fn mov_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xC7);
self.emit_u8(encode_modrm(0b11, 0, dst.encoding()));
self.emit_u32(imm as u32);
}
/// MOV dst32, imm32 (B8+r id): 32-bit load, upper half of the full
/// register is zeroed by the CPU.
pub fn mov_r32i32(&mut self, dst: Reg32, imm: i32) {
if dst.requires_rex() {
self.emit_rex(Rex {
w: false,
r: false,
x: false,
b: true,
});
}
self.emit_u8(0xB8 + dst.encoding());
self.emit_u32(imm as u32);
}
/// MOV dst, [mem] (REX.W + 8B /r): 64-bit load.
pub fn mov_rm(&mut self, dst: Reg64, src: &Mem) {
self.emit_rex_rm(dst, src, true);
self.emit_u8(0x8B);
self.emit_modrm_rm(dst, src);
}
/// MOV [mem], src (REX.W + 89 /r): 64-bit store.
pub fn mov_mr(&mut self, dst: &Mem, src: Reg64) {
self.emit_rex_rm(src, dst, true);
self.emit_u8(0x89);
self.emit_modrm_rm(src, dst);
}
/// MOV [mem], imm32 (REX.W + C7 /0): immediate sign-extended to 64 bits.
/// RAX here only supplies the /0 value for the ModRM reg field (its
/// encoding is 0); no register operand is actually involved.
pub fn mov_mi32(&mut self, dst: &Mem, imm: i32) {
self.emit_rex_rm(Reg64::RAX, dst, true); self.emit_u8(0xC7);
self.emit_modrm_rm(Reg64::RAX, dst);
self.emit_u32(imm as u32);
}
/// MOVZX dst, src8 (0F B6 /r): zero-extend a byte register into `dst`.
pub fn movzx_r64_r8(&mut self, dst: Reg64, src: Reg8) {
    let src_n = src as u8;
    if (4..=7).contains(&src_n) {
        // AH/CH/DH/BH cannot appear in an instruction carrying any REX
        // prefix (the old code always emitted REX.W, silently encoding
        // SPL..DIL instead). The 32-bit MOVZX form zero-extends through
        // bit 63 anyway, so emit it with no REX at all.
        debug_assert!(
            !dst.requires_rex(),
            "AH/CH/DH/BH cannot be encoded together with R8-R15"
        );
        self.emit_u8(0x0F);
        self.emit_u8(0xB6);
        self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
        return;
    }
    // REX.B is only the extension bit, i.e. R8B..R15B (ids 8..=15).
    // SPL..DIL (pseudo ids 16..=19) need a REX byte present but with B
    // clear — REX.W already guarantees the byte is emitted here, so
    // setting B for them (as the old code did) would mis-encode R12B..R15B.
    self.emit_rex(Rex {
        w: true,
        r: dst.requires_rex(),
        x: false,
        b: (8..=15).contains(&src_n),
    });
    self.emit_u8(0x0F);
    self.emit_u8(0xB6);
    self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
/// MOVSXD dst, src32 (REX.W + 63 /r): sign-extend 32 -> 64 bits.
pub fn movsxd(&mut self, dst: Reg64, src: Reg32) {
self.emit_rex(Rex {
w: true,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
self.emit_u8(0x63);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
/// LEA dst, [mem] (REX.W + 8D /r): compute the effective address.
pub fn lea(&mut self, dst: Reg64, src: &Mem) {
self.emit_rex_rm(dst, src, true);
self.emit_u8(0x8D);
self.emit_modrm_rm(dst, src);
}
/// XCHG a, b (64-bit). Uses the short single-byte form 90+r when either
/// operand is RAX, otherwise the general 87 /r form.
pub fn xchg(&mut self, a: Reg64, b: Reg64) {
if a == Reg64::RAX {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: b.requires_rex(),
});
self.emit_u8(0x90 + b.encoding());
} else if b == Reg64::RAX {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: a.requires_rex(),
});
self.emit_u8(0x90 + a.encoding());
} else {
// 87 /r: `a` in the reg field, `b` in the rm field (XCHG is
// symmetric, so either order is a valid encoding).
self.emit_rex_rr(a, b, true);
self.emit_u8(0x87);
self.emit_modrm_rr(a, b);
}
}
/// PUSH reg (50+r); 64-bit operand size is the default, so only a REX.B
/// prefix is needed for R8-R15.
pub fn push(&mut self, reg: Reg64) {
if reg.requires_rex() {
self.emit_rex(Rex {
w: false,
r: false,
x: false,
b: true,
});
}
self.emit_u8(0x50 + reg.encoding());
}
/// POP reg (58+r); same REX rule as `push`.
pub fn pop(&mut self, reg: Reg64) {
if reg.requires_rex() {
self.emit_rex(Rex {
w: false,
r: false,
x: false,
b: true,
});
}
self.emit_u8(0x58 + reg.encoding());
}
/// PUSH imm32 (68 id); the immediate is sign-extended to 64 bits.
pub fn push_imm32(&mut self, imm: i32) {
self.emit_u8(0x68);
self.emit_u32(imm as u32);
}
/// PUSH imm8 (6A ib); the immediate is sign-extended to 64 bits.
pub fn push_imm8(&mut self, imm: i8) {
self.emit_u8(0x6A);
self.emit_u8(imm as u8);
}
pub fn add_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_rex_rr(src, dst, true);
self.emit_u8(0x01);
self.emit_modrm_rr(src, dst);
}
pub fn add_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm >= -128 && imm <= 127 {
self.emit_u8(0x83);
self.emit_u8(encode_modrm(0b11, 0, dst.encoding()));
self.emit_u8(imm as u8);
} else if dst == Reg64::RAX {
self.emit_u8(0x05);
self.emit_u32(imm as u32);
} else {
self.emit_u8(0x81);
self.emit_u8(encode_modrm(0b11, 0, dst.encoding()));
self.emit_u32(imm as u32);
}
}
pub fn add_rm(&mut self, dst: Reg64, src: &Mem) {
self.emit_rex_rm(dst, src, true);
self.emit_u8(0x03);
self.emit_modrm_rm(dst, src);
}
pub fn sub_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_rex_rr(src, dst, true);
self.emit_u8(0x29);
self.emit_modrm_rr(src, dst);
}
pub fn sub_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm >= -128 && imm <= 127 {
self.emit_u8(0x83);
self.emit_u8(encode_modrm(0b11, 5, dst.encoding()));
self.emit_u8(imm as u8);
} else if dst == Reg64::RAX {
self.emit_u8(0x2D);
self.emit_u32(imm as u32);
} else {
self.emit_u8(0x81);
self.emit_u8(encode_modrm(0b11, 5, dst.encoding()));
self.emit_u32(imm as u32);
}
}
pub fn imul_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_rex_rr(dst, src, true);
self.emit_u8(0x0F);
self.emit_u8(0xAF);
self.emit_modrm_rr(dst, src);
}
pub fn imul_rri32(&mut self, dst: Reg64, src: Reg64, imm: i32) {
self.emit_rex_rr(dst, src, true);
if imm >= -128 && imm <= 127 {
self.emit_u8(0x6B);
self.emit_modrm_rr(dst, src);
self.emit_u8(imm as u8);
} else {
self.emit_u8(0x69);
self.emit_modrm_rr(dst, src);
self.emit_u32(imm as u32);
}
}
pub fn idiv(&mut self, src: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: src.requires_rex(),
});
self.emit_u8(0xF7);
self.emit_u8(encode_modrm(0b11, 7, src.encoding()));
}
pub fn div(&mut self, src: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: src.requires_rex(),
});
self.emit_u8(0xF7);
self.emit_u8(encode_modrm(0b11, 6, src.encoding()));
}
pub fn cqo(&mut self) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: false,
});
self.emit_u8(0x99);
}
pub fn cdq(&mut self) {
self.emit_u8(0x99);
}
pub fn neg(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xF7);
self.emit_u8(encode_modrm(0b11, 3, dst.encoding()));
}
pub fn inc(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xFF);
self.emit_u8(encode_modrm(0b11, 0, dst.encoding()));
}
pub fn dec(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xFF);
self.emit_u8(encode_modrm(0b11, 1, dst.encoding()));
}
pub fn and_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_rex_rr(src, dst, true);
self.emit_u8(0x21);
self.emit_modrm_rr(src, dst);
}
pub fn and_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm >= -128 && imm <= 127 {
self.emit_u8(0x83);
self.emit_u8(encode_modrm(0b11, 4, dst.encoding()));
self.emit_u8(imm as u8);
} else if dst == Reg64::RAX {
self.emit_u8(0x25);
self.emit_u32(imm as u32);
} else {
self.emit_u8(0x81);
self.emit_u8(encode_modrm(0b11, 4, dst.encoding()));
self.emit_u32(imm as u32);
}
}
pub fn or_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_rex_rr(src, dst, true);
self.emit_u8(0x09);
self.emit_modrm_rr(src, dst);
}
pub fn or_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm >= -128 && imm <= 127 {
self.emit_u8(0x83);
self.emit_u8(encode_modrm(0b11, 1, dst.encoding()));
self.emit_u8(imm as u8);
} else if dst == Reg64::RAX {
self.emit_u8(0x0D);
self.emit_u32(imm as u32);
} else {
self.emit_u8(0x81);
self.emit_u8(encode_modrm(0b11, 1, dst.encoding()));
self.emit_u32(imm as u32);
}
}
pub fn xor_rr(&mut self, dst: Reg64, src: Reg64) {
self.emit_rex_rr(src, dst, true);
self.emit_u8(0x31);
self.emit_modrm_rr(src, dst);
}
pub fn xor_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm >= -128 && imm <= 127 {
self.emit_u8(0x83);
self.emit_u8(encode_modrm(0b11, 6, dst.encoding()));
self.emit_u8(imm as u8);
} else if dst == Reg64::RAX {
self.emit_u8(0x35);
self.emit_u32(imm as u32);
} else {
self.emit_u8(0x81);
self.emit_u8(encode_modrm(0b11, 6, dst.encoding()));
self.emit_u32(imm as u32);
}
}
pub fn not(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xF7);
self.emit_u8(encode_modrm(0b11, 2, dst.encoding()));
}
pub fn shl_cl(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xD3);
self.emit_u8(encode_modrm(0b11, 4, dst.encoding()));
}
pub fn shl_imm(&mut self, dst: Reg64, imm: u8) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm == 1 {
self.emit_u8(0xD1);
self.emit_u8(encode_modrm(0b11, 4, dst.encoding()));
} else {
self.emit_u8(0xC1);
self.emit_u8(encode_modrm(0b11, 4, dst.encoding()));
self.emit_u8(imm);
}
}
pub fn shr_cl(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xD3);
self.emit_u8(encode_modrm(0b11, 5, dst.encoding()));
}
pub fn shr_imm(&mut self, dst: Reg64, imm: u8) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm == 1 {
self.emit_u8(0xD1);
self.emit_u8(encode_modrm(0b11, 5, dst.encoding()));
} else {
self.emit_u8(0xC1);
self.emit_u8(encode_modrm(0b11, 5, dst.encoding()));
self.emit_u8(imm);
}
}
pub fn sar_cl(&mut self, dst: Reg64) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
self.emit_u8(0xD3);
self.emit_u8(encode_modrm(0b11, 7, dst.encoding()));
}
pub fn sar_imm(&mut self, dst: Reg64, imm: u8) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm == 1 {
self.emit_u8(0xD1);
self.emit_u8(encode_modrm(0b11, 7, dst.encoding()));
} else {
self.emit_u8(0xC1);
self.emit_u8(encode_modrm(0b11, 7, dst.encoding()));
self.emit_u8(imm);
}
}
pub fn cmp_rr(&mut self, a: Reg64, b: Reg64) {
self.emit_rex_rr(b, a, true);
self.emit_u8(0x39);
self.emit_modrm_rr(b, a);
}
pub fn cmp_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if imm >= -128 && imm <= 127 {
self.emit_u8(0x83);
self.emit_u8(encode_modrm(0b11, 7, dst.encoding()));
self.emit_u8(imm as u8);
} else if dst == Reg64::RAX {
self.emit_u8(0x3D);
self.emit_u32(imm as u32);
} else {
self.emit_u8(0x81);
self.emit_u8(encode_modrm(0b11, 7, dst.encoding()));
self.emit_u32(imm as u32);
}
}
pub fn cmp_rm(&mut self, dst: Reg64, src: &Mem) {
self.emit_rex_rm(dst, src, true);
self.emit_u8(0x3B);
self.emit_modrm_rm(dst, src);
}
pub fn test_rr(&mut self, a: Reg64, b: Reg64) {
self.emit_rex_rr(b, a, true);
self.emit_u8(0x85);
self.emit_modrm_rr(b, a);
}
pub fn test_ri32(&mut self, dst: Reg64, imm: i32) {
self.emit_rex(Rex {
w: true,
r: false,
x: false,
b: dst.requires_rex(),
});
if dst == Reg64::RAX {
self.emit_u8(0xA9);
} else {
self.emit_u8(0xF7);
self.emit_u8(encode_modrm(0b11, 0, dst.encoding()));
}
self.emit_u32(imm as u32);
}
/// SETcc dst (0F 90+cc /0): set the byte register to 1 if `cond` holds,
/// else 0.
pub fn setcc(&mut self, cond: Cond, dst: Reg8) {
    let n = dst as u8;
    if n >= 8 {
        // R8B..R15B (ids 8..=15) need REX.B set; SPL..DIL (pseudo ids
        // 16..=19) need a bare REX byte (0x40) with B *clear*, otherwise
        // encodings 4..=7 are read as AH..BH. The old code set REX.B for
        // SPL..DIL too, mis-encoding them as SETcc R12B..R15B. The byte
        // is emitted directly because emit_rex() suppresses a flag-less
        // 0x40 prefix.
        let b_bit = u8::from((8..=15).contains(&n));
        self.emit_u8(0x40 | b_bit);
    }
    self.emit_u8(0x0F);
    self.emit_u8(0x90 + cond as u8);
    self.emit_u8(encode_modrm(0b11, 0, dst.encoding()));
}
/// CMOVcc dst, src (REX.W + 0F 40+cc /r): move only if `cond` holds.
pub fn cmovcc(&mut self, cond: Cond, dst: Reg64, src: Reg64) {
self.emit_rex_rr(dst, src, true);
self.emit_u8(0x0F);
self.emit_u8(0x40 + cond as u8);
self.emit_modrm_rr(dst, src);
}
/// JMP rel32 (E9 cd); `offset` is relative to the end of the 5-byte
/// instruction.
pub fn jmp_rel32(&mut self, offset: i32) {
self.emit_u8(0xE9);
self.emit_u32(offset as u32);
}
/// JMP to a label. Backward references are resolved immediately; forward
/// references emit a 4-byte placeholder patched later by `fixup_labels`.
pub fn jmp_label(&mut self, label: u32) {
if let Some(&target) = self.labels.get(&label) {
// +5 accounts for the full size of the E9 rel32 instruction.
let offset = (target as i64) - (self.code.len() as i64 + 5);
self.jmp_rel32(offset as i32);
} else {
self.emit_u8(0xE9);
self.label_refs.push(LabelRef {
offset: self.code.len(),
label,
size: 4,
});
self.emit_u32(0); }
}
/// JMP reg (FF /4): indirect jump through a register.
pub fn jmp_reg(&mut self, target: Reg64) {
if target.requires_rex() {
self.emit_rex(Rex {
w: false,
r: false,
x: false,
b: true,
});
}
self.emit_u8(0xFF);
self.emit_u8(encode_modrm(0b11, 4, target.encoding()));
}
/// Jcc rel32 (0F 80+cc cd); `offset` is relative to the end of the
/// 6-byte instruction.
pub fn jcc_rel32(&mut self, cond: Cond, offset: i32) {
self.emit_u8(0x0F);
self.emit_u8(0x80 + cond as u8);
self.emit_u32(offset as u32);
}
/// Jcc to a label; same resolution scheme as `jmp_label`.
pub fn jcc_label(&mut self, cond: Cond, label: u32) {
if let Some(&target) = self.labels.get(&label) {
// +6 accounts for the full size of the 0F 8x rel32 instruction.
let offset = (target as i64) - (self.code.len() as i64 + 6);
self.jcc_rel32(cond, offset as i32);
} else {
self.emit_u8(0x0F);
self.emit_u8(0x80 + cond as u8);
self.label_refs.push(LabelRef {
offset: self.code.len(),
label,
size: 4,
});
self.emit_u32(0);
}
}
/// Jcc rel8 (70+cc cb): short-form conditional jump.
pub fn jcc_rel8(&mut self, cond: Cond, offset: i8) {
self.emit_u8(0x70 + cond as u8);
self.emit_u8(offset as u8);
}
/// CALL rel32 (E8 cd); `offset` is relative to the end of the 5-byte
/// instruction.
pub fn call_rel32(&mut self, offset: i32) {
self.emit_u8(0xE8);
self.emit_u32(offset as u32);
}
/// CALL to an external symbol: emits E8 with a zero placeholder and
/// records a Rel32 relocation (addend -4 per the usual PC-relative
/// convention, since the fixup site is 4 bytes before the next
/// instruction).
pub fn call_symbol(&mut self, symbol: &str) {
self.emit_u8(0xE8);
self.relocations.push(Relocation {
offset: self.code.len(),
symbol: symbol.to_string(),
kind: RelocKind::Rel32,
addend: -4,
});
self.emit_u32(0);
}
/// CALL reg (FF /2): indirect call through a register.
pub fn call_reg(&mut self, target: Reg64) {
if target.requires_rex() {
self.emit_rex(Rex {
w: false,
r: false,
x: false,
b: true,
});
}
self.emit_u8(0xFF);
self.emit_u8(encode_modrm(0b11, 2, target.encoding()));
}
/// RET (C3).
pub fn ret(&mut self) {
self.emit_u8(0xC3);
}
/// RET imm16 (C2 iw): return and pop `imm` extra bytes off the stack.
pub fn ret_imm(&mut self, imm: u16) {
self.emit_u8(0xC2);
self.emit_u16(imm);
}
/// NOP (90).
pub fn nop(&mut self) {
self.emit_u8(0x90);
}
/// Emit `n` bytes of padding using the recommended multi-byte NOP
/// sequences (up to 9 bytes per instruction), greedily largest-first.
pub fn nop_n(&mut self, n: usize) {
let nops: &[&[u8]] = &[
&[], &[0x90], &[0x66, 0x90], &[0x0F, 0x1F, 0x00], &[0x0F, 0x1F, 0x40, 0x00], &[0x0F, 0x1F, 0x44, 0x00, 0x00], &[0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00], &[0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00], &[0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00], &[0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00], ];
let mut remaining = n;
while remaining > 0 {
let chunk = remaining.min(9);
self.code.extend_from_slice(nops[chunk]);
remaining -= chunk;
}
}
/// UD2 (0F 0B): guaranteed-undefined instruction (trap).
pub fn ud2(&mut self) {
self.emit_u8(0x0F);
self.emit_u8(0x0B);
}
/// INT3 (CC): debugger breakpoint.
pub fn int3(&mut self) {
self.emit_u8(0xCC);
}
/// INT n (CD ib): software interrupt.
pub fn int(&mut self, n: u8) {
self.emit_u8(0xCD);
self.emit_u8(n);
}
/// SYSCALL (0F 05).
pub fn syscall(&mut self) {
self.emit_u8(0x0F);
self.emit_u8(0x05);
}
pub fn movsd_rr(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF2);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x10);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn movsd_rm(&mut self, dst: RegXmm, src: &Mem) {
self.emit_u8(0xF2);
let rex = Rex {
w: false,
r: dst.requires_rex(),
x: src.index.map_or(false, |i| i.requires_rex()),
b: src.base.map_or(false, |b| b.requires_rex()),
};
self.emit_rex(rex);
self.emit_u8(0x0F);
self.emit_u8(0x10);
let reg = unsafe { std::mem::transmute::<u8, Reg64>(dst as u8) };
self.emit_modrm_rm(reg, src);
}
pub fn movsd_mr(&mut self, dst: &Mem, src: RegXmm) {
self.emit_u8(0xF2);
let rex = Rex {
w: false,
r: src.requires_rex(),
x: dst.index.map_or(false, |i| i.requires_rex()),
b: dst.base.map_or(false, |b| b.requires_rex()),
};
self.emit_rex(rex);
self.emit_u8(0x0F);
self.emit_u8(0x11);
let reg = unsafe { std::mem::transmute::<u8, Reg64>(src as u8) };
self.emit_modrm_rm(reg, dst);
}
pub fn movss_rr(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF3);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x10);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn addsd(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF2);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x58);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn subsd(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF2);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x5C);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn mulsd(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF2);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x59);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn divsd(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF2);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x5E);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn addss(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF3);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x58);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn subss(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF3);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x5C);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn mulss(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF3);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x59);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn divss(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF3);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x5E);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn ucomisd(&mut self, a: RegXmm, b: RegXmm) {
self.emit_u8(0x66);
if a.requires_rex() || b.requires_rex() {
self.emit_rex(Rex {
w: false,
r: a.requires_rex(),
x: false,
b: b.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x2E);
self.emit_u8(encode_modrm(0b11, a.encoding(), b.encoding()));
}
pub fn ucomiss(&mut self, a: RegXmm, b: RegXmm) {
if a.requires_rex() || b.requires_rex() {
self.emit_rex(Rex {
w: false,
r: a.requires_rex(),
x: false,
b: b.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x2E);
self.emit_u8(encode_modrm(0b11, a.encoding(), b.encoding()));
}
pub fn cvtsi2sd(&mut self, dst: RegXmm, src: Reg64) {
self.emit_u8(0xF2);
self.emit_rex(Rex {
w: true,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
self.emit_u8(0x0F);
self.emit_u8(0x2A);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn cvtsd2si(&mut self, dst: Reg64, src: RegXmm) {
self.emit_u8(0xF2);
self.emit_rex(Rex {
w: true,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
self.emit_u8(0x0F);
self.emit_u8(0x2D);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn cvttsd2si(&mut self, dst: Reg64, src: RegXmm) {
self.emit_u8(0xF2);
self.emit_rex(Rex {
w: true,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
self.emit_u8(0x0F);
self.emit_u8(0x2C);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn cvtss2sd(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF3);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x5A);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn cvtsd2ss(&mut self, dst: RegXmm, src: RegXmm) {
self.emit_u8(0xF2);
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x5A);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
pub fn xorps(&mut self, dst: RegXmm, src: RegXmm) {
if dst.requires_rex() || src.requires_rex() {
self.emit_rex(Rex {
w: false,
r: dst.requires_rex(),
x: false,
b: src.requires_rex(),
});
}
self.emit_u8(0x0F);
self.emit_u8(0x57);
self.emit_u8(encode_modrm(0b11, dst.encoding(), src.encoding()));
}
/// Patch every recorded forward label reference with the final relative
/// displacement, then clear the reference list.
///
/// # Panics
/// Panics if a referenced label was never defined, or on an unsupported
/// reference size (only 1 and 4 bytes are handled).
pub fn fixup_labels(&mut self) {
for label_ref in &self.label_refs {
if let Some(&target) = self.labels.get(&label_ref.label) {
// Displacements are relative to the end of the placeholder
// field, which coincides with the end of the instruction for
// every emitter that records a LabelRef.
let offset = (target as i64) - (label_ref.offset as i64 + label_ref.size as i64);
match label_ref.size {
1 => {
self.code[label_ref.offset] = offset as u8;
}
4 => {
let bytes = (offset as i32).to_le_bytes();
self.code[label_ref.offset..label_ref.offset + 4].copy_from_slice(&bytes);
}
_ => panic!("Unsupported label reference size"),
}
} else {
panic!("Undefined label: {}", label_ref.label);
}
}
self.label_refs.clear();
}
/// Pad with multi-byte NOPs until the current offset is a multiple of
/// `alignment`. Panics if `alignment` is zero (division by zero).
pub fn align(&mut self, alignment: usize) {
    let rem = self.code.len() % alignment;
    if rem != 0 {
        self.nop_n(alignment - rem);
    }
}
/// Alias for `new_label`.
pub fn create_label(&mut self) -> u32 {
self.new_label()
}
/// Alias for `define_label`.
pub fn bind_label(&mut self, label: u32) {
self.define_label(label);
}
/// Alias for `mov_ri64`.
pub fn mov_ri(&mut self, dst: Reg64, imm: i64) {
self.mov_ri64(dst, imm);
}
/// Alias for `test_ri32`.
pub fn test_ri(&mut self, dst: Reg64, imm: i32) {
self.test_ri32(dst, imm);
}
/// Alias for `shr_imm`.
pub fn shr_ri(&mut self, dst: Reg64, imm: u8) {
self.shr_imm(dst, imm);
}
/// Resolve all pending label references and hand back the finished code
/// buffer; panics (via `fixup_labels`) on any undefined label.
pub fn finish(mut self) -> Vec<u8> {
self.fixup_labels();
self.code
}
}
impl Default for X86_64Encoder {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mov_rr() {
let mut enc = X86_64Encoder::new();
enc.mov_rr(Reg64::RAX, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x89, 0xC8]); }
#[test]
fn test_mov_ri64() {
let mut enc = X86_64Encoder::new();
enc.mov_ri64(Reg64::RAX, 0x123456789ABCDEF0i64);
assert_eq!(enc.code.len(), 10); assert_eq!(enc.code[0], 0x48); assert_eq!(enc.code[1], 0xB8); }
#[test]
fn test_mov_r8() {
let mut enc = X86_64Encoder::new();
enc.mov_rr(Reg64::R8, Reg64::R9);
assert_eq!(enc.code, vec![0x4D, 0x89, 0xC8]);
}
#[test]
fn test_push_pop() {
let mut enc = X86_64Encoder::new();
enc.push(Reg64::RAX);
enc.push(Reg64::R15);
enc.pop(Reg64::R15);
enc.pop(Reg64::RAX);
assert_eq!(enc.code, vec![0x50, 0x41, 0x57, 0x41, 0x5F, 0x58]);
}
#[test]
fn test_add_rr() {
let mut enc = X86_64Encoder::new();
enc.add_rr(Reg64::RAX, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x01, 0xC8]); }
#[test]
fn test_add_ri32_small() {
let mut enc = X86_64Encoder::new();
enc.add_ri32(Reg64::RAX, 10);
assert_eq!(enc.code, vec![0x48, 0x83, 0xC0, 0x0A]);
}
#[test]
fn test_add_ri32_large() {
let mut enc = X86_64Encoder::new();
enc.add_ri32(Reg64::RAX, 0x12345678);
assert_eq!(enc.code, vec![0x48, 0x05, 0x78, 0x56, 0x34, 0x12]);
}
#[test]
fn test_sub_rr() {
let mut enc = X86_64Encoder::new();
enc.sub_rr(Reg64::RBX, Reg64::RDX);
assert_eq!(enc.code, vec![0x48, 0x29, 0xD3]);
}
#[test]
fn test_imul_rr() {
let mut enc = X86_64Encoder::new();
enc.imul_rr(Reg64::RAX, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x0F, 0xAF, 0xC1]);
}
#[test]
fn test_idiv() {
let mut enc = X86_64Encoder::new();
enc.cqo();
enc.idiv(Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x99, 0x48, 0xF7, 0xF9]);
}
#[test]
fn test_neg() {
let mut enc = X86_64Encoder::new();
enc.neg(Reg64::RAX);
assert_eq!(enc.code, vec![0x48, 0xF7, 0xD8]);
}
#[test]
fn test_and_rr() {
let mut enc = X86_64Encoder::new();
enc.and_rr(Reg64::RAX, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x21, 0xC8]);
}
#[test]
fn test_or_rr() {
let mut enc = X86_64Encoder::new();
enc.or_rr(Reg64::RAX, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x09, 0xC8]);
}
#[test]
fn test_xor_rr() {
let mut enc = X86_64Encoder::new();
enc.xor_rr(Reg64::RAX, Reg64::RAX); assert_eq!(enc.code, vec![0x48, 0x31, 0xC0]);
}
#[test]
fn test_shl_imm() {
let mut enc = X86_64Encoder::new();
enc.shl_imm(Reg64::RAX, 4);
assert_eq!(enc.code, vec![0x48, 0xC1, 0xE0, 0x04]);
}
#[test]
fn test_shr_imm() {
let mut enc = X86_64Encoder::new();
enc.shr_imm(Reg64::RAX, 1);
assert_eq!(enc.code, vec![0x48, 0xD1, 0xE8]); }
#[test]
fn test_cmp_rr() {
let mut enc = X86_64Encoder::new();
enc.cmp_rr(Reg64::RAX, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x39, 0xC8]);
}
#[test]
fn test_test_rr() {
let mut enc = X86_64Encoder::new();
enc.test_rr(Reg64::RAX, Reg64::RAX);
assert_eq!(enc.code, vec![0x48, 0x85, 0xC0]);
}
#[test]
fn test_setcc() {
let mut enc = X86_64Encoder::new();
enc.setcc(Cond::E, Reg8::AL);
assert_eq!(enc.code, vec![0x0F, 0x94, 0xC0]);
}
#[test]
fn test_jmp_rel32() {
let mut enc = X86_64Encoder::new();
enc.jmp_rel32(0x12345678);
assert_eq!(enc.code, vec![0xE9, 0x78, 0x56, 0x34, 0x12]);
}
#[test]
fn test_jcc_rel32() {
let mut enc = X86_64Encoder::new();
enc.jcc_rel32(Cond::E, 0x100);
assert_eq!(enc.code, vec![0x0F, 0x84, 0x00, 0x01, 0x00, 0x00]);
}
#[test]
fn test_call_ret() {
let mut enc = X86_64Encoder::new();
enc.call_rel32(0);
enc.ret();
assert_eq!(enc.code, vec![0xE8, 0x00, 0x00, 0x00, 0x00, 0xC3]);
}
#[test]
fn test_nop() {
let mut enc = X86_64Encoder::new();
enc.nop();
enc.nop_n(5);
assert_eq!(enc.code[0], 0x90); assert_eq!(enc.code.len(), 6); }
#[test]
fn test_ud2() {
let mut enc = X86_64Encoder::new();
enc.ud2();
assert_eq!(enc.code, vec![0x0F, 0x0B]);
}
#[test]
fn test_syscall() {
let mut enc = X86_64Encoder::new();
enc.syscall();
assert_eq!(enc.code, vec![0x0F, 0x05]);
}
#[test]
fn test_labels() {
let mut enc = X86_64Encoder::new();
let label = enc.new_label();
enc.jmp_label(label);
enc.nop();
enc.nop();
enc.define_label(label);
enc.nop();
enc.fixup_labels();
let offset = enc.code[1..5].to_vec();
assert_eq!(offset, vec![0x02, 0x00, 0x00, 0x00]);
}
#[test]
fn test_mov_rm() {
let mut enc = X86_64Encoder::new();
let mem = Mem::base_disp(Reg64::RBP, -8);
enc.mov_rm(Reg64::RAX, &mem);
assert_eq!(enc.code, vec![0x48, 0x8B, 0x45, 0xF8]);
}
#[test]
fn test_mov_mr() {
let mut enc = X86_64Encoder::new();
let mem = Mem::base_disp(Reg64::RBP, -16);
enc.mov_mr(&mem, Reg64::RCX);
assert_eq!(enc.code, vec![0x48, 0x89, 0x4D, 0xF0]);
}
#[test]
fn test_lea() {
let mut enc = X86_64Encoder::new();
let mem = Mem::base_index_scale_disp(Reg64::RBX, Reg64::RCX, Scale::X4, 8);
enc.lea(Reg64::RAX, &mem);
assert!(enc.code.len() > 0);
}
#[test]
fn test_movsd_rr() {
let mut enc = X86_64Encoder::new();
enc.movsd_rr(RegXmm::XMM0, RegXmm::XMM1);
assert_eq!(enc.code, vec![0xF2, 0x0F, 0x10, 0xC1]);
}
#[test]
fn test_addsd() {
let mut enc = X86_64Encoder::new();
enc.addsd(RegXmm::XMM0, RegXmm::XMM1);
assert_eq!(enc.code, vec![0xF2, 0x0F, 0x58, 0xC1]);
}
#[test]
fn test_cvtsi2sd() {
let mut enc = X86_64Encoder::new();
enc.cvtsi2sd(RegXmm::XMM0, Reg64::RAX);
assert_eq!(enc.code, vec![0xF2, 0x48, 0x0F, 0x2A, 0xC0]);
}
#[test]
fn test_condition_invert() {
assert_eq!(Cond::E.invert(), Cond::NE);
assert_eq!(Cond::L.invert(), Cond::GE);
assert_eq!(Cond::G.invert(), Cond::LE);
}
#[test]
fn test_rex_encoding() {
let rex = Rex {
w: true,
r: true,
x: false,
b: true,
};
assert_eq!(rex.encode(), 0x4D); }
#[test]
fn test_modrm_encoding() {
assert_eq!(encode_modrm(0b11, 0, 1), 0xC1); assert_eq!(encode_modrm(0b00, 4, 5), 0x25); }
#[test]
fn test_sib_encoding() {
assert_eq!(encode_sib(2, 1, 3), 0x8B);
}
#[test]
fn test_prologue_epilogue() {
let mut enc = X86_64Encoder::new();
enc.push(Reg64::RBP);
enc.mov_rr(Reg64::RBP, Reg64::RSP);
enc.sub_ri32(Reg64::RSP, 32);
enc.mov_rr(Reg64::RSP, Reg64::RBP);
enc.pop(Reg64::RBP);
enc.ret();
assert!(enc.code.len() > 10);
assert_eq!(*enc.code.last().unwrap(), 0xC3); }
#[test]
fn test_align() {
let mut enc = X86_64Encoder::new();
enc.nop();
enc.align(16);
assert_eq!(enc.code.len() % 16, 0);
}
}