use crate::windows::constants::*;
use crate::emu::Emu;
use crate::maps::mem64::Permission;
use crate::windows::structures::MemoryBasicInformation64;
use iced_x86::{Instruction, Mnemonic, OpKind, Register};
/// Converts an internal memory `Permission` into the closest NT page
/// protection constant, as reported by e.g. NtQueryVirtualMemory.
fn permission_to_nt_page_protection(perm: Permission) -> u32 {
    const PAGE_NOACCESS: u32 = 0x01;
    const PAGE_READONLY: u32 = 0x02;
    const PAGE_READWRITE: u32 = 0x04;
    const PAGE_EXECUTE: u32 = 0x10;
    const PAGE_EXECUTE_READ: u32 = 0x20;
    const PAGE_EXECUTE_READWRITE: u32 = 0x40;
    let r = perm.contains(Permission::READ);
    let w = perm.contains(Permission::WRITE);
    let x = perm.contains(Permission::EXECUTE);
    // Windows has no write-only page protection, so any writable combination
    // is reported as the corresponding READWRITE protection. The previous
    // catch-all arm collapsed write-only pages to PAGE_NOACCESS, which made a
    // writable emulator page look inaccessible to queries. The match is now
    // exhaustive over (r, w, x).
    match (r, w, x) {
        (false, false, false) => PAGE_NOACCESS,
        (true, false, false) => PAGE_READONLY,
        (_, true, false) => PAGE_READWRITE,
        (false, false, true) => PAGE_EXECUTE,
        (true, false, true) => PAGE_EXECUTE_READ,
        (_, true, true) => PAGE_EXECUTE_READWRITE,
    }
}
/// Derives the emulator's `Permission` flags from an NT page-protection mask
/// by testing the mask against precomputed read/write/execute groups.
fn nt_page_protection_to_permission(protect: u32) -> Permission {
    const PAGE_READONLY: u32 = 0x02;
    const PAGE_READWRITE: u32 = 0x04;
    const PAGE_WRITECOPY: u32 = 0x08;
    const PAGE_EXECUTE: u32 = 0x10;
    const PAGE_EXECUTE_READ: u32 = 0x20;
    const PAGE_EXECUTE_READWRITE: u32 = 0x40;
    const PAGE_EXECUTE_WRITECOPY: u32 = 0x80;
    // Protections that imply read access (write and copy-on-write pages are
    // readable too).
    const READ_MASK: u32 = PAGE_READONLY
        | PAGE_READWRITE
        | PAGE_WRITECOPY
        | PAGE_EXECUTE_READ
        | PAGE_EXECUTE_READWRITE
        | PAGE_EXECUTE_WRITECOPY;
    // Protections that imply write access.
    const WRITE_MASK: u32 =
        PAGE_READWRITE | PAGE_WRITECOPY | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY;
    // Protections that imply execute access.
    const EXEC_MASK: u32 =
        PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY;
    Permission::from_flags(
        protect & READ_MASK != 0,
        protect & WRITE_MASK != 0,
        protect & EXEC_MASK != 0,
    )
}
/// Rounds `size` up to the next 4 KiB page boundary, saturating instead of
/// wrapping when the addition would overflow `u64`.
fn align_up_page(size: u64) -> u64 {
    const PAGE_MASK: u64 = 0xfff;
    size.saturating_add(PAGE_MASK) & !PAGE_MASK
}
/// Writes a self-referential LIST_ENTRY (both qwords pointing at the entry
/// itself) at a fixed offset inside a fresh allocation, so ntdll heap-list
/// walks terminate instead of following null links. Active only when winapi
/// emulation is enabled.
fn patch_ldr_heap_list_sentinel(emu: &mut Emu, base: u64, size: u64) {
    // Fixed offset of the sentinel inside the allocation; the region must be
    // big enough to hold both link qwords. NOTE(review): 0xb000 looks
    // layout-specific — confirm against the heap structures being emulated.
    const OFF: u64 = 0xb000;
    if !emu.cfg.emulate_winapi || size < OFF + 0x10 {
        return;
    }
    let entry = base.saturating_add(OFF);
    if emu.maps.is_mapped(entry) && emu.maps.is_mapped(entry + 8) {
        // Point Flink and Blink back at the entry itself (best-effort writes).
        let _ = emu.maps.write_qword(entry, entry);
        let _ = emu.maps.write_qword(entry + 8, entry);
    }
}
/// Heuristic fixup applied while executing inside ntdll: when a `mov` whose
/// source operand is `[rsi]` is about to run and RSI points at a zeroed pair
/// of qwords inside the emulator's 64-bit allocation window, the pair is
/// rewritten into a self-referential LIST_ENTRY (Flink == Blink == RSI) so
/// heap-list walking code terminates instead of chasing null links.
pub fn ntdll_heap_list_walk_fixup(emu: &mut Emu, ins: &Instruction, rip: u64) {
    if !emu.cfg.emulate_winapi {
        return;
    }
    // Only patch when RIP lies inside an ntdll mapping ("ntdll.pe" or any
    // map whose name starts with "ntdll.").
    let in_ntdll = emu
        .maps
        .get_mem_by_addr(rip)
        .map(|m| {
            let n = m.get_name();
            n == "ntdll.pe" || n.starts_with("ntdll.")
        })
        .unwrap_or(false);
    if !in_ntdll {
        return;
    }
    // Restrict to `mov dst, [rsi]`: at least two operands, second operand is
    // memory with base RSI, no index register, zero displacement.
    if ins.mnemonic() != Mnemonic::Mov || ins.op_count() < 2 {
        return;
    }
    if ins.op1_kind() != OpKind::Memory {
        return;
    }
    if ins.memory_base() != Register::RSI || ins.memory_index() != Register::None {
        return;
    }
    if ins.memory_displacement64() != 0 {
        return;
    }
    // RSI must fall inside the emulator allocation range and both link
    // qwords must be mapped.
    let rsi = emu.regs().rsi;
    if rsi < ALLOC64_MIN || rsi >= ALLOC64_MAX {
        return;
    }
    if !emu.maps.is_mapped(rsi) || !emu.maps.is_mapped(rsi + 8) {
        return;
    }
    // Only patch a pair that currently reads as all-zero; an unreadable
    // qword counts as nonzero (unwrap_or(1)) and leaves the pair alone.
    if emu.maps.read_qword(rsi).unwrap_or(1) != 0
        || emu.maps.read_qword(rsi + 8).unwrap_or(1) != 0
    {
        return;
    }
    // Peek at the opcode bytes of the next instruction: prefixes 48 85
    // (test r64), 85 (test r32) and 48 83 (cmp/arith imm8 r64) indicate the
    // code is about to null-check the loaded value, in which case the zero
    // must be preserved rather than patched.
    let next = rip.wrapping_add(ins.len() as u64);
    let b0 = emu.maps.read_byte(next).unwrap_or(0);
    let b1 = emu.maps.read_byte(next.wrapping_add(1)).unwrap_or(0);
    let next_is_null_check = (b0 == 0x48 && b1 == 0x85) || b0 == 0x85 || (b0 == 0x48 && b1 == 0x83);
    if next_is_null_check {
        return;
    }
    // Turn the zeroed pair into a self-referential list head (best-effort).
    let _ = emu.maps.write_qword(rsi, rsi);
    let _ = emu.maps.write_qword(rsi + 8, rsi);
}
/// Returns true for the NT pseudo-handles -1 (NtCurrentProcess) and
/// -2 (also accepted here); the emulator only services the current process.
fn is_current_process_handle(h: u64) -> bool {
    matches!(h, 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_fffe)
}
/// Emulates the NtQueryVirtualMemory syscall.
///
/// x64 syscall args: rcx = process handle, rdx = base address to query,
/// r8 = MEMORY_INFORMATION_CLASS, r9 = MemoryInformation output buffer,
/// [rsp+0x28] = MemoryInformationLength, [rsp+0x30] = optional ReturnLength
/// pointer. Supports MemoryImageInformation and the two basic-information
/// classes; any other class fails with STATUS_INVALID_PARAMETER.
///
/// NOTE(review): the process handle is logged but never validated here,
/// unlike the other handlers in this file — confirm whether that is intended.
pub fn nt_query_virtual_memory(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base_address = emu.regs().rdx;
    let memory_information_class = emu.regs().r8;
    let memory_information = emu.regs().r9;
    let rsp = emu.regs().rsp;
    // Fifth and sixth parameters arrive on the stack.
    let memory_information_length = emu.maps.read_qword(rsp + 0x28).unwrap_or(0);
    let return_length_ptr = emu.maps.read_qword(rsp + 0x30).unwrap_or(0);
    log_orange!(
        emu,
        "syscall 0x{:x}: NtQueryVirtualMemory process: 0x{:x}, base: 0x{:x}, class: 0x{:x}, out: 0x{:x}, len: 0x{:x}, ret_len_ptr: 0x{:x}",
        WIN64_NTQUERYVIRTUALMEMORY,
        process_handle,
        base_address,
        memory_information_class,
        memory_information,
        memory_information_length,
        return_length_ptr
    );
    // The output buffer must exist and be mapped.
    if memory_information == 0 || !emu.maps.is_mapped(memory_information) {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    if memory_information_class == MEMORY_INFORMATION_CLASS_MEMORY_IMAGE_INFORMATION {
        // MEMORY_IMAGE_INFORMATION: image base qword, size-of-image qword,
        // then a flags dword; 0x18 bytes total as written here.
        const MEMORY_IMAGE_INFO_SIZE: u64 = 0x18;
        if memory_information_length < MEMORY_IMAGE_INFO_SIZE {
            emu.regs_mut().rax = STATUS_INFO_LENGTH_MISMATCH;
            return;
        }
        match emu.maps.find_pe_image_info(base_address) {
            Some((image_base, size_of_image)) => {
                emu.maps.write_qword(memory_information, image_base);
                emu.maps.write_qword(memory_information + 8, size_of_image);
                // Image flags are always reported as zero.
                emu.maps.write_dword(memory_information + 16, 0);
                if return_length_ptr != 0 {
                    emu.maps.write_qword(return_length_ptr, MEMORY_IMAGE_INFO_SIZE);
                }
                emu.regs_mut().rax = STATUS_SUCCESS;
            }
            None => {
                // Address does not belong to any known PE image.
                emu.regs_mut().rax = STATUS_INVALID_ADDRESS;
            }
        }
        return;
    }
    // Only the basic-information classes remain supported past this point.
    if memory_information_class != MEMORY_INFORMATION_CLASS_MEMORY_BASIC_INFORMATION
        && memory_information_class != MEMORY_INFORMATION_CLASS_MEMORY_PRIVILEGED_BASIC_INFORMATION
    {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    if memory_information_length < MemoryBasicInformation64::SIZE {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // Build the MEMORY_BASIC_INFORMATION: either a free region (unmapped
    // address) or the whole containing map reported as committed private
    // memory.
    let mem_info = if !emu.maps.is_mapped(base_address) {
        let page_base = base_address & !0xFFF;
        // Free region extends up to the next mapped address, defaulting to
        // one page when nothing is mapped above.
        let region_size = emu
            .maps
            .next_mapped_addr(page_base)
            .map(|next| next.saturating_sub(page_base))
            .unwrap_or(0x1000);
        MemoryBasicInformation64 {
            base_address: page_base,
            allocation_base: 0,
            allocation_protect: 0,
            partition_id: 0,
            reserved: 0,
            region_size,
            state: MEM_FREE,
            protect: PAGE_NOACCESS,
            typ: 0,
        }
    } else {
        let base = emu.maps.get_addr_base(base_address).unwrap_or(0);
        let region_size = emu
            .maps
            .get_mem_by_addr(base_address)
            .map(|m| m.size() as u64)
            .unwrap_or(0);
        // Current protection of the containing map, defaulting to
        // PAGE_READWRITE when the map cannot be resolved.
        let protect = emu
            .maps
            .get_mem_by_addr(base_address)
            .map(|m| permission_to_nt_page_protection(m.permission()))
            .unwrap_or(PAGE_READWRITE);
        MemoryBasicInformation64 {
            base_address: base,
            allocation_base: base,
            allocation_protect: protect,
            partition_id: 0,
            reserved: 0,
            region_size,
            state: MEM_COMMIT,
            protect,
            typ: MEM_PRIVATE,
        }
    };
    mem_info.save(memory_information, &mut emu.maps);
    if return_length_ptr != 0 {
        if !emu
            .maps
            .write_qword(return_length_ptr, MemoryBasicInformation64::SIZE)
        {
            emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
            return;
        }
    }
    emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Emulates NtAllocateVirtualMemory.
///
/// x64 syscall args: rcx = process handle, rdx = *BaseAddress (in/out),
/// r8 = ZeroBits (ignored), r9 = *RegionSize (in/out),
/// [rsp+0x28] = AllocationType, [rsp+0x30] = Protect.
///
/// On success the chosen base is written back through *BaseAddress and the
/// page-rounded size through *RegionSize.
pub fn nt_allocate_virtual_memory(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base_ptr = emu.regs().rdx;
    let _zero_bits = emu.regs().r8;
    let region_sz_ptr = emu.regs().r9;
    let rsp = emu.regs().rsp;
    // Fifth and sixth parameters arrive on the stack.
    let alloc_type = emu.maps.read_dword(rsp + 0x28).unwrap_or(0);
    let protect = emu.maps.read_dword(rsp + 0x30).unwrap_or(0);
    // Best-effort reads of the in/out values, used only for the trace line.
    let size_in = if region_sz_ptr != 0 && emu.maps.is_mapped(region_sz_ptr) {
        emu.maps.read_qword(region_sz_ptr).unwrap_or(0)
    } else {
        0
    };
    let pref_in = if base_ptr != 0 && emu.maps.is_mapped(base_ptr) {
        emu.maps.read_qword(base_ptr).unwrap_or(0)
    } else {
        0
    };
    log_orange!(
        emu,
        "syscall 0x{:x}: NtAllocateVirtualMemory rcx/h: 0x{:x} rdx/base_ptr: 0x{:x} *Base(in): 0x{:x} r9/region_sz_ptr: 0x{:x} *Size(in): 0x{:x} [rsp+28]/type: 0x{:x} [rsp+30]/prot: 0x{:x}",
        WIN64_NTALLOCATEVIRTUALMEMORY,
        process_handle,
        base_ptr,
        pref_in,
        region_sz_ptr,
        size_in,
        alloc_type,
        protect
    );
    if !is_current_process_handle(process_handle) {
        emu.regs_mut().rax = STATUS_ACCESS_DENIED;
        return;
    }
    if base_ptr == 0 || region_sz_ptr == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // An unreadable RegionSize pointer is an access violation.
    let mut size = match emu.maps.read_qword(region_sz_ptr) {
        Some(s) => s,
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    if size == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // Sizes are always rounded up to whole 4 KiB pages.
    size = align_up_page(size);
    let preferred_base = match emu.maps.read_qword(base_ptr) {
        Some(b) => b,
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    // At least one of MEM_COMMIT / MEM_RESERVE must be requested.
    let mem_commit = (alloc_type & MEM_COMMIT) != 0;
    let mem_reserve = (alloc_type & MEM_RESERVE) != 0;
    if !mem_commit && !mem_reserve {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    let permission = nt_page_protection_to_permission(protect);
    // A caller-chosen base is used as-is; otherwise pick one, honoring the
    // 64 KiB allocation granularity for large reservations.
    let base = if preferred_base == 0 {
        let allocation = if mem_reserve && size >= 0x10000 {
            alloc_64k_aligned(emu, size)
        } else {
            emu.maps.alloc(size)
        };
        match allocation {
            Some(a) => a,
            None => {
                emu.regs_mut().rax = STATUS_NO_MEMORY;
                return;
            }
        }
    } else {
        preferred_base
    };
    if !emu.maps.write_qword(base_ptr, base) {
        emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
        return;
    }
    let name = format!("alloc_{:x}", base);
    match emu.maps.create_map(&name, base, size, permission) {
        Ok(_) => {}
        Err(_) => {
            // A failed create over an already-mapped base is tolerated as a
            // re-commit of an existing region; anything else is an error.
            if !emu.maps.is_mapped(base) {
                emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
                return;
            }
        }
    }
    patch_ldr_heap_list_sentinel(emu, base, size);
    // Report the page-rounded size back to the caller.
    if !emu.maps.write_qword(region_sz_ptr, size) {
        emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
        return;
    }
    emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Finds a 64 KiB-aligned base for a `size`-byte allocation, mirroring the
/// Windows allocation granularity used for MEM_RESERVE.
///
/// Over-allocates by one granule so the rounded-up base still fits inside
/// the probed range, then walks upward in 64 KiB steps (up to 64 tries)
/// looking for a base that does not overlap an existing map. Returns `None`
/// when no candidate is found or arithmetic would overflow.
fn alloc_64k_aligned(emu: &mut Emu, size: u64) -> Option<u64> {
    const GRAN: u64 = 0x10000;
    let probe = emu.maps.alloc(size.checked_add(GRAN - 1)?)?;
    // Round the probe up to the next 64 KiB boundary. The original code also
    // checked `aligned + size <= probe + size + GRAN - 1` before accepting,
    // but that inequality holds by construction (aligned <= probe + GRAN - 1),
    // so the dead check was dropped and the accept/retry paths unified.
    let mut candidate = (probe + GRAN - 1) & !(GRAN - 1);
    for _ in 0..64 {
        if !emu.maps.overlaps(candidate, size) {
            return Some(candidate);
        }
        candidate = candidate.checked_add(GRAN)?;
    }
    None
}
/// Emulates NtAllocateVirtualMemoryEx.
///
/// x64 syscall args: rcx = process handle, rdx = *BaseAddress (in/out),
/// r8 = *RegionSize (in/out), r9 = AllocationType,
/// [rsp+0x28] = PageProtection, [rsp+0x38] = ExtendedParameterCount.
/// NOTE(review): the ExtendedParameters pointer itself is never read —
/// extended parameters are effectively ignored; confirm that is acceptable.
///
/// Same allocation policy as `nt_allocate_virtual_memory`, only the argument
/// layout differs.
pub fn nt_allocate_virtual_memory_ex(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base_ptr = emu.regs().rdx;
    let region_sz_ptr = emu.regs().r8;
    let alloc_type = emu.regs().r9 as u32;
    let rsp = emu.regs().rsp;
    // Stack-passed arguments.
    let protect = emu.maps.read_dword(rsp + 0x28).unwrap_or(0);
    let ext_count = emu.maps.read_qword(rsp + 0x38).unwrap_or(0);
    // Best-effort reads of the in/out values, used only for the trace line.
    let size_in = if region_sz_ptr != 0 && emu.maps.is_mapped(region_sz_ptr) {
        emu.maps.read_qword(region_sz_ptr).unwrap_or(0)
    } else {
        0
    };
    let pref_in = if base_ptr != 0 && emu.maps.is_mapped(base_ptr) {
        emu.maps.read_qword(base_ptr).unwrap_or(0)
    } else {
        0
    };
    log_orange!(
        emu,
        "syscall 0x{:x}: NtAllocateVirtualMemoryEx rcx/h: 0x{:x} rdx/base_ptr: 0x{:x} *Base(in): 0x{:x} r8/region_sz_ptr: 0x{:x} *Size(in): 0x{:x} r9/type: 0x{:x} [rsp+28]/prot: 0x{:x} [rsp+38]/ext_count: 0x{:x}",
        WIN64_NTALLOCATEVIRTUALMEMORYEX,
        process_handle,
        base_ptr,
        pref_in,
        region_sz_ptr,
        size_in,
        alloc_type,
        protect,
        ext_count
    );
    if !is_current_process_handle(process_handle) {
        emu.regs_mut().rax = STATUS_ACCESS_DENIED;
        return;
    }
    if base_ptr == 0 || region_sz_ptr == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // An unreadable RegionSize pointer is an access violation.
    let mut size = match emu.maps.read_qword(region_sz_ptr) {
        Some(s) => s,
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    if size == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // Sizes are always rounded up to whole 4 KiB pages.
    size = align_up_page(size);
    let preferred_base = match emu.maps.read_qword(base_ptr) {
        Some(b) => b,
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    // At least one of MEM_COMMIT / MEM_RESERVE must be requested.
    let mem_commit = (alloc_type & MEM_COMMIT) != 0;
    let mem_reserve = (alloc_type & MEM_RESERVE) != 0;
    if !mem_commit && !mem_reserve {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    let permission = nt_page_protection_to_permission(protect);
    // A caller-chosen base is used as-is; otherwise pick one, honoring the
    // 64 KiB allocation granularity for large reservations.
    let base = if preferred_base == 0 {
        let allocation = if mem_reserve && size >= 0x10000 {
            alloc_64k_aligned(emu, size)
        } else {
            emu.maps.alloc(size)
        };
        match allocation {
            Some(a) => a,
            None => {
                emu.regs_mut().rax = STATUS_NO_MEMORY;
                return;
            }
        }
    } else {
        preferred_base
    };
    if !emu.maps.write_qword(base_ptr, base) {
        emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
        return;
    }
    let name = format!("alloc_{:x}", base);
    match emu.maps.create_map(&name, base, size, permission) {
        Ok(_) => {}
        Err(_) => {
            // A failed create over an already-mapped base is tolerated as a
            // re-commit of an existing region; anything else is an error.
            if !emu.maps.is_mapped(base) {
                emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
                return;
            }
        }
    }
    patch_ldr_heap_list_sentinel(emu, base, size);
    // Report the page-rounded size back to the caller.
    if !emu.maps.write_qword(region_sz_ptr, size) {
        emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
        return;
    }
    emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Emulates NtFreeVirtualMemory.
///
/// x64 syscall args: rcx = process handle, rdx = *BaseAddress,
/// r8 = *RegionSize (in/out), r9 = FreeType. Exactly one of MEM_RELEASE /
/// MEM_DECOMMIT must be set. Release unmaps the containing region (with
/// several leniencies when winapi emulation is on); decommit is modeled by
/// zero-filling the range while keeping the mapping.
pub fn nt_free_virtual_memory(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base_ptr = emu.regs().rdx;
    let region_sz_ptr = emu.regs().r8;
    let free_type = emu.regs().r9 as u32;
    // Human-readable views of the in/out pointers, for the trace line only.
    let base_disp: String = if base_ptr == 0 {
        "—".to_string()
    } else if emu.maps.is_mapped(base_ptr) {
        format!(
            "0x{:x}",
            emu.maps.read_qword(base_ptr).unwrap_or(0)
        )
    } else {
        "? (unmapped rdx)".to_string()
    };
    let region_size_disp: String = if region_sz_ptr == 0 {
        "— (r8=0)".to_string()
    } else if emu.maps.is_mapped(region_sz_ptr) {
        format!(
            "0x{:x}",
            emu.maps.read_qword(region_sz_ptr).unwrap_or(0)
        )
    } else {
        "? (unmapped r8)".to_string()
    };
    log_orange!(
        emu,
        "syscall 0x{:x}: NtFreeVirtualMemory rcx/h: 0x{:x} rdx/base_ptr: 0x{:x} *BaseAddress: {} r8/region_sz_ptr: 0x{:x} *RegionSize: {} r9/free_type: 0x{:x}",
        WIN64_NTFREEVIRTUALMEMORY,
        process_handle,
        base_ptr,
        base_disp,
        region_sz_ptr,
        region_size_disp,
        free_type
    );
    if !is_current_process_handle(process_handle) {
        emu.regs_mut().rax = STATUS_ACCESS_DENIED;
        return;
    }
    if base_ptr == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // An unreadable BaseAddress pointer is an access violation.
    let base = match emu.maps.read_qword(base_ptr) {
        Some(b) => b,
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    // MEM_RELEASE and MEM_DECOMMIT are mutually exclusive.
    if (free_type & MEM_RELEASE) != 0 && (free_type & MEM_DECOMMIT) != 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    if (free_type & MEM_RELEASE) != 0 {
        // `base` may be the allocation base itself, or an inner address
        // whose allocation base is tracked separately.
        let alloc_base = emu.maps.alloc_region_base_for_free(base);
        let base_mapped = emu.maps.is_mapped(base);
        let region_size = if region_sz_ptr != 0 {
            emu.maps.read_qword(region_sz_ptr).unwrap_or(0)
        } else {
            0
        };
        // Resolve the map covering the release target to its (base, size).
        let map_for_base = if base_mapped {
            emu.maps.get_mem_by_addr(base).map(|m| (m.get_base(), m.size() as u64))
        } else if let Some(ab) = alloc_base {
            emu.maps.get_mem_by_addr(ab).map(|m| (m.get_base(), m.size() as u64))
        } else {
            None
        };
        // Under winapi emulation, releasing only part of a map (smaller size
        // or an interior base) is accepted as a successful no-op, since the
        // emulator cannot split maps at page granularity.
        let is_partial_release = emu.cfg.emulate_winapi
            && region_size != 0
            && match map_for_base {
                Some((mb, msz)) => region_size < msz || base != mb,
                None => false,
            };
        let released = if is_partial_release {
            true
        } else if base_mapped {
            emu.maps.dealloc(base);
            true
        } else if let Some(ab) = alloc_base {
            emu.maps.dealloc(ab);
            true
        } else {
            false
        };
        if !released {
            // Unknown address: under winapi emulation, addresses inside the
            // emulator's allocation window are forgiven with SUCCESS.
            if emu.cfg.emulate_winapi
                && base >= ALLOC64_MIN
                && base < ALLOC64_MAX
            {
                emu.regs_mut().rax = STATUS_SUCCESS;
                return;
            }
            emu.regs_mut().rax = STATUS_INVALID_ADDRESS;
            return;
        }
        // A successful MEM_RELEASE reports a region size of zero.
        if region_sz_ptr != 0 {
            let _ = emu.maps.write_qword(region_sz_ptr, 0);
        }
        emu.regs_mut().rax = STATUS_SUCCESS;
        return;
    }
    if (free_type & MEM_DECOMMIT) != 0 {
        let mut sz = if region_sz_ptr != 0 {
            emu.maps.read_qword(region_sz_ptr).unwrap_or(0)
        } else {
            0
        };
        if !emu.maps.is_mapped(base) {
            emu.regs_mut().rax = STATUS_INVALID_ADDRESS;
            return;
        }
        // A zero or absent size means "decommit to the end of the region".
        if region_sz_ptr == 0 || sz == 0 {
            sz = emu
                .maps
                .get_mem_by_addr(base)
                .map(|m| {
                    let mb = m.get_base();
                    let top = mb.saturating_add(m.size() as u64);
                    top.saturating_sub(base)
                })
                .unwrap_or(0);
        }
        if sz == 0 {
            emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
            return;
        }
        // Decommit is modeled by zero-filling, capped at 256 MiB to bound
        // the memset; the mapping itself stays in place.
        let cap = sz.min(0x1000_0000) as usize;
        emu.maps.memset(base, 0, cap);
        if region_sz_ptr != 0 {
            let _ = emu.maps.write_qword(region_sz_ptr, cap as u64);
        }
        emu.regs_mut().rax = STATUS_SUCCESS;
        return;
    }
    // Neither MEM_RELEASE nor MEM_DECOMMIT was requested.
    emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
}
pub fn nt_protect_virtual_memory(emu: &mut Emu) {
let process_handle = emu.regs().rcx;
let base_ptr = emu.regs().rdx;
let region_sz_ptr = emu.regs().r8;
let new_protect = emu.regs().r9 as u32;
let rsp = emu.regs().rsp;
let old_protect_ptr = emu.maps.read_qword(rsp + 0x28).unwrap_or(0);
log_orange!(
emu,
"syscall 0x{:x}: NtProtectVirtualMemory h: 0x{:x} base_ptr: 0x{:x} new_prot: 0x{:x}",
WIN64_NTPROTECTVIRTUALMEMORY,
process_handle,
base_ptr,
new_protect
);
if !is_current_process_handle(process_handle) {
emu.regs_mut().rax = STATUS_ACCESS_DENIED;
return;
}
if base_ptr == 0 || region_sz_ptr == 0 {
emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
return;
}
let base = emu.maps.read_qword(base_ptr).unwrap_or(0);
let _region_sz = emu.maps.read_qword(region_sz_ptr).unwrap_or(0);
let old_protect = if let Some(mem) = emu.maps.get_mem_by_addr(base) {
permission_to_nt_page_protection(mem.permission())
} else {
0x04 };
if old_protect_ptr != 0 {
let _ = emu.maps.write_dword(old_protect_ptr as u64, old_protect);
}
if let Some(mem) = emu.maps.get_mem_by_addr_mut(base) {
mem.set_permission(nt_page_protection_to_permission(new_protect));
}
emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Emulates NtReadVirtualMemory: copies `size` bytes from `base` into
/// `buffer` within the emulated address space and optionally reports the
/// byte count through the NumberOfBytesRead pointer at [rsp+0x28].
pub fn nt_read_virtual_memory(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base = emu.regs().rdx;
    let buffer = emu.regs().r8;
    let size = emu.regs().r9;
    let rsp = emu.regs().rsp;
    let bytes_read_ptr = emu.maps.read_qword(rsp + 0x28).unwrap_or(0);
    log_orange!(
        emu,
        "syscall 0x{:x}: NtReadVirtualMemory h: 0x{:x} from: 0x{:x} to: 0x{:x} len: 0x{:x}",
        WIN64_NTREADVIRTUALMEMORY,
        process_handle,
        base,
        buffer,
        size
    );
    if !is_current_process_handle(process_handle) {
        emu.regs_mut().rax = STATUS_ACCESS_DENIED;
        return;
    }
    if buffer == 0 || size == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // Clamp to usize so the length can be used as a slice size.
    let len = size.min(usize::MAX as u64) as usize;
    // Copy out of the source first; an unreadable source range fails.
    let copied = match emu.maps.try_read_bytes(base, len) {
        Some(bytes) => bytes.to_vec(),
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    if emu.maps.write_bytes(buffer, &copied) {
        if bytes_read_ptr != 0 {
            // Best-effort report of the transferred byte count.
            let _ = emu.maps.write_qword(bytes_read_ptr, copied.len() as u64);
        }
        emu.regs_mut().rax = STATUS_SUCCESS;
    } else {
        emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
    }
}
/// Emulates NtWriteVirtualMemory: copies `size` bytes from `buffer` to
/// `base` within the emulated address space and optionally reports the byte
/// count through the NumberOfBytesWritten pointer at [rsp+0x28].
pub fn nt_write_virtual_memory(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base = emu.regs().rdx;
    let buffer = emu.regs().r8;
    let size = emu.regs().r9;
    let rsp = emu.regs().rsp;
    let bytes_written_ptr = emu.maps.read_qword(rsp + 0x28).unwrap_or(0);
    log_orange!(
        emu,
        "syscall 0x{:x}: NtWriteVirtualMemory h: 0x{:x} to: 0x{:x} from: 0x{:x} len: 0x{:x}",
        WIN64_NTWRITEVIRTUALMEMORY,
        process_handle,
        base,
        buffer,
        size
    );
    if !is_current_process_handle(process_handle) {
        emu.regs_mut().rax = STATUS_ACCESS_DENIED;
        return;
    }
    if buffer == 0 || size == 0 {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // Clamp to usize so the length can be used as a slice size.
    let len = size.min(usize::MAX as u64) as usize;
    // Copy out of the source buffer first; an unreadable source fails.
    let payload = match emu.maps.try_read_bytes(buffer, len) {
        Some(bytes) => bytes.to_vec(),
        None => {
            emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
            return;
        }
    };
    if emu.maps.write_bytes(base, &payload) {
        if bytes_written_ptr != 0 {
            // Best-effort report of the transferred byte count.
            let _ = emu.maps.write_qword(bytes_written_ptr, payload.len() as u64);
        }
        emu.regs_mut().rax = STATUS_SUCCESS;
    } else {
        emu.regs_mut().rax = STATUS_ACCESS_VIOLATION;
    }
}
/// Emulates NtUnmapViewOfSection as a no-op: validates the process handle
/// and reports success without actually unmapping anything.
pub fn nt_unmap_view_of_section(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let base = emu.regs().rdx;
    log_orange!(
        emu,
        "syscall 0x{:x}: NtUnmapViewOfSection h: 0x{:x} base: 0x{:x}",
        WIN64_NTUNMAPVIEWOFSECTION,
        process_handle,
        base
    );
    emu.regs_mut().rax = if is_current_process_handle(process_handle) {
        STATUS_SUCCESS
    } else {
        STATUS_ACCESS_DENIED
    };
}
/// Emulates NtMapViewOfSection.
///
/// x64 syscall args: rcx = section handle, rdx = process handle,
/// r8 = BaseAddress pointer (in/out), [rsp+0x38] = ViewSize pointer (in/out),
/// [rsp+0x50] = page protection.
///
/// When the section handle was registered by NtOpenSection as a KnownDll,
/// the corresponding DLL is loaded and its base/size are written back.
/// Otherwise an anonymous "section_view" map is created — at the requested
/// base when it is sane and free, else at an allocator-chosen base.
pub fn nt_map_view_of_section(emu: &mut Emu) {
    let section_handle = emu.regs().rcx;
    let process_handle = emu.regs().rdx;
    let base_addr_ptr = emu.regs().r8;
    let rsp = emu.regs().rsp;
    let view_size_ptr = emu.maps.read_qword(rsp + 0x38).unwrap_or(0);
    let protect = emu.maps.read_dword(rsp + 0x50).unwrap_or(4);
    // Best-effort reads of the in/out parameters.
    let requested_base = if base_addr_ptr != 0 && emu.maps.is_mapped(base_addr_ptr) {
        emu.maps.read_qword(base_addr_ptr).unwrap_or(0)
    } else {
        0
    };
    let view_size = if view_size_ptr != 0 && emu.maps.is_mapped(view_size_ptr) {
        emu.maps.read_qword(view_size_ptr).unwrap_or(0x1000)
    } else {
        0x1000
    };
    // Round the requested view up to a page; a zero view size maps one page.
    let size = if view_size == 0 { 0x1000 } else { (view_size + 0xfff) & !0xfff };
    log_orange!(
        emu,
        "syscall 0x{:x}: NtMapViewOfSection base_ptr: 0x{:x} req_base: 0x{:x} size: 0x{:x} prot: 0x{:x}",
        WIN64_NTMAPVIEWOFSECTION,
        base_addr_ptr,
        requested_base,
        size,
        protect,
    );
    if !is_current_process_handle(process_handle) {
        emu.regs_mut().rax = STATUS_ACCESS_DENIED;
        return;
    }
    // KnownDll sections: load the tracked DLL and report its image range.
    if let Some(dll_name) = emu.section_handles.get(&section_handle).cloned() {
        let dll_base = crate::api::windows::winapi64::kernel32::load_library(emu, &dll_name);
        if dll_base != 0 {
            log::trace!(
                "NtMapViewOfSection: KnownDll {} loaded at 0x{:x}",
                dll_name, dll_base
            );
            if base_addr_ptr != 0 && emu.maps.is_mapped(base_addr_ptr) {
                let _ = emu.maps.write_qword(base_addr_ptr, dll_base);
            }
            // SizeOfImage from the PE optional header (e_lfanew + 0x50);
            // fall back to one page when the header cannot be parsed.
            let size_of_image: u64 = {
                let pe_off = emu.maps.read_dword(dll_base + 0x3c).unwrap_or(0) as u64;
                if pe_off > 0 {
                    emu.maps.read_dword(dll_base + pe_off + 0x50).unwrap_or(0x1000) as u64
                } else {
                    0x1000
                }
            };
            if view_size_ptr != 0 && emu.maps.is_mapped(view_size_ptr) {
                let _ = emu.maps.write_qword(view_size_ptr, size_of_image);
            }
            emu.regs_mut().rax = STATUS_SUCCESS;
            return;
        }
    }
    let perm = nt_page_protection_to_permission(protect);
    // Honor the caller's preferred base when it is above the null-guard
    // region and free; otherwise fall back to the library allocator.
    let mapped_base = if requested_base >= 0x10000 && !emu.maps.is_mapped(requested_base) {
        let name = format!("section_view_{:x}", requested_base);
        match emu.maps.create_map(&name, requested_base, size, perm) {
            Ok(_) => requested_base,
            Err(_) => {
                let base = emu.maps.lib64_alloc(size).unwrap_or(0);
                if base != 0 {
                    let name2 = format!("section_view_{:x}", base);
                    let _ = emu.maps.create_map(&name2, base, size, perm);
                }
                base
            }
        }
    } else {
        let base = emu.maps.lib64_alloc(size).unwrap_or(0);
        if base != 0 {
            let name = format!("section_view_{:x}", base);
            // Explicitly discard the Result, consistent with the fallback
            // branch above (the original dropped it implicitly, tripping
            // the unused-Result lint).
            let _ = emu.maps.create_map(&name, base, size, perm);
        }
        base
    };
    if mapped_base == 0 {
        emu.regs_mut().rax = STATUS_NO_MEMORY;
        return;
    }
    if base_addr_ptr != 0 && emu.maps.is_mapped(base_addr_ptr) {
        let _ = emu.maps.write_qword(base_addr_ptr, mapped_base);
    }
    if view_size_ptr != 0 && emu.maps.is_mapped(view_size_ptr) {
        let _ = emu.maps.write_qword(view_size_ptr, size);
    }
    emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Emulates NtAllocateUserPhysicalPagesEx by pretending zero pages were
/// allocated: writes 0 back through the NumberOfPages pointer (when
/// writable) and returns success. The PFN array is ignored.
pub fn nt_allocate_user_physical_pages_ex(emu: &mut Emu) {
    let process_handle = emu.regs().rcx;
    let num_pages_ptr = emu.regs().rdx;
    let _pfn_array = emu.regs().r8;
    // Best-effort read, used only for the trace line.
    let num_pages = emu.maps.read_qword(num_pages_ptr).unwrap_or(0);
    log_orange!(
        emu,
        "syscall 0x{:x}: NtAllocateUserPhysicalPagesEx h: 0x{:x} num_pages: {}",
        WIN64_NTALLOCATEUSERPHYSICALPAGESEX,
        process_handle,
        num_pages
    );
    let ptr_writable = num_pages_ptr != 0 && emu.maps.is_mapped(num_pages_ptr);
    if ptr_writable {
        // Report that no physical pages were actually allocated.
        let _ = emu.maps.write_qword(num_pages_ptr, 0);
    }
    emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Emulates NtOpenSection.
///
/// x64 syscall args: rcx = pointer receiving the section handle,
/// rdx = desired access, r8 = OBJECT_ATTRIBUTES pointer.
///
/// KnownDll sections — referenced either under a previously-opened
/// \KnownDlls directory handle or by a full "\KnownDlls\name.dll" path —
/// are remembered in `emu.section_handles` so that a later
/// NtMapViewOfSection can load the actual DLL.
pub fn nt_open_section(emu: &mut Emu) {
    let handle_out = emu.regs().rcx;
    let desired_access = emu.regs().rdx;
    let obj_attr = emu.regs().r8;
    let section_name = read_object_attributes_name(emu, obj_attr);
    log_orange!(
        emu,
        "syscall 0x{:x}: NtOpenSection handle_out: 0x{:x}, access: 0x{:x}, obj_attr: 0x{:x} name: {:?}",
        WIN64_NTOPENSECTION,
        handle_out,
        desired_access,
        obj_attr,
        section_name,
    );
    if handle_out == 0 || !emu.maps.is_mapped(handle_out) {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
        return;
    }
    // A KnownDll open either uses a RootDirectory handle previously opened
    // for \KnownDlls (name is then relative), or a fully-qualified
    // "\KnownDlls\..." object path.
    let root_dir = read_object_attributes_root_directory(emu, obj_attr);
    let is_known_dll_dir = root_dir != 0 && emu.known_dll_dir_handles.contains(&root_dir);
    let dll_name = if is_known_dll_dir {
        if !section_name.is_empty() {
            Some(section_name.to_lowercase())
        } else {
            None
        }
    } else {
        extract_known_dll_name(&section_name)
    };
    if is_known_dll_dir && dll_name.is_none() {
        // NOTE(review): FALLBACK_BUDGET is 0, so `count() < 0` can never be
        // true and this kernelbase.dll api-set fallback is dead code —
        // confirm whether that is a deliberate kill-switch.
        const FALLBACK_BUDGET: usize = 0;
        if emu.section_handles.values().filter(|n| *n == "kernelbase.dll").count() < FALLBACK_BUDGET {
            let h = crate::syscall::windows::syscall64::sync::next_handle();
            let _ = emu.maps.write_qword(handle_out, h);
            log::trace!(
                "NtOpenSection: empty-name KnownDll → handle 0x{:x} -> kernelbase.dll (api-set fallback)",
                h
            );
            emu.section_handles.insert(h, "kernelbase.dll".to_string());
            emu.regs_mut().rax = STATUS_SUCCESS;
            return;
        }
        emu.regs_mut().rax = STATUS_OBJECT_NAME_NOT_FOUND;
        return;
    }
    // Hand out a fresh pseudo-handle; remember it when it names a KnownDll.
    let h = crate::syscall::windows::syscall64::sync::next_handle();
    let _ = emu.maps.write_qword(handle_out, h);
    if let Some(dll_name) = dll_name {
        log::trace!("NtOpenSection: tracking KnownDll handle 0x{:x} -> {}", h, dll_name);
        emu.section_handles.insert(h, dll_name);
    }
    emu.regs_mut().rax = STATUS_SUCCESS;
}
/// Extracts the lower-cased DLL name from a `\KnownDlls\…` or
/// `\KnownDlls32\…` object path (matched case-insensitively). Returns
/// `None` for any other path, or when the name part is empty.
fn extract_known_dll_name(path: &str) -> Option<String> {
    let lower = path.to_lowercase();
    let prefix_len = if lower.starts_with("\\knowndlls32\\") {
        "\\knowndlls32\\".len()
    } else if lower.starts_with("\\knowndlls\\") {
        "\\knowndlls\\".len()
    } else {
        return None;
    };
    // Slice the original path (the prefixes are pure ASCII, so the byte
    // length is the same in both casings) and normalize the remainder.
    match &path[prefix_len..] {
        "" => None,
        rest => Some(rest.to_lowercase()),
    }
}
/// Reads the wide string referenced by a UNICODE_STRING structure at `addr`
/// (the Buffer pointer lives at offset 8). Returns an empty string when the
/// structure or its buffer is null/unmapped.
fn read_unicode_string(emu: &Emu, addr: u64) -> String {
    if addr == 0 || !emu.maps.is_mapped(addr) {
        return String::new();
    }
    // The Length field is read but not used; the buffer is consumed as a
    // NUL-terminated wide string instead. TODO(review): honor Length for
    // strings that are not NUL-terminated.
    let _len = emu.maps.read_word(addr).unwrap_or(0);
    match emu.maps.read_qword(addr + 8) {
        Some(buf) if buf != 0 && emu.maps.is_mapped(buf) => emu.maps.read_wide_string(buf),
        _ => String::new(),
    }
}
/// Fetches the ObjectName (a UNICODE_STRING pointer at offset 0x10 of an
/// OBJECT_ATTRIBUTES structure) and decodes it; returns an empty string on
/// any failure.
fn read_object_attributes_name(emu: &Emu, addr: u64) -> String {
    if addr != 0 && emu.maps.is_mapped(addr) {
        let name_ptr = emu.maps.read_qword(addr + 0x10).unwrap_or(0);
        read_unicode_string(emu, name_ptr)
    } else {
        String::new()
    }
}
/// Returns the RootDirectory handle (offset 0x08 of an OBJECT_ATTRIBUTES
/// structure), or 0 when the structure is null/unmapped/unreadable.
fn read_object_attributes_root_directory(emu: &Emu, addr: u64) -> u64 {
    if addr != 0 && emu.maps.is_mapped(addr) {
        emu.maps.read_qword(addr + 0x08).unwrap_or(0)
    } else {
        0
    }
}
/// Emulates NtCreateSection: validates the output handle pointer, then hands
/// back a fresh pseudo-handle without backing the section with any storage.
pub fn nt_create_section(emu: &mut Emu) {
    let handle_out = emu.regs().rcx;
    let desired_access = emu.regs().rdx;
    let _object_attributes = emu.regs().r8;
    let max_size = emu.regs().r9;
    let rsp = emu.regs().rsp;
    // Stack-passed arguments (x64 syscall convention).
    let page_protection = emu.maps.read_dword(rsp + 0x28).unwrap_or(0);
    let alloc_attributes = emu.maps.read_dword(rsp + 0x30).unwrap_or(0);
    let file_handle = emu.maps.read_qword(rsp + 0x38).unwrap_or(0);
    log_orange!(
        emu,
        "syscall 0x{:x}: NtCreateSection handle_out: 0x{:x}, access: 0x{:x}, max_size: 0x{:x}, prot: 0x{:x}, alloc: 0x{:x}, file: 0x{:x}",
        WIN64_NTCREATESECTION,
        handle_out,
        desired_access,
        max_size,
        page_protection,
        alloc_attributes,
        file_handle,
    );
    if handle_out != 0 && emu.maps.is_mapped(handle_out) {
        let handle = crate::syscall::windows::syscall64::sync::next_handle();
        let _ = emu.maps.write_qword(handle_out, handle);
        emu.regs_mut().rax = STATUS_SUCCESS;
    } else {
        emu.regs_mut().rax = STATUS_INVALID_PARAMETER;
    }
}