use core::ptr;
use wdk::println;
use wdk_sys::{
ntddk::{
ExAllocatePool2, ExFreePoolWithTag,
IoAllocateMdl, IoFreeMdl, MmProbeAndLockPages, MmUnlockPages,
MmGetSystemAddressForMdlSafe, MmBuildMdlForNonPagedPool,
ProbeForRead, ProbeForWrite,
},
POOL_FLAG_NON_PAGED, POOL_FLAG_PAGED, POOL_FLAG_NON_PAGED_EXECUTE,
PMDL, PVOID, ULONG, LOCK_OPERATION, MM_PAGE_PRIORITY,
};
/// Pool tag ("LVTN" read as little-endian bytes) identifying this driver's
/// allocations in pool-tracking tools (PoolMon, WinDbg `!pool`).
pub const POOL_TAG: u32 = u32::from_le_bytes(*b"LVTN");
/// Pool flavour for a [`PoolAllocation`]; translated to `POOL_FLAGS` for
/// `ExAllocatePool2` by `PoolType::to_flags`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PoolType {
// Non-paged (always resident) pool.
NonPaged,
// Paged pool; only usable at IRQL < DISPATCH_LEVEL.
Paged,
// Intended as no-execute non-paged pool; see `PoolType::to_flags` for the
// exact flag combination used.
NonPagedNx,
}
impl PoolType {
    /// Translate the pool type into `POOL_FLAGS` for `ExAllocatePool2`.
    ///
    /// Note on `NonPagedNx`: with `ExAllocatePool2`, `POOL_FLAG_NON_PAGED`
    /// is already non-executable by default, and `POOL_FLAG_NON_PAGED_EXECUTE`
    /// is the opt-in *executable* variant. The previous mapping OR'ed
    /// `POOL_FLAG_NON_PAGED_EXECUTE` into `NonPagedNx`, which requested
    /// executable non-paged pool — the opposite of "Nx" (no-execute).
    /// `NonPagedNx` therefore now maps to plain `POOL_FLAG_NON_PAGED`.
    fn to_flags(self) -> u64 {
        match self {
            PoolType::NonPaged => POOL_FLAG_NON_PAGED as u64,
            PoolType::Paged => POOL_FLAG_PAGED as u64,
            // NX is the ExAllocatePool2 default for non-paged allocations.
            PoolType::NonPagedNx => POOL_FLAG_NON_PAGED as u64,
        }
    }
}
/// RAII wrapper around a tagged `ExAllocatePool2` allocation; the buffer is
/// scrubbed and freed with [`POOL_TAG`] when the value is dropped.
pub struct PoolAllocation {
// Base address returned by ExAllocatePool2 (non-null once constructed).
ptr: PVOID,
// Allocation size in bytes, as requested at construction.
size: usize,
// Pool flavour the buffer was allocated from.
pool_type: PoolType,
}
impl PoolAllocation {
    /// Allocate `size` zeroed bytes tagged with [`POOL_TAG`] from the pool
    /// selected by `pool_type`.
    ///
    /// Returns `None` for a zero-length request or if the allocation fails
    /// (a failure is logged).
    ///
    /// # Safety
    /// Must be called at an IRQL valid for the requested pool type
    /// (`Paged` requires IRQL < DISPATCH_LEVEL).
    pub unsafe fn new(size: usize, pool_type: PoolType) -> Option<Self> {
        if size == 0 {
            return None;
        }
        let ptr = unsafe { ExAllocatePool2(pool_type.to_flags(), size as u64, POOL_TAG) };
        if ptr.is_null() {
            println!("[Leviathan] Pool allocation failed: size={}", size);
            return None;
        }
        // ExAllocatePool2 zero-initializes the buffer unless
        // POOL_FLAG_UNINITIALIZED is passed (it never is here), so the
        // explicit ptr::write_bytes the original performed was redundant.
        Some(Self { ptr, size, pool_type })
    }
    /// Raw base pointer of the allocation.
    pub fn as_ptr(&self) -> PVOID {
        self.ptr
    }
    /// Raw base pointer of the allocation (mutable receiver).
    pub fn as_mut_ptr(&mut self) -> PVOID {
        self.ptr
    }
    /// Size of the allocation in bytes, as requested at construction.
    pub fn size(&self) -> usize {
        self.size
    }
    /// Reinterpret the buffer as `*const T`.
    ///
    /// # Safety
    /// Caller must ensure `T` fits within `size` and that the allocation's
    /// alignment satisfies `T`'s alignment requirement.
    pub unsafe fn as_typed<T>(&self) -> *const T {
        self.ptr as *const T
    }
    /// Reinterpret the buffer as `*mut T`.
    ///
    /// # Safety
    /// Same requirements as [`Self::as_typed`].
    pub unsafe fn as_typed_mut<T>(&mut self) -> *mut T {
        self.ptr as *mut T
    }
    /// View the allocation as a byte slice.
    ///
    /// # Safety
    /// Caller must guarantee no concurrent mutation through other aliases
    /// for the lifetime of the returned slice.
    pub unsafe fn as_slice(&self) -> &[u8] {
        unsafe { core::slice::from_raw_parts(self.ptr as *const u8, self.size) }
    }
    /// View the allocation as a mutable byte slice.
    ///
    /// # Safety
    /// Caller must guarantee the returned slice is the only active alias.
    pub unsafe fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe { core::slice::from_raw_parts_mut(self.ptr as *mut u8, self.size) }
    }
}
impl Drop for PoolAllocation {
    /// Scrub and release the allocation.
    fn drop(&mut self) {
        if self.ptr.is_null() {
            return;
        }
        // Zero with volatile stores so the scrub cannot be elided: a plain
        // ptr::write_bytes immediately before the free is a dead store the
        // optimizer may remove. This matches `secure_zero`'s approach.
        let mut byte = self.ptr as *mut u8;
        for _ in 0..self.size {
            unsafe {
                ptr::write_volatile(byte, 0);
                byte = byte.add(1);
            }
        }
        core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
        unsafe { ExFreePoolWithTag(self.ptr, POOL_TAG) };
    }
}
/// RAII wrapper around a `PMDL`: frees the MDL on drop, unlocking the
/// described pages first when they were locked.
pub struct Mdl {
// Raw MDL allocated by IoAllocateMdl.
mdl: PMDL,
// Set by `lock_pages` and `build_for_nonpaged`; gates `get_system_address`
// and the drop-time unlock.
locked: bool,
// Cached result of MmGetSystemAddressForMdlSafe (null until first mapped).
system_address: PVOID,
}
impl Mdl {
/// Allocate an MDL describing `length` bytes at `virtual_address` via
/// `IoAllocateMdl`, not associated with any IRP.
///
/// Returns `None` if allocation fails. Pages are NOT locked yet; call
/// [`Self::lock_pages`] or [`Self::build_for_nonpaged`] before mapping.
///
/// # Safety
/// `virtual_address`/`length` must describe the buffer the MDL will be
/// used for; the usual IoAllocateMdl IRQL rules apply.
pub unsafe fn new(
virtual_address: PVOID,
length: usize,
secondary_buffer: bool,
charge_quota: bool,
) -> Option<Self> {
let mdl = unsafe {
IoAllocateMdl(
virtual_address,
length as u32,
secondary_buffer as u8,
charge_quota as u8,
// No IRP: the caller manages the MDL lifetime directly.
ptr::null_mut(),
)
};
if mdl.is_null() {
return None;
}
Some(Self {
mdl,
locked: false,
system_address: ptr::null_mut(),
})
}
/// Probe and lock the described pages for `operation`, in KernelMode.
/// Idempotent: returns `Ok(())` immediately if already locked.
///
/// NOTE(review): MmProbeAndLockPages raises an SEH exception (it does not
/// return a status) when the buffer is invalid; Rust cannot catch that
/// here, so this function never actually returns `Err`. Confirm callers
/// only pass valid, resident kernel buffers.
pub unsafe fn lock_pages(&mut self, operation: LOCK_OPERATION) -> Result<(), ()> {
if self.locked {
return Ok(());
}
unsafe {
MmProbeAndLockPages(
self.mdl,
wdk_sys::MODE::KernelMode as i8,
operation,
);
}
self.locked = true;
Ok(())
}
/// Map the locked pages into system address space and cache the mapping.
///
/// Returns `None` if the pages are not locked yet, or if the mapping
/// itself fails (MmGetSystemAddressForMdlSafe returned null).
pub unsafe fn get_system_address(&mut self, priority: MM_PAGE_PRIORITY) -> Option<PVOID> {
if !self.locked {
return None;
}
if self.system_address.is_null() {
self.system_address = unsafe {
// priority cast: MM_PAGE_PRIORITY enum -> ULONG parameter.
MmGetSystemAddressForMdlSafe(self.mdl, priority as u32)
};
}
if self.system_address.is_null() {
None
} else {
Some(self.system_address)
}
}
/// Initialize the MDL for non-paged-pool memory (no probe/lock needed).
///
/// NOTE(review): this sets `locked = true` so `get_system_address`
/// works, but MDLs initialized with MmBuildMdlForNonPagedPool must never
/// be passed to MmUnlockPages — verify the Drop path does not unlock
/// MDLs that were only built, not probe-locked.
pub unsafe fn build_for_nonpaged(&mut self) {
unsafe { MmBuildMdlForNonPagedPool(self.mdl) };
self.locked = true;
}
/// Borrow the raw `PMDL` for direct kernel API calls.
pub fn as_raw(&self) -> PMDL {
self.mdl
}
}
impl Drop for Mdl {
fn drop(&mut self) {
if self.locked && !self.mdl.is_null() {
unsafe { MmUnlockPages(self.mdl) };
}
if !self.mdl.is_null() {
unsafe { IoFreeMdl(self.mdl) };
}
}
}
/// Probe a user-mode buffer for read or (when `for_write`) write access.
///
/// Returns `Err(())` for a null pointer or zero length; otherwise forwards
/// to `ProbeForWrite` / `ProbeForRead` with the requested `alignment`.
///
/// # Safety
/// Must run at IRQL < DISPATCH_LEVEL in the context of the process owning
/// `buffer`; the probe routines raise an SEH exception on invalid buffers,
/// which this Rust code cannot catch.
pub unsafe fn probe_user_buffer(
    buffer: PVOID,
    length: usize,
    alignment: u32,
    for_write: bool,
) -> Result<(), ()> {
    if buffer.is_null() || length == 0 {
        return Err(());
    }
    match for_write {
        true => unsafe { ProbeForWrite(buffer, length as u64, alignment) },
        false => unsafe { ProbeForRead(buffer, length as u64, alignment) },
    }
    Ok(())
}
/// Copy up to `length` bytes from a user-mode buffer into `kernel_buffer`,
/// truncating to the destination's capacity.
///
/// Returns the number of bytes copied, or `Err(())` for a null source or a
/// zero-length request.
///
/// # Safety
/// Must be called in the context of the process owning `user_buffer` at
/// IRQL < DISPATCH_LEVEL; `ProbeForRead` raises an SEH exception on an
/// invalid buffer, which this Rust code cannot catch.
pub unsafe fn copy_from_user(
    kernel_buffer: &mut [u8],
    user_buffer: PVOID,
    length: usize,
) -> Result<usize, ()> {
    if user_buffer.is_null() || length == 0 {
        return Err(());
    }
    // Never copy more than the kernel-side buffer can hold.
    let copy_len = length.min(kernel_buffer.len());
    unsafe {
        ProbeForRead(user_buffer, copy_len as u64, 1);
        ptr::copy_nonoverlapping(user_buffer as *const u8, kernel_buffer.as_mut_ptr(), copy_len);
    }
    Ok(copy_len)
}
/// Copy up to `length` bytes from `kernel_buffer` into a user-mode buffer,
/// truncating to the source's length.
///
/// Returns the number of bytes copied, or `Err(())` for a null destination
/// or a zero-length request.
///
/// # Safety
/// Must be called in the context of the process owning `user_buffer` at
/// IRQL < DISPATCH_LEVEL; `ProbeForWrite` raises an SEH exception on an
/// invalid buffer, which this Rust code cannot catch.
pub unsafe fn copy_to_user(
    user_buffer: PVOID,
    kernel_buffer: &[u8],
    length: usize,
) -> Result<usize, ()> {
    if user_buffer.is_null() || length == 0 {
        return Err(());
    }
    // Never read past the end of the kernel-side source.
    let copy_len = length.min(kernel_buffer.len());
    unsafe {
        ProbeForWrite(user_buffer, copy_len as u64, 1);
        ptr::copy_nonoverlapping(kernel_buffer.as_ptr(), user_buffer as *mut u8, copy_len);
    }
    Ok(copy_len)
}
/// Zero `buffer` using volatile stores so the compiler cannot elide the
/// scrub as a dead store; the trailing compiler fence keeps the writes
/// ordered before whatever subsequently releases the memory.
pub fn secure_zero(buffer: &mut [u8]) {
    let mut cursor = buffer.as_mut_ptr();
    for _ in 0..buffer.len() {
        // SAFETY: `cursor` stays within `buffer` for each write; it only
        // reaches one-past-the-end after the final iteration, where it is
        // never dereferenced.
        unsafe {
            ptr::write_volatile(cursor, 0);
            cursor = cursor.add(1);
        }
    }
    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
/// Fixed-size pool allocator handing out `entry_size`-byte blocks.
///
/// NOTE(review): despite the name, this does not use the Executive
/// lookaside-list API (no ExInitializeLookasideListEx / per-list free-list
/// reuse); `allocate`/`free` map directly onto tagged pool operations —
/// confirm whether a real lookaside list was intended.
pub struct LookasideList {
// Size in bytes of each entry handed out by `allocate`.
entry_size: usize,
// Pool flavour for the backing allocations.
pool_type: PoolType,
}
impl LookasideList {
    /// Create an allocator that hands out `entry_size`-byte blocks from
    /// `pool_type`.
    ///
    /// # Safety
    /// Inherits the IRQL requirements of the underlying pool routines.
    pub unsafe fn new(entry_size: usize, pool_type: PoolType) -> Self {
        Self { entry_size, pool_type }
    }
    /// Allocate one zeroed entry.
    ///
    /// Returns `None` when the underlying pool allocation fails (including
    /// a zero `entry_size`). Ownership of the memory transfers to the
    /// caller, who must release it with [`Self::free`].
    ///
    /// # Safety
    /// Same IRQL requirements as [`PoolAllocation::new`].
    pub unsafe fn allocate(&self) -> Option<PVOID> {
        let allocation = PoolAllocation::new(self.entry_size, self.pool_type)?;
        let raw = allocation.as_ptr();
        // Suppress PoolAllocation's Drop: the caller now owns the memory.
        core::mem::forget(allocation);
        Some(raw)
    }
    /// Release an entry previously obtained from [`Self::allocate`].
    /// A null `entry` is ignored.
    ///
    /// # Safety
    /// `entry` must have come from this module's allocations (tag
    /// [`POOL_TAG`]) and must not be used after this call.
    pub unsafe fn free(&self, entry: PVOID) {
        if !entry.is_null() {
            unsafe { ExFreePoolWithTag(entry, POOL_TAG) };
        }
    }
}