use core::ptr;
use core::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use wdk::println;
use wdk_sys::{
ntddk::{
ExAllocatePool2, ExFreePoolWithTag, IoAllocateMdl, IoFreeMdl,
MmBuildMdlForNonPagedPool, MmMapLockedPagesSpecifyCache,
MmUnmapLockedPages, KeInitializeEvent, KeSetEvent,
},
KEVENT, NTSTATUS, PMDL, PVOID,
STATUS_INSUFFICIENT_RESOURCES, POOL_FLAG_NON_PAGED,
};
// NTSTATUS returned when the channel is used before `initialize` succeeds.
// NOTE(review): -1073741809 is 0xC000000F, which is the documented value of
// STATUS_NO_SUCH_FILE — confirm this custom alias does not collide with codes
// the user-mode client already interprets.
const STATUS_NOT_INITIALIZED: NTSTATUS = -1073741809i32;
// Pool tag "COMM" (bytes interpreted little-endian) used for every
// allocation made by this module, so leaks are attributable in a debugger.
const COMM_POOL_TAG: u32 = u32::from_le_bytes(*b"COMM");
// Default size of the ring-buffer payload area (1 MiB), header excluded.
pub const DEFAULT_RING_SIZE: usize = 1024 * 1024;
// Intended upper bound for a single event.
// NOTE(review): declared but never enforced by `write_event` — confirm
// callers respect it or add a check there.
pub const MAX_EVENT_SIZE: usize = 4096;
/// Bookkeeping header placed at the front of the shared ring buffer.
///
/// `#[repr(C)]` because the identical bytes are read by a user-mode
/// consumer; atomics give lock-free coordination between the kernel
/// producer and the user-mode reader.
#[repr(C)]
pub struct RingBufferHeader {
    pub magic: u32,
    pub version: u32,
    pub buffer_size: u32,
    pub write_pos: AtomicU32,
    pub read_pos: AtomicU32,
    pub events_written: AtomicU64,
    pub events_read: AtomicU64,
    pub events_dropped: AtomicU64,
    pub reserved: [u32; 8],
}

/// Magic value ("TVEL" as bytes) identifying an initialized header.
pub const RING_MAGIC: u32 = 0x4C455654;
/// Layout version; bump on any `#[repr(C)]` change.
pub const RING_VERSION: u32 = 1;

impl RingBufferHeader {
    /// Reset every field to the freshly-initialized state for a payload
    /// area of `buffer_size` bytes (header excluded).
    pub fn init(&mut self, buffer_size: u32) {
        *self = RingBufferHeader {
            magic: RING_MAGIC,
            version: RING_VERSION,
            buffer_size,
            write_pos: AtomicU32::new(0),
            read_pos: AtomicU32::new(0),
            events_written: AtomicU64::new(0),
            events_read: AtomicU64::new(0),
            events_dropped: AtomicU64::new(0),
            reserved: [0; 8],
        };
    }

    /// True when both magic and version match what this driver writes.
    pub fn is_valid(&self) -> bool {
        !(self.magic != RING_MAGIC || self.version != RING_VERSION)
    }

    /// Bytes of unread event data currently held in the ring.
    pub fn available_read_data(&self) -> u32 {
        let w = self.write_pos.load(Ordering::Acquire);
        let r = self.read_pos.load(Ordering::Acquire);
        if w < r {
            // Writer has wrapped past the end; data spans the seam.
            self.buffer_size - r + w
        } else {
            w - r
        }
    }

    /// Bytes a producer may still write. One byte is permanently kept
    /// unused so that a completely full ring is distinguishable from an
    /// empty one (write_pos == read_pos means empty).
    pub fn available_write_space(&self) -> u32 {
        self.buffer_size - self.available_read_data() - 1
    }
}
/// Fixed-size prefix written before every event payload in the ring.
/// `#[repr(C)]` so the user-mode consumer can parse it byte-for-byte.
#[repr(C)]
pub struct EventHeader {
    // Discriminant from `EventType` (stored as its u32 value).
    pub event_type: u32,
    // Total event size in bytes: this header plus the payload.
    pub size: u32,
    // Timestamp of the event; currently always 0 (see `get_timestamp` stub).
    pub timestamp: u64,
    // Process id of the originating context; currently always 0 (stub).
    pub pid: u32,
    // Thread id of the originating context; currently always 0 (stub).
    pub tid: u32,
}
/// Kinds of events published through the shared channel.
/// `#[repr(u32)]` so the discriminant matches `EventHeader.event_type`.
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum EventType {
    ProcessCreate = 1,
    ProcessExit = 2,
    ThreadCreate = 3,
    ThreadExit = 4,
    ImageLoad = 5,
    RegistryOp = 6,
    FileOp = 7,
    NetworkOp = 8,
    // Detection alerts use a separate, non-contiguous range.
    Alert = 100,
    // Driver status/heartbeat messages.
    Status = 200,
}
/// Fixed-size portion of a process create/exit event. Variable-length
/// strings (image path, command line) are expected to follow this struct
/// in the payload; their lengths are carried in the trailing fields.
#[repr(C)]
pub struct ProcessEvent {
    pub header: EventHeader,
    pub process_id: u32,
    pub parent_id: u32,
    // PID/TID of the context that performed the create.
    pub creating_pid: u32,
    pub creating_tid: u32,
    pub session_id: u32,
    // Boolean as u8 for stable C layout.
    pub is_wow64: u8,
    // Padding to keep the following u16 fields aligned.
    pub reserved: [u8; 3],
    // Byte lengths of the strings appended after this struct.
    pub image_path_len: u16,
    pub command_line_len: u16,
}
/// Thread create/exit event.
#[repr(C)]
pub struct ThreadEvent {
    pub header: EventHeader,
    pub thread_id: u32,
    pub process_id: u32,
    pub start_address: u64,
    // Nonzero when the thread was created from a different process
    // (classic remote-thread injection indicator). Boolean as u8.
    pub is_remote: u8,
    // Padding to 8-byte alignment.
    pub reserved: [u8; 7],
}
/// Image (DLL/driver) load event. The image path string is expected to
/// follow this struct; its length is carried in `path_len`.
#[repr(C)]
pub struct ImageEvent {
    pub header: EventHeader,
    pub process_id: u32,
    pub image_base: u64,
    pub image_size: u64,
    // Nonzero for kernel-mode images. Boolean as u8.
    pub is_kernel: u8,
    // Padding before the u16 fields.
    pub reserved: [u8; 3],
    pub path_len: u16,
    pub reserved2: u16,
}
/// Owner of the kernel/user shared ring buffer and its supporting
/// resources. All pointers are null until `initialize` succeeds; `Drop`
/// releases everything in the reverse order of acquisition.
pub struct SharedChannel {
    // Non-paged pool allocation: RingBufferHeader followed by the payload.
    kernel_addr: PVOID,
    // Total allocation size in bytes (header + payload).
    alloc_size: usize,
    // MDL describing the allocation, used for the user-mode mapping.
    mdl: PMDL,
    // Address returned by MmMapLockedPagesSpecifyCache, or null if unmapped.
    user_addr: PVOID,
    // Pool-allocated synchronization event signaled on each write.
    event: *mut KEVENT,
    initialized: bool,
}
impl SharedChannel {
    /// Create an empty, uninitialized channel. Call `initialize` before use.
    pub fn new() -> Self {
        Self {
            kernel_addr: ptr::null_mut(),
            alloc_size: 0,
            mdl: ptr::null_mut(),
            user_addr: ptr::null_mut(),
            event: ptr::null_mut(),
            initialized: false,
        }
    }

    /// Allocate the ring buffer (`RingBufferHeader` + `size` payload bytes)
    /// from non-paged pool, build an MDL describing it, and allocate and
    /// initialize the notification KEVENT.
    ///
    /// On any failure, everything acquired so far is released and an
    /// NTSTATUS error is returned; on success all handles are stored in
    /// `self` and `initialized` is set.
    ///
    /// # Safety
    /// Must run at an IRQL where pool allocation is legal; `self` must not
    /// be accessed concurrently during initialization. Calling this twice
    /// without cleanup leaks the first allocation set.
    pub unsafe fn initialize(&mut self, size: usize) -> Result<(), NTSTATUS> {
        // Header and payload live in one contiguous allocation; the payload
        // starts immediately after the header (see `get_buffer`).
        let total_size = core::mem::size_of::<RingBufferHeader>() + size;
        let buffer = unsafe {
            ExAllocatePool2(
                POOL_FLAG_NON_PAGED,
                total_size as u64,
                COMM_POOL_TAG,
            )
        };
        if buffer.is_null() {
            println!("[Leviathan] Failed to allocate ring buffer");
            return Err(STATUS_INSUFFICIENT_RESOURCES);
        }
        // Zero the whole region (ExAllocatePool2 zeroes by default, but the
        // explicit clear keeps the invariant independent of pool flags).
        unsafe {
            ptr::write_bytes(buffer as *mut u8, 0, total_size);
        }
        // Stamp the header in place. Note: `size` (payload only) is what
        // becomes `buffer_size`, not `total_size`.
        let header = buffer as *mut RingBufferHeader;
        unsafe {
            (*header).init(size as u32);
        }
        // MDL args: VA, length, secondary-buffer=FALSE, charge-quota=FALSE,
        // no associated IRP.
        let mdl = unsafe {
            IoAllocateMdl(
                buffer,
                total_size as u32,
                0, 0, ptr::null_mut(),
            )
        };
        if mdl.is_null() {
            // Roll back the pool allocation before bailing.
            unsafe { ExFreePoolWithTag(buffer, COMM_POOL_TAG) };
            println!("[Leviathan] Failed to allocate MDL");
            return Err(STATUS_INSUFFICIENT_RESOURCES);
        }
        // Pages are non-paged, so the MDL can be completed without probing
        // and locking.
        unsafe { MmBuildMdlForNonPagedPool(mdl) };
        // The KEVENT lives in non-paged pool so user-visible signaling works
        // at elevated IRQL.
        let event_size = core::mem::size_of::<KEVENT>();
        let event = unsafe {
            ExAllocatePool2(
                POOL_FLAG_NON_PAGED,
                event_size as u64,
                COMM_POOL_TAG,
            )
        } as *mut KEVENT;
        if event.is_null() {
            // Roll back in reverse order of acquisition.
            unsafe {
                IoFreeMdl(mdl);
                ExFreePoolWithTag(buffer, COMM_POOL_TAG);
            }
            return Err(STATUS_INSUFFICIENT_RESOURCES);
        }
        // SynchronizationEvent: auto-resets after releasing one waiter.
        // Initial state: not signaled (0).
        unsafe {
            KeInitializeEvent(event, wdk_sys::_EVENT_TYPE::SynchronizationEvent, 0);
        }
        self.kernel_addr = buffer;
        self.alloc_size = total_size;
        self.mdl = mdl;
        self.event = event;
        self.initialized = true;
        println!(
            "[Leviathan] Shared channel initialized: {}KB buffer at {:p}",
            total_size / 1024,
            buffer
        );
        Ok(())
    }

    /// Map the ring buffer so a second virtual address exists for it and
    /// remember the mapping in `user_addr`.
    ///
    /// NOTE(review): the AccessMode passed below is `KernelMode`, which maps
    /// the pages into *system* space, not into the calling process — despite
    /// this function's name and log message. A real user-mode mapping needs
    /// `UserMode` plus exception (SEH) protection, since the call can raise
    /// instead of returning null in that mode. Confirm the intent.
    ///
    /// # Safety
    /// `initialize` must have succeeded; the mapping must be torn down with
    /// `unmap_from_user` before the MDL is freed.
    pub unsafe fn map_to_user(&mut self) -> Result<PVOID, NTSTATUS> {
        if !self.initialized || self.mdl.is_null() {
            return Err(STATUS_NOT_INITIALIZED);
        }
        // Trailing args: RequestedAddress = null (let MM choose),
        // BugCheckOnFailure = 0, Priority = 16 (NormalPagePriority).
        let user_addr = unsafe {
            MmMapLockedPagesSpecifyCache(
                self.mdl,
                wdk_sys::_MODE::KernelMode as wdk_sys::KPROCESSOR_MODE,
                wdk_sys::_MEMORY_CACHING_TYPE::MmCached,
                ptr::null_mut(),
                0, 16, )
        };
        if user_addr.is_null() {
            return Err(wdk_sys::STATUS_INSUFFICIENT_RESOURCES);
        }
        self.user_addr = user_addr;
        println!("[Leviathan] Buffer mapped to user-mode at {:p}", user_addr);
        Ok(user_addr)
    }

    /// Tear down the mapping created by `map_to_user`, if any. Idempotent.
    ///
    /// # Safety
    /// If the mapping was made into a user process, this must run in the
    /// context of that same process.
    pub unsafe fn unmap_from_user(&mut self) {
        if !self.user_addr.is_null() && !self.mdl.is_null() {
            unsafe { MmUnmapLockedPages(self.user_addr, self.mdl) };
            self.user_addr = ptr::null_mut();
        }
    }

    /// View the front of the allocation as the ring-buffer header.
    ///
    /// NOTE(review): this mints a `&mut` from `&self` via a raw pointer;
    /// that is only sound if callers never hold two of these at once and
    /// all cross-thread coordination goes through the atomic fields.
    fn get_header(&self) -> Option<&mut RingBufferHeader> {
        if self.kernel_addr.is_null() {
            return None;
        }
        Some(unsafe { &mut *(self.kernel_addr as *mut RingBufferHeader) })
    }

    /// Pointer to the first payload byte, immediately after the header.
    fn get_buffer(&self) -> Option<*mut u8> {
        if self.kernel_addr.is_null() {
            return None;
        }
        Some(unsafe {
            (self.kernel_addr as *mut u8)
                .add(core::mem::size_of::<RingBufferHeader>())
        })
    }

    /// Append one event (header + `data`) to the ring and signal the KEVENT.
    ///
    /// If there is not enough free space the event is counted in
    /// `events_dropped` and an error is returned. Note:
    /// STATUS_BUFFER_OVERFLOW (0x80000005) is warning severity, so
    /// `NT_SUCCESS`-style checks on the raw value would treat it as success.
    ///
    /// NOTE(review): the write position is read, then stored, with no atomic
    /// reservation — this is only safe with a single producer (or external
    /// serialization). Also, `MAX_EVENT_SIZE` is not enforced here.
    ///
    /// # Safety
    /// Channel must be initialized; see the single-producer note above.
    pub unsafe fn write_event(&self, event_type: EventType, data: &[u8]) -> Result<(), NTSTATUS> {
        let header = match self.get_header() {
            Some(h) => h,
            None => return Err(STATUS_NOT_INITIALIZED),
        };
        let buffer = match self.get_buffer() {
            Some(b) => b,
            None => return Err(STATUS_NOT_INITIALIZED),
        };
        let event_size = core::mem::size_of::<EventHeader>() + data.len();
        if header.available_write_space() < event_size as u32 {
            header.events_dropped.fetch_add(1, Ordering::Relaxed);
            return Err(wdk_sys::STATUS_BUFFER_OVERFLOW);
        }
        let write_pos = header.write_pos.load(Ordering::Acquire) as usize;
        let buffer_size = header.buffer_size as usize;
        let event_header = EventHeader {
            event_type: event_type as u32,
            size: event_size as u32,
            timestamp: get_timestamp(),
            pid: get_current_pid(),
            tid: get_current_tid(),
        };
        // Reinterpret the header struct as raw bytes for the copy below.
        let header_bytes = unsafe {
            core::slice::from_raw_parts(
                &event_header as *const _ as *const u8,
                core::mem::size_of::<EventHeader>(),
            )
        };
        // Byte-wise copy with modulo indexing so records transparently wrap
        // around the end of the ring.
        let mut pos = write_pos;
        for &byte in header_bytes {
            unsafe { *buffer.add(pos % buffer_size) = byte };
            pos += 1;
        }
        for &byte in data {
            unsafe { *buffer.add(pos % buffer_size) = byte };
            pos += 1;
        }
        // Publish the data before the position becomes visible (Release).
        header.write_pos.store((pos % buffer_size) as u32, Ordering::Release);
        header.events_written.fetch_add(1, Ordering::Relaxed);
        // Wake the user-mode reader, if the event object exists.
        if !self.event.is_null() {
            unsafe { KeSetEvent(self.event, 0, 0) };
        }
        Ok(())
    }

    /// Snapshot of the channel counters, or `None` before initialization.
    /// Counters are read with Relaxed ordering, so the values are a loose
    /// (not mutually consistent) snapshot.
    pub fn get_stats(&self) -> Option<ChannelStats> {
        let header = self.get_header()?;
        Some(ChannelStats {
            events_written: header.events_written.load(Ordering::Relaxed),
            events_read: header.events_read.load(Ordering::Relaxed),
            events_dropped: header.events_dropped.load(Ordering::Relaxed),
            buffer_used: header.available_read_data(),
            buffer_size: header.buffer_size,
        })
    }
}
impl Drop for SharedChannel {
    /// Release everything in the reverse order of acquisition:
    /// unmap first (a mapping must not outlive its MDL), then the MDL,
    /// then the KEVENT allocation, and finally the ring-buffer pool block.
    /// Null checks make this safe for a never-initialized channel.
    fn drop(&mut self) {
        unsafe {
            self.unmap_from_user();
            if !self.mdl.is_null() {
                IoFreeMdl(self.mdl);
            }
            if !self.event.is_null() {
                ExFreePoolWithTag(self.event as PVOID, COMM_POOL_TAG);
            }
            if !self.kernel_addr.is_null() {
                ExFreePoolWithTag(self.kernel_addr, COMM_POOL_TAG);
            }
        }
        println!("[Leviathan] Shared channel destroyed");
    }
}
/// Plain-data snapshot of the ring-buffer counters, produced by
/// `SharedChannel::get_stats`.
#[derive(Debug, Clone)]
pub struct ChannelStats {
    pub events_written: u64,
    pub events_read: u64,
    pub events_dropped: u64,
    // Bytes of unread data at snapshot time.
    pub buffer_used: u32,
    // Payload capacity of the ring in bytes.
    pub buffer_size: u32,
}
// TODO(stub): always returns 0. Presumably intended to query the system
// time (e.g. KeQuerySystemTimePrecise) — confirm and implement; until then
// every EventHeader.timestamp is 0.
fn get_timestamp() -> u64 {
    0
}
// TODO(stub): always returns 0. Presumably intended to return the current
// process id (e.g. PsGetCurrentProcessId) — confirm and implement.
fn get_current_pid() -> u32 {
    0
}
// TODO(stub): always returns 0. Presumably intended to return the current
// thread id (e.g. PsGetCurrentThreadId) — confirm and implement.
fn get_current_tid() -> u32 {
    0
}
pub mod ioctl {
    //! IOCTL codes shared with the user-mode client. Function numbers
    //! start at 0x800, the first value reserved for third-party drivers.
    use wdk_sys::{FILE_ANY_ACCESS, FILE_DEVICE_UNKNOWN, METHOD_BUFFERED};

    const IOCTL_BASE: u32 = 0x800;

    /// Const equivalent of the Windows `CTL_CODE` macro:
    /// device type, required access, function number and transfer method
    /// packed into one 32-bit control code.
    const fn ctl_code(function: u32, method: u32, access: u32) -> u32 {
        let device_bits = FILE_DEVICE_UNKNOWN << 16;
        let access_bits = access << 14;
        let function_bits = function << 2;
        device_bits | access_bits | function_bits | method
    }

    pub const IOCTL_GET_VERSION: u32 = ctl_code(IOCTL_BASE + 0, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_GET_BUFFER: u32 = ctl_code(IOCTL_BASE + 1, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_SET_CALLBACKS: u32 = ctl_code(IOCTL_BASE + 2, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_PROTECT_PROCESS: u32 = ctl_code(IOCTL_BASE + 3, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_UNPROTECT_PROCESS: u32 = ctl_code(IOCTL_BASE + 4, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_SCAN_PROCESS: u32 = ctl_code(IOCTL_BASE + 5, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_GET_STATS: u32 = ctl_code(IOCTL_BASE + 6, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_ENUM_HIDDEN: u32 = ctl_code(IOCTL_BASE + 7, METHOD_BUFFERED, FILE_ANY_ACCESS);
    pub const IOCTL_SCAN_HOOKS: u32 = ctl_code(IOCTL_BASE + 8, METHOD_BUFFERED, FILE_ANY_ACCESS);
}
// Single driver-wide channel instance.
// NOTE(review): `static mut` is inherently unsound under concurrent access;
// all callers must be externally serialized (e.g. only touched from
// DriverEntry / Unload / dispatch at PASSIVE_LEVEL under a single thread).
static mut GLOBAL_CHANNEL: Option<SharedChannel> = None;
/// Allocate and install the global channel with a `size`-byte ring payload.
///
/// # Safety
/// Must not race with any other access to `GLOBAL_CHANNEL`. Calling this a
/// second time drops (and thus frees) the previous channel.
pub unsafe fn init_global_channel(size: usize) -> Result<(), NTSTATUS> {
    let mut channel = SharedChannel::new();
    unsafe { channel.initialize(size)? };
    unsafe { GLOBAL_CHANNEL = Some(channel) };
    Ok(())
}
/// Shared reference to the global channel, or `None` if not initialized.
///
/// # Safety
/// The returned `'static` borrow is only valid while no one concurrently
/// reassigns `GLOBAL_CHANNEL` (see `init_global_channel` /
/// `cleanup_global_channel`).
pub unsafe fn get_global_channel() -> Option<&'static SharedChannel> {
    #[allow(static_mut_refs)]
    unsafe { GLOBAL_CHANNEL.as_ref() }
}
/// Drop the global channel, releasing the mapping, MDL, event and pool
/// allocation via `SharedChannel::drop`.
///
/// # Safety
/// Must not race with `get_global_channel` borrows still in use.
pub unsafe fn cleanup_global_channel() {
    unsafe { GLOBAL_CHANNEL = None };
}