leviathan-driver 0.2.0

Windows kernel-mode EDR/XDR driver framework in Rust - callbacks, filters, detection, forensics
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
//! Kernel Synchronization Primitives
//!
//! Provides thread-safe synchronization for kernel-mode code:
//! - Spinlocks (DISPATCH_LEVEL)
//! - Fast Mutex (APC_LEVEL)
//! - Executive Resources (Read/Write locks)
//! - Event objects (signaling)
//! - Interlocked operations
//!
//! # IRQL Rules
//! - Spinlock: Raises to DISPATCH_LEVEL, cannot page fault
//! - Fast Mutex: Raises to APC_LEVEL, can access paged memory
//! - ERESOURCE: Can be acquired shared (read) or exclusive (write)
//! - Event: Blocking waits require IRQL <= APC_LEVEL (waiting does not lower IRQL; DISPATCH_LEVEL is allowed only with a zero timeout)

use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU32, Ordering};
use wdk_sys::{
    ntddk::{
        KeInitializeSpinLock, KeAcquireSpinLockRaiseToDpc, KeReleaseSpinLock,
        ExAcquireFastMutex, ExReleaseFastMutex,
        ExInitializeResourceLite, ExAcquireResourceExclusiveLite,
        ExAcquireResourceSharedLite, ExReleaseResourceLite, ExDeleteResourceLite,
        KeInitializeEvent, KeSetEvent, KeClearEvent, KeWaitForSingleObject,
    },
    KSPIN_LOCK, KIRQL, FAST_MUTEX, ERESOURCE, KEVENT,
    LARGE_INTEGER,
};

// ExInitializeFastMutex is not exported in wdk-sys 0.5.
// It's a macro in the WDK that expands to initialize the FAST_MUTEX structure.
// We implement it manually by zeroing the structure.
/// Initialize a FAST_MUTEX structure
///
/// # Safety
/// Must be called at IRQL <= DISPATCH_LEVEL
pub unsafe fn ex_initialize_fast_mutex(mutex: &mut FAST_MUTEX) {
    // FAST_MUTEX initialization - zero the struct and set Count to 1
    unsafe {
        core::ptr::write_bytes(mutex as *mut FAST_MUTEX as *mut u8, 0, core::mem::size_of::<FAST_MUTEX>());
    }
    // The Count field should be set to 1 (available)
    // On x64, Count is the first field of FAST_MUTEX
    unsafe {
        let count_ptr = mutex as *mut FAST_MUTEX as *mut i32;
        *count_ptr = 1;
    }
}

/// Spinlock wrapper for kernel synchronization
///
/// Spinlocks are the fastest synchronization primitive but have restrictions:
/// - Cannot access paged memory while held
/// - Cannot call functions that may page fault
/// - Should be held for very short periods
pub struct SpinLock {
    // Raw KSPIN_LOCK storage. UnsafeCell is required because the Ke* lock
    // routines mutate the lock word through a raw pointer while callers
    // only hold &self.
    lock: UnsafeCell<KSPIN_LOCK>,
}

// Safety: SpinLock is designed for multi-threaded kernel access; the kernel
// spinlock routines provide the actual cross-CPU synchronization.
unsafe impl Send for SpinLock {}
unsafe impl Sync for SpinLock {}

impl SpinLock {
    /// Create a new spinlock
    ///
    /// # Safety
    /// Must call init() before using
    pub const fn new() -> Self {
        // A zero KSPIN_LOCK is the conventional pre-init state.
        Self {
            lock: UnsafeCell::new(0),
        }
    }

    /// Initialize the spinlock
    ///
    /// # Safety
    /// Must be called at IRQL <= DISPATCH_LEVEL
    pub unsafe fn init(&self) {
        unsafe { KeInitializeSpinLock(self.lock.get()) };
    }

    /// Acquire the spinlock, raising to DISPATCH_LEVEL and returning the
    /// previous IRQL
    ///
    /// # Safety
    /// - Must be at IRQL <= DISPATCH_LEVEL
    /// - Must call release() with the returned IRQL
    pub unsafe fn acquire(&self) -> KIRQL {
        // BUG FIX: the previous version discarded the IRQL returned by
        // KeAcquireSpinLockRaiseToDpc and always reported 0 (PASSIVE_LEVEL).
        // release() would then unconditionally lower to PASSIVE_LEVEL, which
        // is incorrect when the lock was acquired at a higher IRQL.
        unsafe { KeAcquireSpinLockRaiseToDpc(self.lock.get()) }
    }

    /// Release the spinlock, restoring the previous IRQL
    ///
    /// # Safety
    /// - Must have called acquire() first
    /// - old_irql must be the value from acquire()
    pub unsafe fn release(&self, old_irql: KIRQL) {
        unsafe { KeReleaseSpinLock(self.lock.get(), old_irql) };
    }

    /// Execute a closure while holding the spinlock
    ///
    /// # Safety
    /// Closure must not:
    /// - Access paged memory
    /// - Call blocking functions
    /// - Acquire other locks that could deadlock
    pub unsafe fn with_lock<T, F: FnOnce() -> T>(&self, f: F) -> T {
        let irql = unsafe { self.acquire() };
        let result = f();
        unsafe { self.release(irql) };
        result
    }
}

/// Fast Mutex - lighter than regular mutex
///
/// Operates at APC_LEVEL, allowing paged memory access.
/// Cannot be acquired recursively.
pub struct FastMutex {
    // Kernel FAST_MUTEX storage. UnsafeCell because the Ex* routines mutate
    // the structure through a raw pointer while callers only hold &self.
    mutex: UnsafeCell<FAST_MUTEX>,
}

// Safety: the kernel fast-mutex routines provide the actual cross-thread
// synchronization; sharing the wrapper between threads is the intended use.
unsafe impl Send for FastMutex {}
unsafe impl Sync for FastMutex {}

impl FastMutex {
    /// Construct an uninitialized fast mutex.
    ///
    /// # Safety
    /// init() must run before any acquire()/release().
    pub const fn new() -> Self {
        // SAFETY: FAST_MUTEX is a plain C struct; an all-zero value is a
        // valid placeholder until init() fills it in.
        let zeroed = unsafe { core::mem::zeroed() };
        Self {
            mutex: UnsafeCell::new(zeroed),
        }
    }

    /// Initialize the underlying FAST_MUTEX.
    ///
    /// # Safety
    /// Must be called at IRQL <= DISPATCH_LEVEL
    pub unsafe fn init(&self) {
        let raw = self.mutex.get();
        unsafe { ex_initialize_fast_mutex(&mut *raw) };
    }

    /// Acquire the mutex, blocking until it is available.
    ///
    /// # Safety
    /// - Must be at IRQL < APC_LEVEL (typically PASSIVE_LEVEL)
    /// - Do not acquire recursively
    pub unsafe fn acquire(&self) {
        unsafe { ExAcquireFastMutex(self.mutex.get()) };
    }

    /// Release a mutex previously taken with acquire().
    ///
    /// # Safety
    /// Must have called acquire() first
    pub unsafe fn release(&self) {
        unsafe { ExReleaseFastMutex(self.mutex.get()) };
    }

    /// Run `body` with the mutex held, releasing it afterwards.
    ///
    /// # Safety
    /// Same restrictions as acquire()
    pub unsafe fn with_lock<T, F: FnOnce() -> T>(&self, body: F) -> T {
        unsafe { self.acquire() };
        let out = body();
        unsafe { self.release() };
        out
    }
}

/// Executive Resource - Read/Write Lock
///
/// Allows multiple readers or one exclusive writer.
/// Best for data that is read frequently but written rarely.
pub struct ExResource {
    // Kernel ERESOURCE storage; mutated through a raw pointer by the Ex*
    // routines, hence UnsafeCell.
    resource: UnsafeCell<ERESOURCE>,
    // 0 = not initialized, non-zero = ExInitializeResourceLite succeeded.
    // Checked by Drop so ExDeleteResourceLite only runs on a live resource.
    initialized: AtomicU32,
}

// Safety: the kernel ERESOURCE routines provide the actual cross-thread
// synchronization; sharing the wrapper between threads is the intended use.
unsafe impl Send for ExResource {}
unsafe impl Sync for ExResource {}

impl ExResource {
    /// Create a new executive resource (not usable until init() succeeds)
    pub const fn new() -> Self {
        Self {
            // SAFETY: ERESOURCE is a plain C struct; an all-zero value is a
            // valid placeholder until ExInitializeResourceLite runs.
            resource: UnsafeCell::new(unsafe { core::mem::zeroed() }),
            initialized: AtomicU32::new(0),
        }
    }

    /// Initialize the resource (idempotent)
    ///
    /// # Safety
    /// Must be called at IRQL <= DISPATCH_LEVEL
    pub unsafe fn init(&self) -> Result<(), ()> {
        // BUG FIX: the previous load-then-store check was a check-then-act
        // race - two threads could both observe 0 and both call
        // ExInitializeResourceLite on the same ERESOURCE. Claim the slot
        // atomically instead.
        if self
            .initialized
            .compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            return Ok(()); // Already initialized (or initialization in flight)
        }

        let status = unsafe { ExInitializeResourceLite(self.resource.get()) };
        if status == 0 {
            // STATUS_SUCCESS
            Ok(())
        } else {
            // Roll back the claim so a later init() may retry, and so Drop
            // does not delete a resource that was never initialized.
            self.initialized.store(0, Ordering::SeqCst);
            Err(())
        }
    }

    /// Acquire exclusive (write) access; returns true if acquired
    ///
    /// # Safety
    /// Must be at IRQL < DISPATCH_LEVEL
    pub unsafe fn acquire_exclusive(&self, wait: bool) -> bool {
        unsafe {
            ExAcquireResourceExclusiveLite(self.resource.get(), wait as u8) != 0
        }
    }

    /// Acquire shared (read) access; returns true if acquired
    ///
    /// # Safety
    /// Must be at IRQL < DISPATCH_LEVEL
    pub unsafe fn acquire_shared(&self, wait: bool) -> bool {
        unsafe {
            ExAcquireResourceSharedLite(self.resource.get(), wait as u8) != 0
        }
    }

    /// Release the resource (shared or exclusive)
    ///
    /// # Safety
    /// Must have acquired the resource first
    pub unsafe fn release(&self) {
        unsafe { ExReleaseResourceLite(self.resource.get()) };
    }

    /// Execute closure with exclusive access; None if acquisition failed
    ///
    /// # Safety
    /// Same restrictions as acquire_exclusive()
    pub unsafe fn with_exclusive<T, F: FnOnce() -> T>(&self, f: F) -> Option<T> {
        if unsafe { self.acquire_exclusive(true) } {
            let result = f();
            unsafe { self.release() };
            Some(result)
        } else {
            None
        }
    }

    /// Execute closure with shared access; None if acquisition failed
    ///
    /// # Safety
    /// Same restrictions as acquire_shared()
    pub unsafe fn with_shared<T, F: FnOnce() -> T>(&self, f: F) -> Option<T> {
        if unsafe { self.acquire_shared(true) } {
            let result = f();
            unsafe { self.release() };
            Some(result)
        } else {
            None
        }
    }
}

impl Drop for ExResource {
    /// Tear down the ERESOURCE, but only if init() actually succeeded.
    fn drop(&mut self) {
        if self.initialized.load(Ordering::SeqCst) == 0 {
            return; // never initialized - nothing to delete
        }
        // SAFETY: the resource was successfully initialized; we have &mut
        // self, so no other reference can still be using it.
        let _ = unsafe { ExDeleteResourceLite(self.resource.get()) };
    }
}

/// Kernel Event - Signaling mechanism
///
/// Used to synchronize between threads or signal completion.
pub struct KernelEvent {
    // Kernel KEVENT storage; mutated through a raw pointer by the Ke*
    // routines, hence UnsafeCell.
    event: UnsafeCell<KEVENT>,
}

// Safety: the kernel event routines provide the actual cross-thread
// synchronization; sharing the wrapper between threads is the intended use.
unsafe impl Send for KernelEvent {}
unsafe impl Sync for KernelEvent {}

/// Event type, mirroring the kernel's EVENT_TYPE
pub enum KernelEventType {
    /// Automatically resets after one wait is satisfied
    SynchronizationEvent,
    /// Stays signaled until explicitly cleared
    NotificationEvent,
}

impl KernelEvent {
    /// Create a new kernel event
    ///
    /// # Safety
    /// Must call init() before using
    pub const fn new() -> Self {
        Self {
            // SAFETY: KEVENT is a plain C struct; an all-zero value is a
            // valid placeholder until KeInitializeEvent runs in init().
            event: UnsafeCell::new(unsafe { core::mem::zeroed() }),
        }
    }

    /// Initialize the event
    ///
    /// # Parameters
    /// - `event_type`: auto-reset (Synchronization) or manual-reset (Notification)
    /// - `signaled`: initial signal state
    ///
    /// # Safety
    /// Must be at IRQL <= DISPATCH_LEVEL
    pub unsafe fn init(&self, event_type: KernelEventType, signaled: bool) {
        let evt_type = match event_type {
            KernelEventType::SynchronizationEvent => wdk_sys::_EVENT_TYPE::SynchronizationEvent,
            KernelEventType::NotificationEvent => wdk_sys::_EVENT_TYPE::NotificationEvent,
        };

        unsafe {
            KeInitializeEvent(self.event.get(), evt_type, signaled as u8);
        }
    }

    /// Set (signal) the event; returns the previous signal state
    ///
    /// # Safety
    /// Must be at IRQL <= DISPATCH_LEVEL
    pub unsafe fn set(&self) -> i32 {
        // Increment 0 (no priority boost), Wait FALSE (not followed by a wait).
        unsafe { KeSetEvent(self.event.get(), 0, 0) }
    }

    /// Clear (reset) the event to non-signaled
    ///
    /// # Safety
    /// Must be at IRQL <= DISPATCH_LEVEL
    pub unsafe fn clear(&self) {
        unsafe { KeClearEvent(self.event.get()) };
    }

    /// Wait for the event to be signaled
    ///
    /// # Parameters
    /// - `timeout_ms`: Optional timeout in milliseconds (None = infinite)
    ///
    /// # Returns
    /// `true` if the wait was satisfied (STATUS_SUCCESS); `false` on
    /// timeout or any other status.
    ///
    /// # Safety
    /// Must be at IRQL <= APC_LEVEL for a blocking wait (typically
    /// PASSIVE_LEVEL)
    pub unsafe fn wait(&self, timeout_ms: Option<u64>) -> bool {
        let timeout = timeout_ms.map(|ms| {
            let mut t: LARGE_INTEGER = unsafe { core::mem::zeroed() };
            // A negative QuadPart means a relative timeout, in 100 ns units.
            // BUG FIX: `-(ms as i64 * 10_000)` could overflow for very large
            // `ms`; an overflowed positive QuadPart would be interpreted as
            // an absolute time in the past (immediate return). Saturate to
            // the longest representable relative wait instead.
            let ticks = i64::try_from(ms)
                .unwrap_or(i64::MAX)
                .saturating_mul(10_000);
            t.QuadPart = ticks.saturating_neg();
            t
        });

        // Null pointer = infinite wait.
        let timeout_ptr = timeout
            .as_ref()
            .map(|t| t as *const _)
            .unwrap_or(core::ptr::null());

        let status = unsafe {
            KeWaitForSingleObject(
                self.event.get() as *mut _,
                wdk_sys::_KWAIT_REASON::Executive,
                wdk_sys::_MODE::KernelMode as wdk_sys::KPROCESSOR_MODE,
                0, // Not alertable
                timeout_ptr as *mut _,
            )
        };

        status == 0 // STATUS_SUCCESS only; STATUS_TIMEOUT (0x102) -> false
    }
}

/// Interlocked operations for atomic updates
///
/// Thin wrappers over Rust atomics that mirror the return-value
/// conventions of the kernel Interlocked* APIs.
pub mod interlocked {
    use core::sync::atomic::{AtomicI32, AtomicI64, Ordering};

    /// Atomically add one; returns the new value (InterlockedIncrement semantics).
    pub fn increment(value: &AtomicI32) -> i32 {
        1 + value.fetch_add(1, Ordering::SeqCst)
    }

    /// Atomically subtract one; returns the new value (InterlockedDecrement semantics).
    pub fn decrement(value: &AtomicI32) -> i32 {
        value.fetch_sub(1, Ordering::SeqCst) - 1
    }

    /// Atomically store `new_value`; returns the old value (InterlockedExchange semantics).
    pub fn exchange(value: &AtomicI32, new_value: i32) -> i32 {
        value.swap(new_value, Ordering::SeqCst)
    }

    /// Atomically store `new_value` if the current value equals `expected`;
    /// returns the value observed before the operation, whether or not the
    /// swap happened (InterlockedCompareExchange semantics).
    pub fn compare_exchange(
        value: &AtomicI32,
        expected: i32,
        new_value: i32,
    ) -> i32 {
        value
            .compare_exchange(expected, new_value, Ordering::SeqCst, Ordering::SeqCst)
            .unwrap_or_else(|observed| observed)
    }

    /// Atomically add `addend`; returns the new value (InterlockedAdd semantics).
    pub fn add(value: &AtomicI64, addend: i64) -> i64 {
        addend + value.fetch_add(addend, Ordering::SeqCst)
    }
}