wraith/km/shared.rs

//! Shared memory for KM<->UM communication

use core::ffi::c_void;
use core::ptr::NonNull;

use super::error::{status, KmError, KmResult, NtStatus};
use super::memory::{Mdl, AccessMode, LockOperation, PhysicalAddress};

/// shared memory section between kernel and usermode
pub struct SharedMemory {
    section_handle: *mut c_void,
    kernel_address: NonNull<c_void>,
    user_address: Option<NonNull<c_void>>,
    size: usize,
    mdl: Option<Mdl>,
}

impl SharedMemory {
    /// create a new shared memory section
    pub fn create(size: usize) -> KmResult<Self> {
        let mut section_handle: *mut c_void = core::ptr::null_mut();
        let mut large_size = size as i64;
        let mut object_attributes = ObjectAttributes::new();

        // SAFETY: all out-parameters point to live locals; OBJ_KERNEL_HANDLE keeps the handle kernel-only
        let status = unsafe {
            ZwCreateSection(
                &mut section_handle,
                SECTION_ALL_ACCESS,
                &mut object_attributes as *mut _ as *mut _,
                &mut large_size,
                PAGE_READWRITE,
                SEC_COMMIT,
                core::ptr::null_mut(),
            )
        };

        if !status::nt_success(status) {
            return Err(KmError::NtStatus(status));
        }

        // map to kernel space
        let mut kernel_address: *mut c_void = core::ptr::null_mut();
        let mut view_size = size;

        // SAFETY: maps the just-created section into the current (kernel) process context; all pointers reference live locals
        let status = unsafe {
            ZwMapViewOfSection(
                section_handle,
                -1isize as *mut c_void, // current process (kernel)
                &mut kernel_address,
                0,
                0,
                core::ptr::null_mut(),
                &mut view_size,
                VIEW_SHARE,
                0,
                PAGE_READWRITE,
            )
        };

        if !status::nt_success(status) {
            unsafe { ZwClose(section_handle) };
            return Err(KmError::NtStatus(status));
        }

        let kernel_ptr = match NonNull::new(kernel_address) {
            Some(ptr) => ptr,
            None => {
                // mapping reported success but produced a null base; release the section before failing
                unsafe { ZwClose(section_handle) };
                return Err(KmError::NtStatus(status::STATUS_UNSUCCESSFUL));
            }
        };

        Ok(Self {
            section_handle,
            kernel_address: kernel_ptr,
            user_address: None,
            size,
            mdl: None,
        })
    }

    /// map the shared memory to a user process
    pub fn map_to_process(&mut self, process_handle: *mut c_void) -> KmResult<*mut c_void> {
        let mut user_address: *mut c_void = core::ptr::null_mut();
        let mut view_size = self.size;

        // SAFETY: map section to user process
        let status = unsafe {
            ZwMapViewOfSection(
                self.section_handle,
                process_handle,
                &mut user_address,
                0,
                0,
                core::ptr::null_mut(),
                &mut view_size,
                VIEW_SHARE,
                0,
                PAGE_READWRITE,
            )
        };

        if !status::nt_success(status) {
            return Err(KmError::NtStatus(status));
        }

        self.user_address = NonNull::new(user_address);
        Ok(user_address)
    }

    /// get kernel-space pointer
    pub fn kernel_ptr(&self) -> *mut c_void {
        self.kernel_address.as_ptr()
    }

    /// get user-space pointer (if mapped)
    pub fn user_ptr(&self) -> Option<*mut c_void> {
        self.user_address.map(|p| p.as_ptr())
    }

    /// get size
    pub fn size(&self) -> usize {
        self.size
    }

    /// get as typed reference
    pub fn as_ref<T>(&self) -> Option<&T> {
        if core::mem::size_of::<T>() > self.size {
            return None;
        }
        // SAFETY: the mapping is page-aligned and the size check above guarantees at least
        // size_of::<T>() readable bytes for the lifetime of `self`
        Some(unsafe { &*(self.kernel_address.as_ptr() as *const T) })
    }

    /// get as typed mutable reference
    pub fn as_mut<T>(&mut self) -> Option<&mut T> {
        if core::mem::size_of::<T>() > self.size {
            return None;
        }
        // SAFETY: same bounds/alignment argument as `as_ref`, and `&mut self` gives the kernel
        // side exclusive access
        Some(unsafe { &mut *(self.kernel_address.as_ptr() as *mut T) })
    }

    /// get as byte slice
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: the kernel mapping is valid for `self.size` bytes for the lifetime of `self`
        unsafe { core::slice::from_raw_parts(self.kernel_address.as_ptr() as *const u8, self.size) }
    }

    /// get as mutable byte slice
    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        // SAFETY: as above, and `&mut self` gives the kernel side exclusive access
        unsafe { core::slice::from_raw_parts_mut(self.kernel_address.as_ptr() as *mut u8, self.size) }
    }
}

impl Drop for SharedMemory {
    fn drop(&mut self) {
        // unmap from kernel
        unsafe {
            ZwUnmapViewOfSection(-1isize as *mut c_void, self.kernel_address.as_ptr());
        }

        // close section
        if !self.section_handle.is_null() {
            unsafe { ZwClose(self.section_handle) };
        }
    }
}
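
// Illustrative sketch (not part of the original module): one way the SharedMemory type
// above might be wired up, e.g. from an IOCTL handler. The user process handle is
// assumed to be supplied by the caller, and SHARED_SIZE is a hypothetical size.
#[allow(dead_code)]
fn example_setup_shared_memory(
    user_process: *mut c_void,
) -> KmResult<(SharedMemory, *mut c_void)> {
    const SHARED_SIZE: usize = 0x10000; // hypothetical 64 KiB section

    // create the section and map it into kernel space
    let mut shared = SharedMemory::create(SHARED_SIZE)?;

    // zero the view before exposing it to usermode
    shared.as_bytes_mut().fill(0);

    // map the same section into the client process and hand back the user VA
    let user_va = shared.map_to_process(user_process)?;
    Ok((shared, user_va))
}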

/// ring buffer for efficient KM<->UM data transfer
#[repr(C)]
pub struct SharedRingBuffer {
    read_index: u32,
    write_index: u32,
    size: u32,
    _padding: u32,
    // data follows
}

impl SharedRingBuffer {
    /// minimum buffer size (one page, shared between the 16-byte header and the data area)
    pub const MIN_SIZE: usize = 0x1000;

    /// create in shared memory
    pub fn init(memory: &mut SharedMemory) -> KmResult<&mut Self> {
        let mem_size = memory.size();
        if mem_size < Self::MIN_SIZE {
            return Err(KmError::BufferTooSmall {
                required: Self::MIN_SIZE,
                provided: mem_size,
            });
        }

        let header = memory.as_mut::<SharedRingBuffer>().ok_or(KmError::InvalidParameter {
            context: "buffer too small for header",
        })?;

        header.read_index = 0;
        header.write_index = 0;
        header.size = (mem_size - core::mem::size_of::<SharedRingBuffer>()) as u32;
        header._padding = 0;

        Ok(header)
    }

    /// get data portion pointer
    fn data_ptr(&self) -> *mut u8 {
        let header_size = core::mem::size_of::<SharedRingBuffer>();
        // SAFETY: data follows header
        unsafe { (self as *const Self as *mut u8).add(header_size) }
    }

    /// available space for writing (one slot is always left free to distinguish a full buffer from an empty one)
    pub fn available_write(&self) -> u32 {
        // SAFETY: read_index is a valid u32 that we're reading atomically
        let read = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.read_index as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };
        let write = self.write_index;

        if write >= read {
            self.size - (write - read) - 1
        } else {
            read - write - 1
        }
    }

    /// available data for reading
    pub fn available_read(&self) -> u32 {
        let read = self.read_index;
        // SAFETY: write_index is a valid u32 that we're reading atomically
        let write = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.write_index as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };

        if write >= read {
            write - read
        } else {
            self.size - read + write
        }
    }

    /// write data to ring buffer
    pub fn write(&mut self, data: &[u8]) -> KmResult<()> {
        let len = data.len() as u32;
        if len > self.available_write() {
            return Err(KmError::BufferTooSmall {
                required: len as usize,
                provided: self.available_write() as usize,
            });
        }

        let write = self.write_index;
        let data_ptr = self.data_ptr();

        // handle wrap-around
        let first_chunk = core::cmp::min(len, self.size - write);
        let second_chunk = len - first_chunk;

        // SAFETY: indices are within bounds
        unsafe {
            core::ptr::copy_nonoverlapping(data.as_ptr(), data_ptr.add(write as usize), first_chunk as usize);
            if second_chunk > 0 {
                core::ptr::copy_nonoverlapping(data.as_ptr().add(first_chunk as usize), data_ptr, second_chunk as usize);
            }
        }

        // update write index with release ordering
        let new_write = (write + len) % self.size;
        // SAFETY: write_index is a valid u32 that we're storing atomically
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.write_index as *const _ as *mut _)
                .store(new_write, core::sync::atomic::Ordering::Release);
        }

        Ok(())
    }

    /// read data from ring buffer
    pub fn read(&mut self, buffer: &mut [u8]) -> KmResult<usize> {
        let available = self.available_read();
        let len = core::cmp::min(buffer.len() as u32, available);

        if len == 0 {
            return Ok(0);
        }

        let read = self.read_index;
        let data_ptr = self.data_ptr();

        // handle wrap-around
        let first_chunk = core::cmp::min(len, self.size - read);
        let second_chunk = len - first_chunk;

        // SAFETY: indices are within bounds
        unsafe {
            core::ptr::copy_nonoverlapping(data_ptr.add(read as usize), buffer.as_mut_ptr(), first_chunk as usize);
            if second_chunk > 0 {
                core::ptr::copy_nonoverlapping(data_ptr, buffer.as_mut_ptr().add(first_chunk as usize), second_chunk as usize);
            }
        }

        // update read index with release ordering
        let new_read = (read + len) % self.size;
        // SAFETY: read_index is a valid u32 that we're storing atomically
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.read_index as *const _ as *mut _)
                .store(new_read, core::sync::atomic::Ordering::Release);
        }

        Ok(len as usize)
    }
}
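
// Illustrative sketch (not part of the original module): initializing the ring buffer
// inside an existing SharedMemory section and pushing one record through it. In a
// single-producer / single-consumer setup, one side (e.g. the kernel) would call `write`
// and the other side would poll `read` through its own mapping; both ends are shown
// here in-process purely for illustration.
#[allow(dead_code)]
fn example_ring_buffer_round_trip(memory: &mut SharedMemory) -> KmResult<()> {
    let ring = SharedRingBuffer::init(memory)?;

    // producer side: push a small payload
    let event: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    ring.write(&event)?;

    // consumer side: drain whatever is available
    let mut out = [0u8; 8];
    let got = ring.read(&mut out)?;
    debug_assert_eq!(got, event.len());

    Ok(())
}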

/// shared buffer for simple message passing
#[repr(C)]
pub struct SharedBuffer {
    pub sequence: u32,
    pub flags: u32,
    pub request_size: u32,
    pub response_size: u32,
    // data follows
}

impl SharedBuffer {
    /// header size
    pub const HEADER_SIZE: usize = core::mem::size_of::<SharedBuffer>();

    /// flag: request pending
    pub const FLAG_REQUEST_PENDING: u32 = 1;
    /// flag: response ready
    pub const FLAG_RESPONSE_READY: u32 = 2;
    /// flag: busy (kernel processing)
    pub const FLAG_BUSY: u32 = 4;

    /// initialize buffer
    pub fn init(&mut self) {
        self.sequence = 0;
        self.flags = 0;
        self.request_size = 0;
        self.response_size = 0;
    }

    /// get request data pointer
    pub fn request_data(&self) -> *const u8 {
        // SAFETY: data follows header
        unsafe { (self as *const Self as *const u8).add(Self::HEADER_SIZE) }
    }

    /// get response data pointer
    pub fn response_data(&mut self) -> *mut u8 {
        // SAFETY: data follows header
        unsafe { (self as *mut Self as *mut u8).add(Self::HEADER_SIZE) }
    }

    /// check if request is pending
    pub fn has_request(&self) -> bool {
        // SAFETY: flags is a valid u32 that we're reading atomically
        let flags = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };
        (flags & Self::FLAG_REQUEST_PENDING) != 0
    }

    /// check if response is ready
    pub fn has_response(&self) -> bool {
        // SAFETY: flags is a valid u32 that we're reading atomically
        let flags = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };
        (flags & Self::FLAG_RESPONSE_READY) != 0
    }

    /// mark request as processed, set response
    pub fn set_response(&mut self, size: u32) {
        self.response_size = size;
        // SAFETY: flags is a valid u32 that we're storing atomically
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .store(Self::FLAG_RESPONSE_READY, core::sync::atomic::Ordering::Release);
        }
    }

    /// clear request (kernel side)
    pub fn clear_request(&mut self) {
        // SAFETY: flags is a valid u32 that we're storing atomically
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .store(0, core::sync::atomic::Ordering::Release);
        }
    }
}
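
// Illustrative sketch (not part of the original module): the kernel-side half of the
// SharedBuffer handshake. Usermode is assumed to fill the data area that follows the
// header, set `request_size`, and raise FLAG_REQUEST_PENDING; the kernel then handles
// the request in place and publishes the result via `set_response`. MAX_DATA is a
// hypothetical bound and must not exceed the space actually mapped after the header.
#[allow(dead_code)]
fn example_service_one_request(buf: &mut SharedBuffer) {
    const MAX_DATA: usize = 0x1000 - SharedBuffer::HEADER_SIZE; // hypothetical data area size

    if !buf.has_request() {
        return;
    }

    let req_len = core::cmp::min(buf.request_size as usize, MAX_DATA);

    // SAFETY (sketch): assumes at least MAX_DATA bytes of shared memory follow the header;
    // request and response share that area, so one mutable slice covers both
    let payload = unsafe { core::slice::from_raw_parts_mut(buf.response_data(), MAX_DATA) };

    // trivial "echo in reverse" handler: the response reuses the request bytes in place
    payload[..req_len].reverse();

    buf.set_response(req_len as u32);
}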

// object attributes for section creation
#[repr(C)]
struct ObjectAttributes {
    length: u32,
    root_directory: *mut c_void,
    object_name: *mut c_void,
    attributes: u32,
    security_descriptor: *mut c_void,
    security_quality_of_service: *mut c_void,
}

impl ObjectAttributes {
    fn new() -> Self {
        Self {
            length: core::mem::size_of::<Self>() as u32,
            root_directory: core::ptr::null_mut(),
            object_name: core::ptr::null_mut(),
            attributes: 0x00000200, // OBJ_KERNEL_HANDLE
            security_descriptor: core::ptr::null_mut(),
            security_quality_of_service: core::ptr::null_mut(),
        }
    }
}

// section inherit disposition (SECTION_INHERIT) values
const VIEW_SHARE: u32 = 1;
#[allow(dead_code)]
const VIEW_UNMAP: u32 = 2;

// page protection
const PAGE_READWRITE: u32 = 0x04;
// section allocation attribute
const SEC_COMMIT: u32 = 0x8000000;
// section object access mask
const SECTION_ALL_ACCESS: u32 = 0x000F001F;

// section/memory functions
extern "system" {
    fn ZwCreateSection(
        SectionHandle: *mut *mut c_void,
        DesiredAccess: u32,
        ObjectAttributes: *mut c_void,
        MaximumSize: *mut i64,
        PageProtection: u32,
        AllocationAttributes: u32,
        FileHandle: *mut c_void,
    ) -> NtStatus;

    fn ZwMapViewOfSection(
        SectionHandle: *mut c_void,
        ProcessHandle: *mut c_void,
        BaseAddress: *mut *mut c_void,
        ZeroBits: usize,
        CommitSize: usize,
        SectionOffset: *mut i64,
        ViewSize: *mut usize,
        InheritDisposition: u32,
        AllocationType: u32,
        Win32Protect: u32,
    ) -> NtStatus;

    fn ZwUnmapViewOfSection(ProcessHandle: *mut c_void, BaseAddress: *mut c_void) -> NtStatus;

    fn ZwClose(Handle: *mut c_void) -> NtStatus;
}