use core::ffi::c_void;
use core::ptr::NonNull;

use super::error::{status, KmError, KmResult, NtStatus};
use super::memory::{Mdl, AccessMode, LockOperation, PhysicalAddress};

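/// A section-backed shared memory region: created and mapped into kernel
/// space up front, and optionally mapped into a user-mode process later.
///
/// A minimal usage sketch (illustrative only, not taken from the rest of the
/// codebase; assumes the caller runs at PASSIVE_LEVEL and already holds a
/// valid `process_handle`):
///
/// ```ignore
/// let mut shm = SharedMemory::create(0x1000)?;
/// let user_va = shm.map_to_process(process_handle)?;
/// shm.as_bytes_mut()[0] = 0xAB; // visible through both mappings
/// ```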
pub struct SharedMemory {
    /// Handle to the backing section object.
    section_handle: *mut c_void,
    /// Base address of the kernel-mode view.
    kernel_address: NonNull<c_void>,
    /// Base address of the user-mode view, once mapped.
    user_address: Option<NonNull<c_void>>,
    /// Size of the region in bytes.
    size: usize,
    /// Optional MDL describing the region.
    mdl: Option<Mdl>,
}

impl SharedMemory {
    /// Creates a pagefile-backed section object of `size` bytes and maps a
    /// view of it into the current process's address space.
    pub fn create(size: usize) -> KmResult<Self> {
        let mut section_handle: *mut c_void = core::ptr::null_mut();
        let mut large_size = size as i64;
        let mut object_attributes = ObjectAttributes::new();

        let status = unsafe {
            ZwCreateSection(
                &mut section_handle,
                SECTION_ALL_ACCESS,
                &mut object_attributes as *mut _ as *mut _,
                &mut large_size,
                PAGE_READWRITE,
                SEC_COMMIT,
                core::ptr::null_mut(),
            )
        };

        if !status::nt_success(status) {
            return Err(KmError::NtStatus(status));
        }

        let mut kernel_address: *mut c_void = core::ptr::null_mut();
        let mut view_size = size;

        let status = unsafe {
            ZwMapViewOfSection(
                section_handle,
                // -1 is the NtCurrentProcess pseudo-handle.
                -1isize as *mut c_void,
                &mut kernel_address,
                0,
                0,
                core::ptr::null_mut(),
                &mut view_size,
                VIEW_SHARE,
                0,
                PAGE_READWRITE,
            )
        };

        if !status::nt_success(status) {
            unsafe { ZwClose(section_handle) };
            return Err(KmError::NtStatus(status));
        }

        let kernel_ptr = NonNull::new(kernel_address)
            .ok_or(KmError::NtStatus(status::STATUS_UNSUCCESSFUL))?;

        Ok(Self {
            section_handle,
            kernel_address: kernel_ptr,
            user_address: None,
            size,
            mdl: None,
        })
    }

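    /// Maps the section into the process identified by `process_handle` and
    /// returns the user-mode base address of the new view.
    ///
    /// A hedged usage sketch (how `process_handle` is obtained is up to the
    /// caller and not shown in this module):
    ///
    /// ```ignore
    /// let user_base = shm.map_to_process(process_handle)?;
    /// // `user_base` is only meaningful in the context of the target process.
    /// ```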
    pub fn map_to_process(&mut self, process_handle: *mut c_void) -> KmResult<*mut c_void> {
        let mut user_address: *mut c_void = core::ptr::null_mut();
        let mut view_size = self.size;

        let status = unsafe {
            ZwMapViewOfSection(
                self.section_handle,
                process_handle,
                &mut user_address,
                0,
                0,
                core::ptr::null_mut(),
                &mut view_size,
                VIEW_SHARE,
                0,
                PAGE_READWRITE,
            )
        };

        if !status::nt_success(status) {
            return Err(KmError::NtStatus(status));
        }

        self.user_address = NonNull::new(user_address);
        Ok(user_address)
    }

    pub fn kernel_ptr(&self) -> *mut c_void {
        self.kernel_address.as_ptr()
    }

    pub fn user_ptr(&self) -> Option<*mut c_void> {
        self.user_address.map(|p| p.as_ptr())
    }

    pub fn size(&self) -> usize {
        self.size
    }

    /// Reinterprets the start of the region as `T`, if the region is large enough.
    pub fn as_ref<T>(&self) -> Option<&T> {
        if core::mem::size_of::<T>() > self.size {
            return None;
        }
        Some(unsafe { &*(self.kernel_address.as_ptr() as *const T) })
    }

    /// Mutable counterpart of `as_ref`.
    pub fn as_mut<T>(&mut self) -> Option<&mut T> {
        if core::mem::size_of::<T>() > self.size {
            return None;
        }
        Some(unsafe { &mut *(self.kernel_address.as_ptr() as *mut T) })
    }

    pub fn as_bytes(&self) -> &[u8] {
        unsafe { core::slice::from_raw_parts(self.kernel_address.as_ptr() as *const u8, self.size) }
    }

    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        unsafe { core::slice::from_raw_parts_mut(self.kernel_address.as_ptr() as *mut u8, self.size) }
    }
}

impl Drop for SharedMemory {
    fn drop(&mut self) {
        // Unmap the kernel-mode view (mapped into the current process) ...
        unsafe {
            ZwUnmapViewOfSection(-1isize as *mut c_void, self.kernel_address.as_ptr());
        }

        // ... and release the section object handle.
        if !self.section_handle.is_null() {
            unsafe { ZwClose(self.section_handle) };
        }
    }
}

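/// Single-producer / single-consumer ring buffer header placed at the start of
/// a shared memory region; the data area of `size` bytes follows the header
/// immediately (see `init` and `data_ptr`).
///
/// A hedged usage sketch of the kernel side (illustrative only; the user-mode
/// peer would operate on the same header through its own mapping):
///
/// ```ignore
/// let mut shm = SharedMemory::create(0x1000)?;
/// let ring = SharedRingBuffer::init(&mut shm)?;
/// ring.write(b"hello")?;
///
/// let mut buf = [0u8; 16];
/// let n = ring.read(&mut buf)?;
/// assert_eq!(&buf[..n], b"hello");
/// ```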
#[repr(C)]
pub struct SharedRingBuffer {
    read_index: u32,
    write_index: u32,
    size: u32,
    _padding: u32,
}

impl SharedRingBuffer {
    pub const MIN_SIZE: usize = 0x1000;

    /// Initializes the ring buffer header at the start of `memory` and returns
    /// a mutable reference to it.
    pub fn init(memory: &mut SharedMemory) -> KmResult<&mut Self> {
        let mem_size = memory.size();
        if mem_size < Self::MIN_SIZE {
            return Err(KmError::BufferTooSmall {
                required: Self::MIN_SIZE,
                provided: mem_size,
            });
        }

        let header = memory.as_mut::<SharedRingBuffer>().ok_or(KmError::InvalidParameter {
            context: "buffer too small for header",
        })?;

        header.read_index = 0;
        header.write_index = 0;
        // The data area is whatever remains after the header.
        header.size = (mem_size - core::mem::size_of::<SharedRingBuffer>()) as u32;
        header._padding = 0;

        Ok(header)
    }

    /// Pointer to the first byte of the data area, directly after the header.
    fn data_ptr(&self) -> *mut u8 {
        let header_size = core::mem::size_of::<SharedRingBuffer>();
        unsafe { (self as *const Self as *mut u8).add(header_size) }
    }

    /// Bytes that can currently be written. One byte is always left unused so
    /// a full buffer can be distinguished from an empty one.
    pub fn available_write(&self) -> u32 {
        // `read_index` may be written concurrently by the reader on the other
        // side of the mapping, so load it atomically with acquire ordering.
        let read = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.read_index as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };
        let write = self.write_index;

        if write >= read {
            self.size - (write - read) - 1
        } else {
            read - write - 1
        }
    }

    /// Bytes that can currently be read.
    pub fn available_read(&self) -> u32 {
        let read = self.read_index;
        // `write_index` may be written concurrently by the writer.
        let write = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.write_index as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };

        if write >= read {
            write - read
        } else {
            self.size - read + write
        }
    }

    /// Copies `data` into the ring buffer, failing if there is not enough room.
    pub fn write(&mut self, data: &[u8]) -> KmResult<()> {
        let len = data.len() as u32;
        if len > self.available_write() {
            return Err(KmError::BufferTooSmall {
                required: len as usize,
                provided: self.available_write() as usize,
            });
        }

        let write = self.write_index;
        let data_ptr = self.data_ptr();

        // Split the copy in two if it wraps around the end of the data area.
        let first_chunk = core::cmp::min(len, self.size - write);
        let second_chunk = len - first_chunk;

        unsafe {
            core::ptr::copy_nonoverlapping(data.as_ptr(), data_ptr.add(write as usize), first_chunk as usize);
            if second_chunk > 0 {
                core::ptr::copy_nonoverlapping(data.as_ptr().add(first_chunk as usize), data_ptr, second_chunk as usize);
            }
        }

        // Publish the new write index only after the payload is in place.
        let new_write = (write + len) % self.size;
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.write_index as *const _ as *mut _)
                .store(new_write, core::sync::atomic::Ordering::Release);
        }

        Ok(())
    }

    /// Copies up to `buffer.len()` bytes out of the ring buffer and returns the
    /// number of bytes actually read (0 if the buffer is empty).
    pub fn read(&mut self, buffer: &mut [u8]) -> KmResult<usize> {
        let available = self.available_read();
        let len = core::cmp::min(buffer.len() as u32, available);

        if len == 0 {
            return Ok(0);
        }

        let read = self.read_index;
        let data_ptr = self.data_ptr();

        // Split the copy in two if it wraps around the end of the data area.
        let first_chunk = core::cmp::min(len, self.size - read);
        let second_chunk = len - first_chunk;

        unsafe {
            core::ptr::copy_nonoverlapping(data_ptr.add(read as usize), buffer.as_mut_ptr(), first_chunk as usize);
            if second_chunk > 0 {
                core::ptr::copy_nonoverlapping(data_ptr, buffer.as_mut_ptr().add(first_chunk as usize), second_chunk as usize);
            }
        }

        // Publish the new read index only after the data has been copied out.
        let new_read = (read + len) % self.size;
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.read_index as *const _ as *mut _)
                .store(new_read, core::sync::atomic::Ordering::Release);
        }

        Ok(len as usize)
    }
}

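/// Header for a simple request/response handshake over a shared mapping: the
/// requester writes its payload after the header and sets
/// `FLAG_REQUEST_PENDING`; the responder writes its reply and calls
/// `set_response`, which publishes `FLAG_RESPONSE_READY`.
///
/// A hedged kernel-side polling sketch (`handle_request` is a hypothetical
/// helper, shown only for illustration):
///
/// ```ignore
/// if let Some(header) = shared.as_mut::<SharedBuffer>() {
///     if header.has_request() {
///         let reply_len = handle_request(header); // hypothetical
///         header.set_response(reply_len);         // also drops FLAG_REQUEST_PENDING
///     }
/// }
/// ```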
#[repr(C)]
pub struct SharedBuffer {
    pub sequence: u32,
    pub flags: u32,
    pub request_size: u32,
    pub response_size: u32,
}

impl SharedBuffer {
    pub const HEADER_SIZE: usize = core::mem::size_of::<SharedBuffer>();

    pub const FLAG_REQUEST_PENDING: u32 = 1;
    pub const FLAG_RESPONSE_READY: u32 = 2;
    pub const FLAG_BUSY: u32 = 4;

    pub fn init(&mut self) {
        self.sequence = 0;
        self.flags = 0;
        self.request_size = 0;
        self.response_size = 0;
    }

    /// Pointer to the request payload, which starts immediately after the header.
    pub fn request_data(&self) -> *const u8 {
        unsafe { (self as *const Self as *const u8).add(Self::HEADER_SIZE) }
    }

    /// Pointer to the response payload, which also starts immediately after the header.
    pub fn response_data(&mut self) -> *mut u8 {
        unsafe { (self as *mut Self as *mut u8).add(Self::HEADER_SIZE) }
    }

    pub fn has_request(&self) -> bool {
        let flags = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };
        (flags & Self::FLAG_REQUEST_PENDING) != 0
    }

    pub fn has_response(&self) -> bool {
        let flags = unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .load(core::sync::atomic::Ordering::Acquire)
        };
        (flags & Self::FLAG_RESPONSE_READY) != 0
    }

    /// Records the response size and publishes `FLAG_RESPONSE_READY`,
    /// replacing any previously set flags.
    pub fn set_response(&mut self, size: u32) {
        self.response_size = size;
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .store(Self::FLAG_RESPONSE_READY, core::sync::atomic::Ordering::Release);
        }
    }

    /// Clears all flags.
    pub fn clear_request(&mut self) {
        unsafe {
            core::sync::atomic::AtomicU32::from_ptr(&self.flags as *const _ as *mut _)
                .store(0, core::sync::atomic::Ordering::Release);
        }
    }
}

#[repr(C)]
struct ObjectAttributes {
    length: u32,
    root_directory: *mut c_void,
    object_name: *mut c_void,
    attributes: u32,
    security_descriptor: *mut c_void,
    security_quality_of_service: *mut c_void,
}

impl ObjectAttributes {
    fn new() -> Self {
        Self {
            length: core::mem::size_of::<Self>() as u32,
            root_directory: core::ptr::null_mut(),
            object_name: core::ptr::null_mut(),
            attributes: 0x00000040, // OBJ_CASE_INSENSITIVE
            security_descriptor: core::ptr::null_mut(),
            security_quality_of_service: core::ptr::null_mut(),
        }
    }
}

// SECTION_INHERIT values for ZwMapViewOfSection.
const VIEW_SHARE: u32 = 1;
const VIEW_UNMAP: u32 = 2;

const PAGE_READWRITE: u32 = 0x04;
const SEC_COMMIT: u32 = 0x08000000;
const SECTION_ALL_ACCESS: u32 = 0x000F001F;

extern "system" {
    fn ZwCreateSection(
        SectionHandle: *mut *mut c_void,
        DesiredAccess: u32,
        ObjectAttributes: *mut c_void,
        MaximumSize: *mut i64,
        PageProtection: u32,
        AllocationAttributes: u32,
        FileHandle: *mut c_void,
    ) -> NtStatus;

    fn ZwMapViewOfSection(
        SectionHandle: *mut c_void,
        ProcessHandle: *mut c_void,
        BaseAddress: *mut *mut c_void,
        ZeroBits: usize,
        CommitSize: usize,
        SectionOffset: *mut i64,
        ViewSize: *mut usize,
        InheritDisposition: u32,
        AllocationType: u32,
        Win32Protect: u32,
    ) -> NtStatus;

    fn ZwUnmapViewOfSection(ProcessHandle: *mut c_void, BaseAddress: *mut c_void) -> NtStatus;

    fn ZwClose(Handle: *mut c_void) -> NtStatus;
}