#![allow(unused_imports, dead_code)]
use alloc::format;
use alloc::string::String;

use core::{
    ffi::CStr,
    mem::MaybeUninit,
    ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign},
    sync::atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering},
};

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Info {
    /// Size of a virtual-memory page, in bytes.
    pub page_size: u32,
    /// Allocation granularity of virtual-memory mappings, in bytes.
    pub page_granularity: u32,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct MemoryFlags(pub u32);

impl From<MemoryFlags> for u32 {
    fn from(val: MemoryFlags) -> Self {
        val.0
    }
}

impl From<u32> for MemoryFlags {
    fn from(value: u32) -> Self {
        Self(value)
    }
}

impl MemoryFlags {
    /// No flags.
    pub const NONE: u32 = 0;
    /// Memory is readable.
    pub const ACCESS_READ: u32 = 0x00000001;
    /// Memory is writable.
    pub const ACCESS_WRITE: u32 = 0x00000002;
    /// Memory is executable.
    pub const ACCESS_EXECUTE: u32 = 0x00000004;
    /// Read and write access.
    pub const ACCESS_RW: u32 = Self::ACCESS_READ | Self::ACCESS_WRITE;
    /// Read and execute access.
    pub const ACCESS_RX: u32 = Self::ACCESS_READ | Self::ACCESS_EXECUTE;
    /// Read, write, and execute access.
    pub const ACCESS_RWX: u32 = Self::ACCESS_READ | Self::ACCESS_WRITE | Self::ACCESS_EXECUTE;

    /// Request `MAP_JIT` semantics where the platform supports them.
    pub const MMAP_ENABLE_JIT: u32 = 0x00000010;
    /// Maximum-access hint: the mapping may later become readable.
    pub const MMAP_MAX_ACCESS_READ: u32 = 0x00000020;
    /// Maximum-access hint: the mapping may later become writable.
    pub const MMAP_MAX_ACCESS_WRITE: u32 = 0x00000040;
    /// Maximum-access hint: the mapping may later become executable.
    pub const MMAP_MAX_ACCESS_EXECUTE: u32 = 0x00000080;

    pub const MMAP_MAX_ACCESS_RW: u32 = Self::MMAP_MAX_ACCESS_READ | Self::MMAP_MAX_ACCESS_WRITE;
    pub const MMAP_MAX_ACCESS_RX: u32 = Self::MMAP_MAX_ACCESS_READ | Self::MMAP_MAX_ACCESS_EXECUTE;
    pub const MMAP_MAX_ACCESS_RWX: u32 =
        Self::MMAP_MAX_ACCESS_READ | Self::MMAP_MAX_ACCESS_WRITE | Self::MMAP_MAX_ACCESS_EXECUTE;

    /// Use a shared (`MAP_SHARED`) mapping instead of a private one.
    pub const MAP_SHARED: u32 = 0x00000100;

    /// Prefer a temporary directory over `/dev/shm` for anonymous memory.
    pub const MAPPING_PREFER_TMP: u32 = 0x80000000;
}

impl MemoryFlags {
    /// Returns `true` if *any* bit of `other` is set in `self` (an
    /// intersection test rather than a strict superset test).
    pub fn contains(self, other: u32) -> bool {
        (self.0 & other) != 0
    }
}
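
// A minimal usage sketch for `MemoryFlags` (illustrative; written here as a
// unit test, not taken from the original source).
#[cfg(test)]
mod memory_flags_example {
    use super::*;

    #[test]
    fn combine_and_query_flags() {
        let mut flags = MemoryFlags(MemoryFlags::ACCESS_READ);
        flags |= MemoryFlags::ACCESS_WRITE;

        // `contains` is an intersection test, so any shared bit matches.
        assert!(flags.contains(MemoryFlags::ACCESS_RW));
        assert!(!flags.contains(MemoryFlags::ACCESS_EXECUTE));
        assert_eq!(flags, MemoryFlags::ACCESS_RW);
    }
}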

impl BitOr<MemoryFlags> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl BitOr<u32> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitor(self, rhs: u32) -> Self::Output {
        Self(self.0 | rhs)
    }
}

impl BitOrAssign<MemoryFlags> for MemoryFlags {
    #[inline]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = *self | rhs;
    }
}

impl BitOrAssign<u32> for MemoryFlags {
    #[inline]
    fn bitor_assign(&mut self, rhs: u32) {
        *self = *self | rhs;
    }
}

impl BitAnd<MemoryFlags> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl BitAnd<u32> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitand(self, rhs: u32) -> Self::Output {
        Self(self.0 & rhs)
    }
}

impl BitAndAssign<MemoryFlags> for MemoryFlags {
    #[inline]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = *self & rhs;
    }
}

impl BitAndAssign<u32> for MemoryFlags {
    #[inline]
    fn bitand_assign(&mut self, rhs: u32) {
        *self = *self & rhs;
    }
}

impl PartialEq<u32> for MemoryFlags {
    #[inline]
    fn eq(&self, other: &u32) -> bool {
        self.0 == *other
    }
}

pub struct DualMapping {
    /// Pointer to the read+execute view of the mapping.
    pub rx: *const u8,
    /// Pointer to the read+write view of the mapping.
    pub rw: *mut u8,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(u32)]
pub enum HardenedRuntimeFlags {
    #[default]
    None = 0,
    /// The target has a hardened runtime: RWX mappings are rejected.
    Enabled = 0x00000001,
    /// The target supports `MAP_JIT` mappings.
    MapJit = 0x00000002,

    EnabledMapJit = Self::Enabled as u32 | Self::MapJit as u32,
}

#[derive(Default)]
pub struct HardenedRuntimeInfo {
    pub flags: HardenedRuntimeFlags,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u32)]
pub enum ProtectJitAccess {
    /// Make JIT memory writable for the current thread.
    ReadWrite = 0,
    /// Make JIT memory executable (read-only) for the current thread.
    ReadExecute = 1,
}

/// Per-view flag filters for dual mappings: the RX view drops all write bits,
/// the RW view drops all execute bits.
pub const DUAL_MAPPING_FILTER: [u32; 2] = [
    MemoryFlags::ACCESS_WRITE | MemoryFlags::MMAP_MAX_ACCESS_WRITE,
    MemoryFlags::ACCESS_EXECUTE | MemoryFlags::MMAP_MAX_ACCESS_EXECUTE,
];

use errno::errno;

use libc::*;

use crate::Error;

cfgenius::define! {
    vm_shm_detect = cfg(
        any(
            target_vendor = "apple",
            target_os = "android"
        )
    );

    has_shm_open = cfg(not(target_os = "android"));
    has_pthread_jit_write_protect_np = cfg(all(
        target_os = "macos"
    ));

    has_shm_anon = cfg(target_os = "freebsd");
}

/// Translates the current `errno` value into a crate-level [`Error`].
fn error_from_errno() -> Error {
    match errno().0 {
        EACCES | EAGAIN | ENODEV | EPERM => Error::InvalidState,
        EFBIG | ENOMEM | EOVERFLOW => Error::OutOfMemory,
        EMFILE | ENFILE => Error::TooManyHandles,
        _ => Error::InvalidArgument,
    }
}

cfgenius::cond! {
    if cfg(not(windows)) {
        fn get_vm_info() -> Info {
            extern "C" {
                fn getpagesize() -> c_int;
            }

            let page_size = unsafe { getpagesize() as usize };

            Info {
                page_size: page_size as _,
                page_granularity: 65536.max(page_size) as _,
            }
        }

        #[cfg(target_os = "macos")]
        fn get_osx_version() -> i32 {
            static GLOBAL_VERSION: AtomicI32 = AtomicI32::new(0);

            let mut ver = GLOBAL_VERSION.load(Ordering::Relaxed);

            if ver == 0 {
                unsafe {
                    let mut osname: MaybeUninit<utsname> = MaybeUninit::uninit();
                    uname(osname.as_mut_ptr());
                    // `release` is NUL-terminated, so `atoi` parses the
                    // leading (major) version component directly.
                    ver = atoi((*osname.as_ptr()).release.as_ptr());
                    GLOBAL_VERSION.store(ver, Ordering::Relaxed);
                }
            }

            ver
        }

        fn mm_prot_from_memory_flags(memory_flags: MemoryFlags) -> i32 {
            let mut prot = 0;

            let x = memory_flags;
            if x.contains(MemoryFlags::ACCESS_READ) {
                prot |= PROT_READ
            }
            if x.contains(MemoryFlags::ACCESS_WRITE) {
                prot |= PROT_WRITE
            }
            if x.contains(MemoryFlags::ACCESS_EXECUTE) {
                prot |= PROT_EXEC
            }

            prot
        }

        /// How anonymous executable memory is backed on this platform.
        #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
        #[repr(u8)]
        pub enum AnonymousMemoryStrategy {
            Unknown = 0,
            DevShm = 1,
            TmpDir = 2,
        }

        #[cfg(not(target_os = "freebsd"))]
        fn get_tmp_dir() -> String {
            unsafe {
                let env = getenv(b"TMPDIR\0".as_ptr() as *const _);

                if !env.is_null() {
                    CStr::from_ptr(env).to_string_lossy().into_owned()
                } else {
                    String::from("/tmp")
                }
            }
        }

        #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
        enum FileType {
            None,
            Shm,
            Tmp,
        }

        struct AnonymousMemory {
            fd: i32,
            filetype: FileType,
            tmpname: String,
        }

        #[allow(clippy::needless_late_init)]
        impl AnonymousMemory {
            #[allow(unused_variables)]
            fn open(&mut self, prefer_tmp_over_dev_shm: bool) -> Result<(), Error> {
                cfgenius::cond! {
                    if cfg(target_os = "linux") {
                        // Prefer `memfd_create` on Linux; remember when the
                        // kernel lacks it so the syscall is only probed once.
                        static MEMFD_CREATE_NOT_SUPPORTED: AtomicBool = AtomicBool::new(false);

                        if !MEMFD_CREATE_NOT_SUPPORTED.load(Ordering::Relaxed) {
                            unsafe {
                                self.fd = libc::syscall(libc::SYS_memfd_create, b"vmem\0".as_ptr(), libc::MFD_CLOEXEC) as i32;

                                if self.fd >= 0 {
                                    return Ok(());
                                }

                                if errno().0 == ENOSYS {
                                    MEMFD_CREATE_NOT_SUPPORTED.store(true, Ordering::Relaxed);
                                } else {
                                    return Err(error_from_errno());
                                }
                            }
                        }
                    }
                }

                cfgenius::cond! {
                    if all(macro(has_shm_open), macro(has_shm_anon)) {
                        unsafe {
                            let _ = prefer_tmp_over_dev_shm;
                            self.fd = shm_open(libc::SHM_ANON, libc::O_RDWR | libc::O_CREAT | libc::O_EXCL, libc::S_IRUSR | libc::S_IWUSR);

                            if self.fd >= 0 {
                                return Ok(());
                            } else {
                                return Err(error_from_errno());
                            }
                        }
                    } else {
                        static INTERNAL_COUNTER: AtomicU32 = AtomicU32::new(0);

                        // Derive a unique name from the object address, the
                        // tick counter, and a global counter, retrying a few
                        // times on `EEXIST`.
                        let retry_count = 100;
                        let mut bits = self as *const Self as u64 & 0x55555555;

                        for _ in 0..retry_count {
                            bits = bits.wrapping_sub(crate::os::get_tick_count() as u64 * 773703683);
                            bits = ((bits >> 14) ^ (bits << 6)) + INTERNAL_COUNTER.fetch_add(1, Ordering::AcqRel) as u64 + 10619863;

                            let use_tmp;
                            cfgenius::cond! {
                                if macro(vm_shm_detect) {
                                    use_tmp = true;
                                } else {
                                    use_tmp = prefer_tmp_over_dev_shm;
                                }
                            };

                            if use_tmp {
                                // Rebuild the path from scratch on every
                                // retry instead of appending to the previous
                                // (failed) name.
                                self.tmpname.clear();
                                self.tmpname.push_str(&get_tmp_dir());
                                self.tmpname.push_str(&format!("/shm-id-{:016X}\0", bits));

                                unsafe {
                                    self.fd = libc::open(
                                        self.tmpname.as_ptr() as *const c_char,
                                        libc::O_RDWR | libc::O_CREAT | libc::O_EXCL,
                                        0,
                                    );

                                    if self.fd >= 0 {
                                        self.filetype = FileType::Tmp;
                                        return Ok(());
                                    }
                                }
                            } else {
                                // POSIX requires shm names to begin with '/'.
                                self.tmpname = format!("/shm-id-{:016X}\0", bits);

                                unsafe {
                                    self.fd = libc::shm_open(
                                        self.tmpname.as_ptr() as *const c_char,
                                        libc::O_RDWR | libc::O_CREAT | libc::O_EXCL,
                                        0,
                                    );

                                    if self.fd >= 0 {
                                        self.filetype = FileType::Shm;
                                        return Ok(());
                                    }
                                }
                            }

                            if errno().0 != EEXIST {
                                return Err(error_from_errno());
                            }
                        }
                    }
                }

                Err(Error::FailedToOpenAnonymousMemory)
            }

            fn unlink(&mut self) {
                #[allow(unused_variables)]
                let typ = self.filetype;
                self.filetype = FileType::None;

                cfgenius::cond! {
                    if macro(has_shm_open) {
                        if typ == FileType::Shm {
                            unsafe {
                                libc::shm_unlink(self.tmpname.as_ptr() as *const c_char);
                                return;
                            }
                        }
                    }
                }

                #[allow(unreachable_code)]
                if typ == FileType::Tmp {
                    unsafe {
                        libc::unlink(self.tmpname.as_ptr() as *const c_char);
                    }
                }
            }

            fn close(&mut self) {
                if self.fd >= 0 {
                    unsafe {
                        libc::close(self.fd);
                    }

                    self.fd = -1;
                }
            }

            const fn new() -> Self {
                Self {
                    fd: -1,
                    filetype: FileType::None,
                    tmpname: String::new(),
                }
            }

            fn allocate(&self, size: usize) -> Result<(), Error> {
                unsafe {
                    if libc::ftruncate(self.fd, size as _) != 0 {
                        return Err(error_from_errno());
                    }

                    Ok(())
                }
            }
        }

        impl Drop for AnonymousMemory {
            fn drop(&mut self) {
                self.unlink();
                self.close();
            }
        }
    }
}

cfgenius::cond! {
    if macro(vm_shm_detect) {
        fn detect_anonymous_memory_strategy() -> Result<AnonymousMemoryStrategy, Error> {
            let mut anon_mem = AnonymousMemory::new();
            let vm_info = info();

            anon_mem.open(false)?;
            anon_mem.allocate(vm_info.page_size as usize)?;

            unsafe {
                // Probe whether the platform allows executable mappings of
                // shared anonymous memory; fall back to a temp file if not.
                let ptr = libc::mmap(core::ptr::null_mut(), vm_info.page_size as _, libc::PROT_READ | libc::PROT_EXEC, libc::MAP_SHARED, anon_mem.fd, 0);
                if ptr == libc::MAP_FAILED {
                    if errno().0 == EINVAL {
                        return Ok(AnonymousMemoryStrategy::TmpDir);
                    }

                    return Err(error_from_errno());
                } else {
                    libc::munmap(ptr, vm_info.page_size as _);
                    Ok(AnonymousMemoryStrategy::DevShm)
                }
            }
        }
    }
}

cfgenius::cond! {
    if cfg(not(windows)) {
        #[allow(unreachable_code)]
        pub fn get_anonymous_memory_strategy() -> Result<AnonymousMemoryStrategy, Error> {
            cfgenius::cond! {
                if macro(vm_shm_detect) {
                    use core::sync::atomic::AtomicU8;
                    static GLOBAL_STRATEGY: AtomicU8 = AtomicU8::new(0);

                    // Decode the cached value explicitly (1 = DevShm,
                    // 2 = TmpDir) instead of transmuting the raw byte.
                    match GLOBAL_STRATEGY.load(Ordering::Acquire) {
                        1 => return Ok(AnonymousMemoryStrategy::DevShm),
                        2 => return Ok(AnonymousMemoryStrategy::TmpDir),
                        _ => {}
                    }

                    let strategy = detect_anonymous_memory_strategy()?;

                    GLOBAL_STRATEGY.store(strategy as u8, Ordering::Release);

                    return Ok(strategy);
                }
            }

            Ok(AnonymousMemoryStrategy::TmpDir)
        }

        /// Returns `true` if the host enforces a hardened runtime (W^X),
        /// i.e. if RWX anonymous mappings are rejected.
        #[cfg(not(windows))]
        pub fn has_hardened_runtime() -> bool {
            cfgenius::cond! {
                if cfg(all(target_os = "macos")) {
                    // macOS is treated as always hardened here.
                    true
                } else {
                    // Cached probe result: 0 = unknown, 1 = not hardened,
                    // 2 = hardened.
                    static GLOBAL_HARDENED_FLAG: AtomicU32 = AtomicU32::new(0);

                    let mut flag = GLOBAL_HARDENED_FLAG.load(Ordering::Acquire);

                    if flag == 0 {
                        let page_size = info().page_size;

                        unsafe {
                            // If an RWX anonymous mapping fails, the runtime
                            // enforces W^X.
                            let ptr = libc::mmap(core::ptr::null_mut(), page_size as _, libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC, libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, -1, 0);

                            if ptr == libc::MAP_FAILED {
                                flag = 2;
                            } else {
                                flag = 1;
                                libc::munmap(ptr, page_size as _);
                            }
                        }

                        GLOBAL_HARDENED_FLAG.store(flag, Ordering::Release);
                    }

                    flag == 2
                }
            }
        }

        pub const fn has_map_jit_support() -> bool {
            cfgenius::cond! {
                if cfg(all(target_os = "macos")) {
                    true
                } else {
                    false
                }
            }
        }

        pub fn map_jit_from_memory_flags(memory_flags: MemoryFlags) -> i32 {
            cfgenius::cond! {
                if cfg(target_vendor = "apple") {
                    // MAP_JIT applies only to private mappings, and only when
                    // JIT was requested or the runtime is hardened.
                    let use_map_jit = (memory_flags.contains(MemoryFlags::MMAP_ENABLE_JIT) || has_hardened_runtime())
                        && !memory_flags.contains(MemoryFlags::MAP_SHARED);

                    if use_map_jit && has_map_jit_support() {
                        libc::MAP_JIT as i32
                    } else {
                        0
                    }
                } else {
                    let _ = memory_flags;
                    0
                }
            }
        }

        pub fn get_hardened_runtime_flags() -> HardenedRuntimeFlags {
            let mut flags = 0;

            if has_hardened_runtime() {
                flags = HardenedRuntimeFlags::Enabled as u32;
            }

            if has_map_jit_support() {
                flags |= HardenedRuntimeFlags::MapJit as u32;
            }

            match flags {
                0 => HardenedRuntimeFlags::None,
                1 => HardenedRuntimeFlags::Enabled,
                2 => HardenedRuntimeFlags::MapJit,
                3 => HardenedRuntimeFlags::EnabledMapJit,
                _ => unreachable!(),
            }
        }

        pub fn max_access_flags_to_regular_access_flags(memory_flags: MemoryFlags) -> MemoryFlags {
            const MAX_PROT_SHIFT: u32 = MemoryFlags::MMAP_MAX_ACCESS_READ.trailing_zeros();

            MemoryFlags((memory_flags.0 & MemoryFlags::MMAP_MAX_ACCESS_RWX) >> MAX_PROT_SHIFT)
        }

        pub fn regular_access_flags_to_max_access_flags(memory_flags: MemoryFlags) -> MemoryFlags {
            const MAX_PROT_SHIFT: u32 = MemoryFlags::MMAP_MAX_ACCESS_READ.trailing_zeros();

            // Mask the *regular* access bits before shifting them up into
            // the maximum-access range.
            MemoryFlags((memory_flags.0 & MemoryFlags::ACCESS_RWX) << MAX_PROT_SHIFT)
        }
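
        // The two helpers above are inverse bit shifts between the regular
        // access bits and the `MMAP_MAX_ACCESS_*` bits; a small check
        // (illustrative, written as a unit test, not from the original
        // source):
        #[cfg(test)]
        mod max_access_shift_example {
            use super::*;

            #[test]
            fn round_trips_regular_access_bits() {
                let regular = MemoryFlags(MemoryFlags::ACCESS_RW);
                let max = regular_access_flags_to_max_access_flags(regular);

                assert_eq!(max, MemoryFlags::MMAP_MAX_ACCESS_RW);
                assert_eq!(max_access_flags_to_regular_access_flags(max), regular.0);
            }
        }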

        pub fn mm_max_prot_from_memory_flags(memory_flags: MemoryFlags) -> i32 {
            // Derive the protection from the maximum-access bits; zero means
            // no maximum-access hint was supplied.
            let acc = max_access_flags_to_regular_access_flags(memory_flags);
            if acc.0 != 0 {
                mm_prot_from_memory_flags(acc)
            } else {
                0
            }
        }

        fn map_memory(
            size: usize,
            memory_flags: MemoryFlags,
            fd: i32,
            offset: libc::off_t,
        ) -> Result<*mut u8, Error> {
            if size == 0 {
                return Err(Error::InvalidArgument);
            }

            let protection = mm_prot_from_memory_flags(memory_flags);

            let mut mm_flags = map_jit_from_memory_flags(memory_flags);

            mm_flags |= if memory_flags.contains(MemoryFlags::MAP_SHARED) {
                libc::MAP_SHARED
            } else {
                libc::MAP_PRIVATE
            };

            if fd == -1 {
                mm_flags |= libc::MAP_ANONYMOUS;
            }

            unsafe {
                let ptr = libc::mmap(
                    core::ptr::null_mut(),
                    size as _,
                    protection,
                    mm_flags,
                    fd,
                    offset,
                );

                if ptr == libc::MAP_FAILED {
                    return Err(error_from_errno());
                }

                Ok(ptr.cast())
            }
        }

        fn unmap_memory(ptr: *mut u8, size: usize) -> Result<(), Error> {
            if size == 0 {
                return Err(Error::InvalidArgument);
            }

            unsafe {
                if libc::munmap(ptr.cast(), size as _) == 0 {
                    Ok(())
                } else {
                    Err(error_from_errno())
                }
            }
        }

        pub fn alloc(size: usize, memory_flags: MemoryFlags) -> Result<*mut u8, Error> {
            map_memory(size, memory_flags, -1, 0)
        }

        pub fn release(ptr: *mut u8, size: usize) -> Result<(), Error> {
            unmap_memory(ptr, size)
        }

        pub fn protect(p: *mut u8, size: usize, memory_flags: MemoryFlags) -> Result<(), Error> {
            let protection = mm_prot_from_memory_flags(memory_flags);

            unsafe {
                if libc::mprotect(p.cast(), size as _, protection) == 0 {
                    Ok(())
                } else {
                    Err(error_from_errno())
                }
            }
        }
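
        // A minimal allocate → write → W^X flip → release sketch (an
        // illustrative helper, not part of the original API).
        #[allow(dead_code)]
        fn alloc_protect_release_example() -> Result<(), Error> {
            let size = info().page_size as usize;

            // Start readable/writable so the region can be populated.
            let p = alloc(size, MemoryFlags(MemoryFlags::ACCESS_RW))?;
            unsafe { p.write(0xC3) }; // e.g. an x86 `ret`

            // Drop write access before executing from the region.
            protect(p, size, MemoryFlags(MemoryFlags::ACCESS_RX))?;
            flush_instruction_cache(p, size);

            release(p, size)
        }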

        fn unmap_dual_mapping(dm: &mut DualMapping, size: usize) -> Result<(), Error> {
            let err1 = unmap_memory(dm.rx as _, size);
            let mut err2 = Ok(());

            if dm.rx != dm.rw {
                err2 = unmap_memory(dm.rw as _, size);
            }

            err1?;
            err2?;

            dm.rx = core::ptr::null();
            dm.rw = core::ptr::null_mut();

            Ok(())
        }

        pub fn alloc_dual_mapping(size: usize, memory_flags: MemoryFlags) -> Result<DualMapping, Error> {
            let mut dm = DualMapping {
                rx: core::ptr::null_mut(),
                rw: core::ptr::null_mut(),
            };

            // Reject zero sizes and sizes that would overflow `off_t`.
            if size as isize <= 0 {
                return Err(Error::InvalidArgument);
            }

            let mut prefer_tmp_over_dev_shm = memory_flags.contains(MemoryFlags::MAPPING_PREFER_TMP);

            if !prefer_tmp_over_dev_shm {
                let strategy = get_anonymous_memory_strategy()?;

                prefer_tmp_over_dev_shm = strategy == AnonymousMemoryStrategy::TmpDir;
            }

            let mut anon_mem = AnonymousMemory::new();

            anon_mem.open(prefer_tmp_over_dev_shm)?;
            anon_mem.allocate(size)?;

            let mut ptr = [core::ptr::null_mut(), core::ptr::null_mut()];

            for i in 0..2 {
                // The RX view must not be writable and the RW view must not
                // be executable.
                let restricted_memory_flags = memory_flags.0 & !DUAL_MAPPING_FILTER[i];

                ptr[i] = match map_memory(
                    size,
                    (restricted_memory_flags | MemoryFlags::MAP_SHARED).into(),
                    anon_mem.fd,
                    0,
                ) {
                    Ok(p) => p,
                    Err(e) => {
                        if i == 1 {
                            let _ = unmap_memory(ptr[0], size);
                        }

                        return Err(e);
                    }
                };
            }

            dm.rx = ptr[0];
            dm.rw = ptr[1];

            Ok(dm)
        }

        pub fn release_dual_mapping(dm: &mut DualMapping, size: usize) -> Result<(), Error> {
            unmap_dual_mapping(dm, size)
        }
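
        // Dual-mapping usage sketch (illustrative, not from the original
        // source): write through the RW alias, execute through the RX alias
        // of the same physical pages.
        #[allow(dead_code)]
        fn dual_mapping_example() -> Result<(), Error> {
            let size = info().page_size as usize;
            let mut dm = alloc_dual_mapping(
                size,
                MemoryFlags(MemoryFlags::ACCESS_RWX | MemoryFlags::MMAP_MAX_ACCESS_RWX),
            )?;

            unsafe { dm.rw.write(0xC3) }; // patch code via the RW view
            flush_instruction_cache(dm.rx, size); // before running via RX

            release_dual_mapping(&mut dm, size)
        }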

    }
}

pub fn info() -> Info {
    static INFO: once_cell::sync::Lazy<Info> = once_cell::sync::Lazy::new(get_vm_info);

    *INFO
}
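
// Sanity-check sketch for `info()` (illustrative, written as a unit test).
#[cfg(test)]
mod vm_info_example {
    use super::*;

    #[test]
    fn page_size_and_granularity_are_sane() {
        let i = info();

        assert!(i.page_size.is_power_of_two());
        assert!(i.page_granularity >= i.page_size);
    }
}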

pub fn flush_instruction_cache(p: *const u8, size: usize) {
    cfgenius::cond! {
        if cfg(any(target_arch = "x86", target_arch = "x86_64")) {
            // x86 has coherent instruction caches; nothing to do.
            let _ = p;
            let _ = size;
        } else if cfg(target_vendor = "apple") {
            extern "C" {
                fn sys_icache_invalidate(p: *const u8, size: usize);
            }

            unsafe {
                sys_icache_invalidate(p, size);
            }
        } else if cfg(windows) {
            extern "C" {
                fn GetCurrentProcess() -> *mut libc::c_void;
                fn FlushInstructionCache(
                    proc: *mut libc::c_void,
                    lp: *const u8,
                    dw_size: usize,
                ) -> i32;
            }

            unsafe {
                FlushInstructionCache(GetCurrentProcess(), p, size);
            }
        } else if cfg(target_arch = "aarch64") {
            let code = p as usize;
            let end = code + size;

            use core::arch::asm;

            // Conservative line sizes: real cache lines are larger, so
            // stepping 4 bytes at a time is correct, just slower than needed.
            const ICACHE_LINE_SIZE: usize = 4;
            const DCACHE_LINE_SIZE: usize = 4;

            // Clean and invalidate the data cache to the point of coherency.
            let mut addr = code & !(DCACHE_LINE_SIZE - 1);

            while addr < end {
                unsafe {
                    asm!("dc civac, {x}", x = in(reg) addr);
                }
                addr += DCACHE_LINE_SIZE;
            }

            unsafe {
                asm!("dsb ish");
            }

            // Invalidate the instruction cache to the point of unification.
            addr = code & !(ICACHE_LINE_SIZE - 1);

            while addr < end {
                unsafe {
                    asm!("ic ivau, {x}", x = in(reg) addr);
                }
                addr += ICACHE_LINE_SIZE;
            }

            unsafe {
                asm!("dsb ish");
                asm!("isb");
            }
        } else if cfg(target_arch = "riscv64") {
            unsafe {
                let _ = wasmtime_jit_icache_coherence::clear_cache(p.cast(), size);
                let _ = wasmtime_jit_icache_coherence::pipeline_flush_mt();
            }
        } else {
            // Unsupported architecture: assume coherent caches.
        }
    }
}

#[cfg(not(windows))]
pub fn hardened_runtime_info() -> HardenedRuntimeInfo {
    HardenedRuntimeInfo {
        flags: get_hardened_runtime_flags(),
    }
}

/// Toggles per-thread write protection of `MAP_JIT` pages on Apple silicon
/// (`pthread_jit_write_protect_np`); a no-op on all other targets.
pub fn protect_jit_memory(access: ProtectJitAccess) {
    #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
    {
        unsafe {
            let x = match access {
                ProtectJitAccess::ReadWrite => 0,
                _ => 1,
            };

            libc::pthread_jit_write_protect_np(x);
        }
    }

    let _ = access;
}
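
// Typical MAP_JIT write → execute toggle (a sketch; `code` and `len` are
// hypothetical, and the calls are no-ops outside Apple silicon).
#[allow(dead_code)]
fn protect_jit_example(code: *mut u8, len: usize) {
    // Make this thread's JIT pages writable while emitting code.
    protect_jit_memory(ProtectJitAccess::ReadWrite);
    unsafe { core::ptr::write_bytes(code, 0, len) };

    // Flip back to executable and invalidate stale instructions.
    protect_jit_memory(ProtectJitAccess::ReadExecute);
    flush_instruction_cache(code, len);
}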

cfgenius::cond! {
    if cfg(windows) {
        use winapi::um::sysinfoapi::SYSTEM_INFO;
        use winapi::{
            shared::{minwindef::DWORD, ntdef::HANDLE},
            um::{
                handleapi::{CloseHandle, INVALID_HANDLE_VALUE},
                memoryapi::{
                    CreateFileMappingW, MapViewOfFile, UnmapViewOfFile, VirtualAlloc, VirtualFree,
                    VirtualProtect, FILE_MAP_EXECUTE, FILE_MAP_READ, FILE_MAP_WRITE,
                },
                sysinfoapi::GetSystemInfo,
                winnt::{
                    MEM_COMMIT, MEM_RELEASE, MEM_RESERVE, PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE,
                    PAGE_READONLY, PAGE_READWRITE,
                },
            },
        };

        /// Owns a Win32 `HANDLE` and closes it on drop.
        struct ScopedHandle {
            value: HANDLE,
        }

        impl ScopedHandle {
            fn new() -> Self {
                Self { value: core::ptr::null_mut() }
            }
        }

        impl Drop for ScopedHandle {
            fn drop(&mut self) {
                if !self.value.is_null() {
                    unsafe {
                        CloseHandle(self.value);
                    }
                }
            }
        }

        fn get_vm_info() -> Info {
            let mut system_info = MaybeUninit::<SYSTEM_INFO>::uninit();
            unsafe {
                GetSystemInfo(system_info.as_mut_ptr());

                let system_info = system_info.assume_init();

                Info {
                    page_size: system_info.dwPageSize as u32,
                    page_granularity: system_info.dwAllocationGranularity as u32,
                }
            }
        }

        fn protect_flags_from_memory_flags(memory_flags: MemoryFlags) -> DWORD {
            let protect_flags;

            if memory_flags.contains(MemoryFlags::ACCESS_EXECUTE) {
                protect_flags = if memory_flags.contains(MemoryFlags::ACCESS_WRITE) {
                    PAGE_EXECUTE_READWRITE
                } else {
                    PAGE_EXECUTE_READ
                };
            } else if memory_flags.contains(MemoryFlags::ACCESS_RW) {
                protect_flags = if memory_flags.contains(MemoryFlags::ACCESS_WRITE) {
                    PAGE_READWRITE
                } else {
                    PAGE_READONLY
                };
            } else {
                protect_flags = PAGE_READONLY;
            }

            protect_flags
        }

        fn desired_access_from_memory_flags(memory_flags: MemoryFlags) -> DWORD {
            let mut access = if memory_flags.contains(MemoryFlags::ACCESS_WRITE) {
                FILE_MAP_WRITE
            } else {
                FILE_MAP_READ
            };

            if memory_flags.contains(MemoryFlags::ACCESS_EXECUTE) {
                access |= FILE_MAP_EXECUTE;
            }

            access
        }

        pub fn alloc(size: usize, memory_flags: MemoryFlags) -> Result<*mut u8, Error> {
            if size == 0 {
                return Err(Error::InvalidArgument);
            }

            unsafe {
                let protect = protect_flags_from_memory_flags(memory_flags);
                let result = VirtualAlloc(core::ptr::null_mut(), size, MEM_COMMIT | MEM_RESERVE, protect);

                if result.is_null() {
                    return Err(Error::OutOfMemory);
                }

                Ok(result as *mut u8)
            }
        }

        pub fn release(ptr: *mut u8, size: usize) -> Result<(), Error> {
            if size == 0 || ptr.is_null() {
                return Err(Error::InvalidArgument);
            }

            unsafe {
                if VirtualFree(ptr as *mut _, 0, MEM_RELEASE) == 0 {
                    return Err(Error::InvalidArgument);
                }
            }

            Ok(())
        }

        pub fn protect(p: *mut u8, size: usize, memory_flags: MemoryFlags) -> Result<(), Error> {
            let protect_flags = protect_flags_from_memory_flags(memory_flags);
            let mut old_flags = 0;

            unsafe {
                if VirtualProtect(p as _, size, protect_flags, &mut old_flags) != 0 {
                    return Ok(());
                }

                Err(Error::InvalidArgument)
            }
        }

        pub fn alloc_dual_mapping(size: usize, memory_flags: MemoryFlags) -> Result<DualMapping, Error> {
            if size == 0 {
                return Err(Error::InvalidArgument);
            }

            let mut handle = ScopedHandle::new();

            unsafe {
                handle.value = CreateFileMappingW(
                    INVALID_HANDLE_VALUE,
                    core::ptr::null_mut(),
                    PAGE_EXECUTE_READWRITE,
                    ((size as u64) >> 32) as _,
                    (size & 0xFFFFFFFF) as _,
                    core::ptr::null_mut(),
                );

                if handle.value.is_null() {
                    return Err(Error::OutOfMemory);
                }

                let mut ptr = [core::ptr::null_mut(), core::ptr::null_mut()];

                for i in 0..2 {
                    let access_flags = memory_flags.0 & !DUAL_MAPPING_FILTER[i];
                    let desired_access = desired_access_from_memory_flags(access_flags.into());
                    ptr[i] = MapViewOfFile(handle.value, desired_access, 0, 0, size);

                    if ptr[i].is_null() {
                        // If the second view fails, unmap the first one
                        // before bailing out.
                        if i == 1 {
                            UnmapViewOfFile(ptr[0]);
                        }

                        return Err(Error::OutOfMemory);
                    }
                }

                Ok(DualMapping {
                    rx: ptr[0] as _,
                    rw: ptr[1] as _,
                })
            }
        }

        pub fn release_dual_mapping(dm: &mut DualMapping, _size: usize) -> Result<(), Error> {
            let mut failed = false;

            unsafe {
                if UnmapViewOfFile(dm.rx as _) == 0 {
                    failed = true;
                }

                if dm.rx != dm.rw && UnmapViewOfFile(dm.rw as _) == 0 {
                    failed = true;
                }

                if failed {
                    return Err(Error::InvalidArgument);
                }

                dm.rx = core::ptr::null();
                dm.rw = core::ptr::null_mut();

                Ok(())
            }
        }
    }
}