#![allow(unused_imports, dead_code)]
use alloc::format;
use alloc::string::String;

use core::{
    ffi::CStr,
    mem::MaybeUninit,
    ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign},
    ptr::{addr_of, addr_of_mut},
    sync::atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering},
};

/// Virtual-memory information queried from the operating system.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Info {
    /// Size of a virtual-memory page, in bytes.
    pub page_size: u32,
    /// Allocation granularity, in bytes (at least `page_size`).
    pub page_granularity: u32,
}

/// Memory mapping access and behavior flags, stored as a plain `u32`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct MemoryFlags(pub u32);

impl From<MemoryFlags> for u32 {
    fn from(val: MemoryFlags) -> Self {
        val.0
    }
}

impl From<u32> for MemoryFlags {
    fn from(value: u32) -> Self {
        Self(value)
    }
}

impl MemoryFlags {
    pub const NONE: u32 = 0;
    /// Memory is readable.
    pub const ACCESS_READ: u32 = 0x00000001;
    /// Memory is writable.
    pub const ACCESS_WRITE: u32 = 0x00000002;
    /// Memory is executable.
    pub const ACCESS_EXECUTE: u32 = 0x00000004;
    /// Read and write access.
    pub const ACCESS_RW: u32 = Self::ACCESS_READ | Self::ACCESS_WRITE;
    /// Read and execute access.
    pub const ACCESS_RX: u32 = Self::ACCESS_READ | Self::ACCESS_EXECUTE;
    /// Read, write, and execute access.
    pub const ACCESS_RWX: u32 = Self::ACCESS_READ | Self::ACCESS_WRITE | Self::ACCESS_EXECUTE;

    /// Request a JIT-enabled mapping (`MAP_JIT` on Apple platforms).
    pub const MMAP_ENABLE_JIT: u32 = 0x00000010;
    /// Maximum-access counterpart of `ACCESS_READ`.
    pub const MMAP_MAX_ACCESS_READ: u32 = 0x00000020;
    /// Maximum-access counterpart of `ACCESS_WRITE`.
    pub const MMAP_MAX_ACCESS_WRITE: u32 = 0x00000040;
    /// Maximum-access counterpart of `ACCESS_EXECUTE`.
    pub const MMAP_MAX_ACCESS_EXECUTE: u32 = 0x00000080;

    pub const MMAP_MAX_ACCESS_RW: u32 = Self::MMAP_MAX_ACCESS_READ | Self::MMAP_MAX_ACCESS_WRITE;
    pub const MMAP_MAX_ACCESS_RX: u32 = Self::MMAP_MAX_ACCESS_READ | Self::MMAP_MAX_ACCESS_EXECUTE;
    pub const MMAP_MAX_ACCESS_RWX: u32 =
        Self::MMAP_MAX_ACCESS_READ | Self::MMAP_MAX_ACCESS_WRITE | Self::MMAP_MAX_ACCESS_EXECUTE;

    /// Create a shared mapping (`MAP_SHARED`) instead of a private one.
    pub const MAP_SHARED: u32 = 0x00000100;

    /// Prefer a temporary directory over `/dev/shm` when an anonymous file
    /// is needed to back the mapping.
    pub const MAPPING_PREFER_TMP: u32 = 0x80000000;
}

impl MemoryFlags {
    /// Returns `true` if **any** bit of `other` is set in `self`.
    pub fn contains(self, other: u32) -> bool {
        (self.0 & other) != 0
    }
}

impl BitOr<MemoryFlags> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl BitOr<u32> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitor(self, rhs: u32) -> Self::Output {
        Self(self.0 | rhs)
    }
}

impl BitOrAssign<MemoryFlags> for MemoryFlags {
    #[inline]
    fn bitor_assign(&mut self, rhs: Self) {
        *self = *self | rhs;
    }
}

impl BitOrAssign<u32> for MemoryFlags {
    #[inline]
    fn bitor_assign(&mut self, rhs: u32) {
        *self = *self | rhs;
    }
}

impl BitAnd<MemoryFlags> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl BitAnd<u32> for MemoryFlags {
    type Output = Self;

    #[inline]
    fn bitand(self, rhs: u32) -> Self::Output {
        Self(self.0 & rhs)
    }
}

impl BitAndAssign<MemoryFlags> for MemoryFlags {
    #[inline]
    fn bitand_assign(&mut self, rhs: Self) {
        *self = *self & rhs;
    }
}

impl BitAndAssign<u32> for MemoryFlags {
    #[inline]
    fn bitand_assign(&mut self, rhs: u32) {
        *self = *self & rhs;
    }
}

impl PartialEq<u32> for MemoryFlags {
    #[inline]
    fn eq(&self, other: &u32) -> bool {
        self.0 == *other
    }
}

/// Two views of the same physical pages: `rx` is read/execute and `rw`
/// is read/write.
pub struct DualMapping {
    /// Read/execute view of the mapping.
    pub rx: *const u8,
    /// Read/write view of the mapping.
    pub rw: *mut u8,
}

/// Flags describing the hardened runtime of the host.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(u32)]
pub enum HardenedRuntimeFlags {
    #[default]
    None = 0,
    /// The hardened runtime is enabled: anonymous RWX mappings are rejected.
    Enabled = 0x00000001,
    /// `MAP_JIT` is available for JIT mappings.
    MapJit = 0x00000002,

    EnabledMapJit = Self::Enabled as u32 | Self::MapJit as u32,
}

/// Information about the hardened runtime of the host.
#[derive(Default)]
pub struct HardenedRuntimeInfo {
    pub flags: HardenedRuntimeFlags,
}

/// Access requested from `protect_jit_memory`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u32)]
pub enum ProtectJitAccess {
    /// The current thread is about to write generated code.
    ReadWrite = 0,
    /// The current thread is about to execute generated code.
    ReadExecute = 1,
}

/// Flags stripped from each half of a dual mapping: the first (RX) view must
/// not be writable, the second (RW) view must not be executable.
pub const DUAL_MAPPING_FILTER: [u32; 2] = [
    MemoryFlags::ACCESS_WRITE | MemoryFlags::MMAP_MAX_ACCESS_WRITE,
    MemoryFlags::ACCESS_EXECUTE | MemoryFlags::MMAP_MAX_ACCESS_EXECUTE,
];
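
// Worked example: requesting ACCESS_RWX | MMAP_MAX_ACCESS_RWX for a dual
// mapping leaves ACCESS_RX | MMAP_MAX_ACCESS_RX on the first (RX) view and
// ACCESS_RW | MMAP_MAX_ACCESS_RW on the second (RW) view after filtering.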

use errno::errno;
use libc::*;

use crate::AsmError;

cfgenius::define! {
    vm_shm_detect = cfg(any(target_vendor = "apple", target_os = "android"));

    has_shm_open = cfg(not(target_os = "android"));
    has_pthread_jit_write_protect_np = cfg(target_os = "macos");

    has_shm_anon = cfg(target_os = "freebsd");
}

/// Maps the current `errno` value to an `AsmError`.
fn error_from_errno() -> AsmError {
    match errno().0 {
        EACCES | EAGAIN | ENODEV | EPERM => AsmError::InvalidState,
        EFBIG | ENOMEM | EOVERFLOW => AsmError::OutOfMemory,
        EMFILE | ENFILE => AsmError::TooManyHandles,
        _ => AsmError::InvalidArgument,
    }
}

cfgenius::cond! {
    if cfg(not(windows)) {
        fn get_vm_info() -> Info {
            extern "C" {
                fn getpagesize() -> c_int;
            }

            let page_size = unsafe { getpagesize() as usize };

            Info {
                page_size: page_size as _,
                // Use at least 64KiB allocation granularity on all platforms.
                page_granularity: 65536.max(page_size) as _,
            }
        }

        #[cfg(target_os = "macos")]
        fn get_osx_version() -> i32 {
            // Cached major version of the Darwin kernel release.
            static GLOBAL_VERSION: AtomicI32 = AtomicI32::new(0);

            let mut ver = GLOBAL_VERSION.load(Ordering::Relaxed);

            if ver == 0 {
                unsafe {
                    let mut osname: MaybeUninit<utsname> = MaybeUninit::uninit();
                    uname(osname.as_mut_ptr());
                    // `release` is NUL-terminated, so atoi can parse it directly.
                    ver = atoi((*osname.as_ptr()).release.as_ptr());
                    GLOBAL_VERSION.store(ver, Ordering::Relaxed);
                }
            }

            ver
        }

        /// Converts `MemoryFlags` access bits to a `PROT_*` value for
        /// `mmap`/`mprotect`.
        fn mm_prot_from_memory_flags(memory_flags: MemoryFlags) -> i32 {
            let mut prot = 0;

            if memory_flags.contains(MemoryFlags::ACCESS_READ) { prot |= PROT_READ }
            if memory_flags.contains(MemoryFlags::ACCESS_WRITE) { prot |= PROT_WRITE }
            if memory_flags.contains(MemoryFlags::ACCESS_EXECUTE) { prot |= PROT_EXEC }

            prot
        }

        /// Strategy for backing anonymous memory with a file descriptor.
        // `repr(u8)` keeps the transmute in `get_anonymous_memory_strategy` sound.
        #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
        #[repr(u8)]
        pub enum AnonymousMemoryStrategy {
            Unknown = 0,
            DevShm = 1,
            TmpDir = 2,
        }

        #[cfg(not(target_os = "freebsd"))]
        fn get_tmp_dir() -> String {
            // Honor $TMPDIR when set, otherwise fall back to /tmp.
            unsafe {
                let env = getenv(b"TMPDIR\0".as_ptr() as *const _);

                if !env.is_null() {
                    CStr::from_ptr(env).to_string_lossy().into_owned()
                } else {
                    String::from("/tmp")
                }
            }
        }

        #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
        enum FileType {
            None,
            Shm,
            Tmp,
        }

        /// An anonymous file used to back a (dual) memory mapping.
        struct AnonymousMemory {
            fd: i32,
            filetype: FileType,
            tmpname: String,
        }

        #[allow(clippy::needless_late_init)]
        impl AnonymousMemory {
            /// Opens the backing file descriptor, trying platform-specific
            /// mechanisms in order of preference.
            #[allow(unused_variables)]
            fn open(&mut self, prefer_tmp_over_dev_shm: bool) -> Result<(), AsmError> {
                cfgenius::cond! {
                    if cfg(target_os = "linux") {
                        // Linux: prefer memfd_create(), remembering when the
                        // running kernel does not support it.
                        static MEMFD_CREATE_NOT_SUPPORTED: AtomicBool = AtomicBool::new(false);

                        if !MEMFD_CREATE_NOT_SUPPORTED.load(Ordering::Relaxed) {
                            unsafe {
                                self.fd = libc::syscall(libc::SYS_memfd_create, b"vmem\0".as_ptr(), libc::MFD_CLOEXEC) as i32;

                                if self.fd >= 0 {
                                    return Ok(());
                                }

                                if errno().0 == ENOSYS {
                                    MEMFD_CREATE_NOT_SUPPORTED.store(true, Ordering::Relaxed);
                                } else {
                                    return Err(error_from_errno());
                                }
                            }
                        }
                    }
                }

                cfgenius::cond! {
                    if all(macro(has_shm_open), macro(has_shm_anon)) {
                        // FreeBSD: SHM_ANON creates an anonymous shm object directly.
                        unsafe {
                            let _ = prefer_tmp_over_dev_shm;
                            self.fd = shm_open(libc::SHM_ANON, libc::O_RDWR | libc::O_CREAT | libc::O_EXCL, libc::S_IRUSR | libc::S_IWUSR);

                            if self.fd >= 0 {
                                return Ok(());
                            } else {
                                return Err(error_from_errno());
                            }
                        }
                    } else {
                        static INTERNAL_COUNTER: AtomicU32 = AtomicU32::new(0);

                        // Generate a pseudo-random, pseudo-unique name and retry
                        // a bounded number of times if it already exists.
                        let retry_count = 100;
                        let mut bits = self as *const Self as u64 & 0x55555555;

                        for _ in 0..retry_count {
                            bits = bits.wrapping_sub(super::os::get_tick_count() as u64 * 773703683);
                            bits = ((bits >> 14) ^ (bits << 6)) + INTERNAL_COUNTER.fetch_add(1, Ordering::AcqRel) as u64 + 10619863;

                            let use_tmp;
                            cfgenius::cond! {
                                if macro(vm_shm_detect) {
                                    use_tmp = true;
                                } else {
                                    use_tmp = prefer_tmp_over_dev_shm;
                                }
                            };

                            if use_tmp {
                                // Rebuild the name from scratch on every retry.
                                self.tmpname = format!("{}/shm-id-{:016X}\0", get_tmp_dir(), bits);

                                unsafe {
                                    self.fd = libc::open(
                                        self.tmpname.as_ptr() as *const c_char,
                                        libc::O_RDWR | libc::O_CREAT | libc::O_EXCL,
                                        0,
                                    );

                                    if self.fd >= 0 {
                                        self.filetype = FileType::Tmp;
                                        return Ok(());
                                    }
                                }
                            } else {
                                self.tmpname = format!("shm-id-{:016X}\0", bits);

                                unsafe {
                                    self.fd = libc::shm_open(
                                        self.tmpname.as_ptr() as *const c_char,
                                        libc::O_RDWR | libc::O_CREAT | libc::O_EXCL,
                                        0,
                                    );

                                    if self.fd >= 0 {
                                        self.filetype = FileType::Shm;
                                        return Ok(());
                                    }
                                }
                            }

                            if errno().0 != EEXIST {
                                return Err(error_from_errno());
                            }
                        }
                    }
                }

                Err(AsmError::FailedToOpenAnonymousMemory)
            }

            /// Unlinks the backing file so the kernel reclaims it once closed.
            fn unlink(&mut self) {
                #[allow(unused_variables)]
                let typ = self.filetype;
                self.filetype = FileType::None;

                cfgenius::cond! {
                    if macro(has_shm_open) {
                        if typ == FileType::Shm {
                            unsafe {
                                libc::shm_unlink(self.tmpname.as_ptr() as *const c_char);
                                return;
                            }
                        }
                    }
                }

                #[allow(unreachable_code)]
                if typ == FileType::Tmp {
                    unsafe {
                        libc::unlink(self.tmpname.as_ptr() as *const c_char);
                    }
                }
            }

            fn close(&mut self) {
                if self.fd >= 0 {
                    unsafe {
                        libc::close(self.fd);
                    }

                    self.fd = -1;
                }
            }

            const fn new() -> Self {
                Self {
                    fd: -1,
                    filetype: FileType::None,
                    tmpname: String::new(),
                }
            }

            /// Resizes the backing file to `size` bytes.
            fn allocate(&self, size: usize) -> Result<(), AsmError> {
                unsafe {
                    if libc::ftruncate(self.fd, size as _) != 0 {
                        return Err(error_from_errno());
                    }
                }

                Ok(())
            }
        }

        impl Drop for AnonymousMemory {
            fn drop(&mut self) {
                self.unlink();
                self.close();
            }
        }
    }
}

cfgenius::cond! {
    if macro(vm_shm_detect) {
        /// Detects whether shm-backed memory can be mapped with execute
        /// permission; if mmap rejects it, fall back to temporary files.
        fn detect_anonymous_memory_strategy() -> Result<AnonymousMemoryStrategy, AsmError> {
            let mut anon_mem = AnonymousMemory::new();
            let vm_info = info();

            anon_mem.open(false)?;
            anon_mem.allocate(vm_info.page_size as usize)?;

            unsafe {
                let ptr = libc::mmap(core::ptr::null_mut(), vm_info.page_size as _, libc::PROT_READ | libc::PROT_EXEC, libc::MAP_SHARED, anon_mem.fd, 0);

                if ptr == libc::MAP_FAILED {
                    if errno().0 == EINVAL {
                        return Ok(AnonymousMemoryStrategy::TmpDir);
                    }

                    return Err(error_from_errno());
                } else {
                    libc::munmap(ptr, vm_info.page_size as _);
                    Ok(AnonymousMemoryStrategy::DevShm)
                }
            }
        }
    }
}

cfgenius::cond! {
    if cfg(not(windows)) {
        #[allow(unreachable_code)]
        pub fn get_anonymous_memory_strategy() -> Result<AnonymousMemoryStrategy, AsmError> {
            cfgenius::cond! {
                if macro(vm_shm_detect) {
                    use core::sync::atomic::AtomicU8;

                    // Cached strategy; 0 (Unknown) means "not detected yet".
                    static GLOBAL_STRATEGY: AtomicU8 = AtomicU8::new(0);

                    let cached = GLOBAL_STRATEGY.load(Ordering::Acquire);

                    if cached != 0 {
                        // SAFETY: only valid discriminants of the repr(u8) enum
                        // are ever stored in GLOBAL_STRATEGY.
                        return Ok(unsafe { core::mem::transmute(cached) });
                    }

                    let strategy = detect_anonymous_memory_strategy()?;

                    GLOBAL_STRATEGY.store(strategy as u8, Ordering::Release);

                    return Ok(strategy);
                }
            }

            Ok(AnonymousMemoryStrategy::TmpDir)
        }

        /// Returns `true` if the host forbids anonymous RWX mappings
        /// (a "hardened runtime").
        #[cfg(not(windows))]
        pub fn has_hardened_runtime() -> bool {
            cfgenius::cond! {
                if cfg(target_os = "macos") {
                    // macOS is always treated as hardened.
                    true
                } else {
                    // 0 = unknown, 1 = not hardened, 2 = hardened.
                    static GLOBAL_HARDENED_FLAG: AtomicU32 = AtomicU32::new(0);

                    let mut flag = GLOBAL_HARDENED_FLAG.load(Ordering::Acquire);

                    if flag == 0 {
                        let page_size = info().page_size;

                        unsafe {
                            // Probe by attempting an anonymous RWX mapping.
                            let ptr = libc::mmap(core::ptr::null_mut(), page_size as _, libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC, libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, -1, 0);

                            if ptr == libc::MAP_FAILED {
                                flag = 2;
                            } else {
                                flag = 1;
                                libc::munmap(ptr, page_size as _);
                            }
                        }

                        GLOBAL_HARDENED_FLAG.store(flag, Ordering::Release);
                    }

                    flag == 2
                }
            }
        }

        /// Returns `true` if the platform supports `MAP_JIT`.
        pub const fn has_map_jit_support() -> bool {
            cfgenius::cond! {
                if cfg(target_os = "macos") {
                    true
                } else {
                    false
                }
            }
        }

        /// Returns `MAP_JIT` when a JIT mapping is requested (or implied by a
        /// hardened runtime) and supported, otherwise 0. `MAP_JIT` cannot be
        /// combined with shared mappings.
        pub fn map_jit_from_memory_flags(memory_flags: MemoryFlags) -> i32 {
            cfgenius::cond! {
                if cfg(target_vendor = "apple") {
                    let use_map_jit = (memory_flags.contains(MemoryFlags::MMAP_ENABLE_JIT) || has_hardened_runtime())
                        && !memory_flags.contains(MemoryFlags::MAP_SHARED);

                    if use_map_jit && has_map_jit_support() {
                        libc::MAP_JIT as i32
                    } else {
                        0
                    }
                } else {
                    let _ = memory_flags;
                    0
                }
            }
        }

        /// Queries hardened-runtime capabilities of the host.
        pub fn get_hardened_runtime_flags() -> HardenedRuntimeFlags {
            let mut flags = 0;

            if has_hardened_runtime() {
                flags = HardenedRuntimeFlags::Enabled as u32;
            }

            if has_map_jit_support() {
                flags |= HardenedRuntimeFlags::MapJit as u32;
            }

            match flags {
                0 => HardenedRuntimeFlags::None,
                1 => HardenedRuntimeFlags::Enabled,
                2 => HardenedRuntimeFlags::MapJit,
                3 => HardenedRuntimeFlags::EnabledMapJit,
                _ => unreachable!(),
            }
        }

        /// Converts `MMAP_MAX_ACCESS_*` bits into their `ACCESS_*` counterparts.
        pub fn max_access_flags_to_regular_access_flags(memory_flags: MemoryFlags) -> MemoryFlags {
            const MAX_PROT_SHIFT: u32 = MemoryFlags::MMAP_MAX_ACCESS_READ.trailing_zeros();

            MemoryFlags((memory_flags.0 & MemoryFlags::MMAP_MAX_ACCESS_RWX) >> MAX_PROT_SHIFT)
        }

        /// Converts `ACCESS_*` bits into their `MMAP_MAX_ACCESS_*` counterparts.
        pub fn regular_access_flags_to_max_access_flags(memory_flags: MemoryFlags) -> MemoryFlags {
            const MAX_PROT_SHIFT: u32 = MemoryFlags::MMAP_MAX_ACCESS_READ.trailing_zeros();

            MemoryFlags((memory_flags.0 & MemoryFlags::ACCESS_RWX) << MAX_PROT_SHIFT)
        }

        /// Computes the maximum protection (`PROT_*`) for a mapping from its
        /// `MMAP_MAX_ACCESS_*` bits, falling back to the regular access bits
        /// when no maximum is specified.
        pub fn mm_max_prot_from_memory_flags(memory_flags: MemoryFlags) -> i32 {
            let max_access = max_access_flags_to_regular_access_flags(memory_flags);

            if max_access.0 != 0 {
                mm_prot_from_memory_flags(max_access)
            } else {
                mm_prot_from_memory_flags(memory_flags)
            }
        }
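
        // Worked example: ACCESS_READ (0x1) shifted left by MAX_PROT_SHIFT (5)
        // yields MMAP_MAX_ACCESS_READ (0x20), and shifting right converts back.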

        /// Thin wrapper over `mmap` that derives protection and mapping flags
        /// from `MemoryFlags`. Pass `fd == -1` for an anonymous mapping.
        fn map_memory(
            size: usize,
            memory_flags: MemoryFlags,
            fd: i32,
            offset: libc::off_t,
        ) -> Result<*mut u8, AsmError> {
            if size == 0 {
                return Err(AsmError::InvalidArgument);
            }

            let protection = mm_prot_from_memory_flags(memory_flags);
            let mut mm_flags = map_jit_from_memory_flags(memory_flags);

            mm_flags |= if memory_flags.contains(MemoryFlags::MAP_SHARED) {
                libc::MAP_SHARED
            } else {
                libc::MAP_PRIVATE
            };

            if fd == -1 {
                mm_flags |= libc::MAP_ANONYMOUS;
            }

            unsafe {
                let ptr = libc::mmap(
                    core::ptr::null_mut(),
                    size as _,
                    protection,
                    mm_flags,
                    fd,
                    offset,
                );

                if ptr == libc::MAP_FAILED {
                    return Err(error_from_errno());
                }

                Ok(ptr.cast())
            }
        }

        fn unmap_memory(ptr: *mut u8, size: usize) -> Result<(), AsmError> {
            if size == 0 {
                return Err(AsmError::InvalidArgument);
            }

            unsafe {
                if libc::munmap(ptr.cast(), size as _) == 0 {
                    Ok(())
                } else {
                    Err(error_from_errno())
                }
            }
        }

        /// Allocates `size` bytes of virtual memory with the given access flags.
        pub fn alloc(size: usize, memory_flags: MemoryFlags) -> Result<*mut u8, AsmError> {
            map_memory(size, memory_flags, -1, 0)
        }

        /// Releases memory previously returned by `alloc`.
        pub fn release(ptr: *mut u8, size: usize) -> Result<(), AsmError> {
            unmap_memory(ptr, size)
        }

        /// Changes the protection of an existing mapping.
        pub fn protect(p: *mut u8, size: usize, memory_flags: MemoryFlags) -> Result<(), AsmError> {
            let protection = mm_prot_from_memory_flags(memory_flags);

            unsafe {
                if libc::mprotect(p.cast(), size as _, protection) == 0 {
                    Ok(())
                } else {
                    Err(error_from_errno())
                }
            }
        }
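
        // A minimal usage sketch of the three calls above (error handling elided):
        //
        //     let size = info().page_size as usize;
        //     let p = alloc(size, MemoryFlags::ACCESS_RW.into())?;
        //     // ... write code into `p` ...
        //     protect(p, size, MemoryFlags::ACCESS_RX.into())?;
        //     release(p, size)?;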

        fn unmap_dual_mapping(dm: &mut DualMapping, size: usize) -> Result<(), AsmError> {
            // Unmap both views; when they alias, unmap only once.
            let err1 = unmap_memory(dm.rx as _, size);
            let mut err2 = Ok(());

            if dm.rx != dm.rw as *const u8 {
                err2 = unmap_memory(dm.rw as _, size);
            }

            err1?;
            err2?;

            dm.rx = core::ptr::null();
            dm.rw = core::ptr::null_mut();

            Ok(())
        }

        /// Allocates a dual mapping: two views of the same anonymous file,
        /// one read/execute and one read/write.
        pub fn alloc_dual_mapping(size: usize, memory_flags: MemoryFlags) -> Result<DualMapping, AsmError> {
            let mut dm = DualMapping {
                rx: core::ptr::null(),
                rw: core::ptr::null_mut(),
            };

            // Reject zero and anything that would overflow a signed offset.
            if size as isize <= 0 {
                return Err(AsmError::InvalidArgument);
            }

            let mut prefer_tmp_over_dev_shm = memory_flags.contains(MemoryFlags::MAPPING_PREFER_TMP);

            if !prefer_tmp_over_dev_shm {
                let strategy = get_anonymous_memory_strategy()?;
                prefer_tmp_over_dev_shm = strategy == AnonymousMemoryStrategy::TmpDir;
            }

            let mut anon_mem = AnonymousMemory::new();

            anon_mem.open(prefer_tmp_over_dev_shm)?;
            anon_mem.allocate(size)?;

            let mut ptr = [core::ptr::null_mut(), core::ptr::null_mut()];

            for i in 0..2 {
                // Strip write access from the RX view and execute access from
                // the RW view.
                let restricted_memory_flags = memory_flags.0 & !DUAL_MAPPING_FILTER[i];

                ptr[i] = match map_memory(
                    size,
                    (restricted_memory_flags | MemoryFlags::MAP_SHARED).into(),
                    anon_mem.fd,
                    0,
                ) {
                    Ok(p) => p,
                    Err(e) => {
                        if i == 1 {
                            let _ = unmap_memory(ptr[0], size);
                        }

                        return Err(e);
                    }
                };
            }

            dm.rx = ptr[0];
            dm.rw = ptr[1];

            Ok(dm)
        }

        /// Releases a dual mapping created by `alloc_dual_mapping`.
        pub fn release_dual_mapping(dm: &mut DualMapping, size: usize) -> Result<(), AsmError> {
            unmap_dual_mapping(dm, size)
        }

    }
}

/// Returns cached virtual-memory info, querying the OS on first use.
pub fn info() -> Info {
    static mut INFO: Info = Info {
        page_granularity: 0,
        page_size: 0,
    };
    static INFO_INIT: AtomicBool = AtomicBool::new(false);

    if INFO_INIT.load(Ordering::Acquire) {
        unsafe { addr_of!(INFO).read() }
    } else {
        unsafe {
            let info = get_vm_info();
            addr_of_mut!(INFO).write(info);
            // Release pairs with the Acquire load above so readers that see
            // the flag also see the initialized INFO.
            INFO_INIT.store(true, Ordering::Release);
            info
        }
    }
}

/// Flushes the instruction cache for a region that has just been written
/// with executable code.
pub fn flush_instruction_cache(p: *const u8, size: usize) {
    cfgenius::cond! {
        if cfg(any(target_arch = "x86", target_arch = "x86_64")) {
            // x86 instruction caches are coherent with stores; nothing to do.
            let _ = p;
            let _ = size;
        } else if cfg(target_vendor = "apple") {
            extern "C" {
                fn sys_icache_invalidate(p: *const u8, size: usize);
            }

            unsafe {
                sys_icache_invalidate(p, size);
            }
        } else if cfg(windows) {
            extern "system" {
                fn GetCurrentProcess() -> *mut libc::c_void;
                fn FlushInstructionCache(
                    proc: *mut libc::c_void,
                    lp: *const u8,
                    dw_size: usize,
                ) -> i32;
            }

            unsafe {
                FlushInstructionCache(GetCurrentProcess(), p, size);
            }
        } else if cfg(target_arch = "aarch64") {
            let code = p as usize;
            let end = code + size;

            use core::arch::asm;

            // Conservative line sizes: flushing every 4 bytes is valid on any
            // AArch64 implementation, merely more work than necessary.
            const ICACHE_LINE_SIZE: usize = 4;
            const DCACHE_LINE_SIZE: usize = 4;

            // Clean and invalidate the data cache to the point of coherency.
            let mut addr = code & !(DCACHE_LINE_SIZE - 1);

            while addr < end {
                unsafe {
                    asm!("dc civac, {x}", x = in(reg) addr);
                }
                addr += DCACHE_LINE_SIZE;
            }

            unsafe {
                asm!("dsb ish");
            }

            // Invalidate the instruction cache.
            addr = code & !(ICACHE_LINE_SIZE - 1);

            while addr < end {
                unsafe {
                    asm!("ic ivau, {x}", x = in(reg) addr);
                }
                addr += ICACHE_LINE_SIZE;
            }

            // Synchronize and flush the instruction pipeline.
            unsafe {
                asm!("dsb ish");
                asm!("isb");
            }
        } else if cfg(any(target_arch = "riscv64", target_arch = "riscv32")) {
            unsafe {
                let _ = wasmtime_jit_icache_coherence::clear_cache(p.cast(), size);
                let _ = wasmtime_jit_icache_coherence::pipeline_flush_mt();
            }
        } else {
            // No platform-specific flush is available for this target.
            let _ = p;
            let _ = size;
        }
    }
}

#[cfg(not(windows))]
pub fn hardened_runtime_info() -> HardenedRuntimeInfo {
    HardenedRuntimeInfo {
        flags: get_hardened_runtime_flags(),
    }
}

/// Toggles per-thread write protection of `MAP_JIT` memory on Apple Silicon;
/// a no-op everywhere else.
pub fn protect_jit_memory(access: ProtectJitAccess) {
    #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
    {
        // 0 = writable (ReadWrite), 1 = executable (ReadExecute).
        let enabled = match access {
            ProtectJitAccess::ReadWrite => 0,
            ProtectJitAccess::ReadExecute => 1,
        };

        unsafe {
            libc::pthread_jit_write_protect_np(enabled);
        }
    }

    let _ = access;
}
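
// Typical JIT write pattern on Apple Silicon (a sketch; `code` and `len` are
// placeholders for the caller's buffer):
//
//     protect_jit_memory(ProtectJitAccess::ReadWrite);
//     // ... copy generated machine code into the MAP_JIT region ...
//     protect_jit_memory(ProtectJitAccess::ReadExecute);
//     flush_instruction_cache(code, len);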

cfgenius::cond! {
    if cfg(windows) {
        use winapi::um::sysinfoapi::SYSTEM_INFO;
        use winapi::{
            shared::{minwindef::DWORD, ntdef::HANDLE},
            um::{
                handleapi::{CloseHandle, INVALID_HANDLE_VALUE},
                memoryapi::{
                    CreateFileMappingW, MapViewOfFile, UnmapViewOfFile, VirtualAlloc, VirtualFree,
                    VirtualProtect, FILE_MAP_EXECUTE, FILE_MAP_READ, FILE_MAP_WRITE,
                },
                sysinfoapi::GetSystemInfo,
                winnt::{
                    MEM_COMMIT, MEM_RELEASE, MEM_RESERVE, PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE,
                    PAGE_READONLY, PAGE_READWRITE,
                },
            },
        };

        /// Owns a Win32 `HANDLE` and closes it on drop.
        struct ScopedHandle {
            value: HANDLE,
        }

        impl ScopedHandle {
            fn new() -> Self {
                Self { value: core::ptr::null_mut() }
            }
        }

        impl Drop for ScopedHandle {
            fn drop(&mut self) {
                if !self.value.is_null() {
                    unsafe {
                        CloseHandle(self.value);
                    }
                }
            }
        }

        fn get_vm_info() -> Info {
            let mut system_info = MaybeUninit::<SYSTEM_INFO>::uninit();

            unsafe {
                GetSystemInfo(system_info.as_mut_ptr());

                let system_info = system_info.assume_init();

                Info {
                    page_size: system_info.dwPageSize as u32,
                    page_granularity: system_info.dwAllocationGranularity as u32,
                }
            }
        }

        /// Converts `MemoryFlags` to a Win32 `PAGE_*` protection constant.
        fn protect_flags_from_memory_flags(memory_flags: MemoryFlags) -> DWORD {
            if memory_flags.contains(MemoryFlags::ACCESS_EXECUTE) {
                if memory_flags.contains(MemoryFlags::ACCESS_WRITE) {
                    PAGE_EXECUTE_READWRITE
                } else {
                    PAGE_EXECUTE_READ
                }
            } else if memory_flags.contains(MemoryFlags::ACCESS_WRITE) {
                PAGE_READWRITE
            } else {
                PAGE_READONLY
            }
        }

        /// Converts `MemoryFlags` to a `FILE_MAP_*` desired-access mask.
        fn desired_access_from_memory_flags(memory_flags: MemoryFlags) -> DWORD {
            let mut access = if memory_flags.contains(MemoryFlags::ACCESS_WRITE) {
                FILE_MAP_WRITE
            } else {
                FILE_MAP_READ
            };

            if memory_flags.contains(MemoryFlags::ACCESS_EXECUTE) {
                access |= FILE_MAP_EXECUTE;
            }

            access
        }

        pub fn alloc(size: usize, memory_flags: MemoryFlags) -> Result<*mut u8, AsmError> {
            if size == 0 {
                return Err(AsmError::InvalidArgument);
            }

            unsafe {
                let protect = protect_flags_from_memory_flags(memory_flags);
                let result = VirtualAlloc(core::ptr::null_mut(), size, MEM_COMMIT | MEM_RESERVE, protect);

                if result.is_null() {
                    return Err(AsmError::OutOfMemory);
                }

                Ok(result as *mut u8)
            }
        }

        pub fn release(ptr: *mut u8, size: usize) -> Result<(), AsmError> {
            if size == 0 || ptr.is_null() {
                return Err(AsmError::InvalidArgument);
            }

            unsafe {
                // MEM_RELEASE requires a dwSize of zero.
                if VirtualFree(ptr as *mut _, 0, MEM_RELEASE) == 0 {
                    return Err(AsmError::InvalidArgument);
                }
            }

            Ok(())
        }

        pub fn protect(p: *mut u8, size: usize, memory_flags: MemoryFlags) -> Result<(), AsmError> {
            let protect_flags = protect_flags_from_memory_flags(memory_flags);
            let mut old_flags = 0;

            unsafe {
                if VirtualProtect(p as _, size, protect_flags, &mut old_flags) != 0 {
                    return Ok(());
                }

                Err(AsmError::InvalidArgument)
            }
        }

        pub fn alloc_dual_mapping(size: usize, memory_flags: MemoryFlags) -> Result<DualMapping, AsmError> {
            if size == 0 {
                return Err(AsmError::InvalidArgument);
            }

            let mut handle = ScopedHandle::new();

            unsafe {
                handle.value = CreateFileMappingW(
                    INVALID_HANDLE_VALUE,
                    core::ptr::null_mut(),
                    PAGE_EXECUTE_READWRITE,
                    ((size as u64) >> 32) as _,
                    (size & 0xFFFFFFFF) as _,
                    core::ptr::null_mut(),
                );

                if handle.value.is_null() {
                    return Err(AsmError::OutOfMemory);
                }

                let mut ptr = [core::ptr::null_mut(), core::ptr::null_mut()];

                for i in 0..2 {
                    let access_flags = memory_flags.0 & !DUAL_MAPPING_FILTER[i];
                    let desired_access = desired_access_from_memory_flags(access_flags.into());
                    ptr[i] = MapViewOfFile(handle.value, desired_access, 0, 0, size);

                    if ptr[i].is_null() {
                        // If the second view failed, unmap the first one.
                        if i == 1 {
                            UnmapViewOfFile(ptr[0]);
                        }

                        return Err(AsmError::OutOfMemory);
                    }
                }

                Ok(DualMapping {
                    rx: ptr[0] as _,
                    rw: ptr[1] as _,
                })
            }
        }

        pub fn release_dual_mapping(dm: &mut DualMapping, _size: usize) -> Result<(), AsmError> {
            let mut failed = false;

            unsafe {
                if UnmapViewOfFile(dm.rx as _) == 0 {
                    failed = true;
                }

                if dm.rx != dm.rw as *const u8 && UnmapViewOfFile(dm.rw as _) == 0 {
                    failed = true;
                }

                if failed {
                    return Err(AsmError::InvalidArgument);
                }

                dm.rx = core::ptr::null();
                dm.rw = core::ptr::null_mut();

                Ok(())
            }
        }
    }
}