use crate::util::alloc::AllocationError;
use crate::util::opaque_pointer::*;
use crate::util::Address;
use crate::vm::{Collection, VMBinding};
use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE};
use std::io::{Error, Result};
/// Returns `true` if an mmap result indicates the target range was already
/// mapped (the OS reported `EEXIST`), `false` otherwise.
///
/// Note the inversion: `Ok` means the probe mapping succeeded, i.e. the range
/// was NOT previously mapped, hence `false`.
pub fn result_is_mapped(result: Result<()>) -> bool {
    match result {
        Ok(_) => false,
        // Compare with Some(..) rather than unwrapping: raw_os_error() is
        // None for errors not created from an OS errno, and unwrap() on
        // such an error would panic here.
        Err(err) => err.raw_os_error() == Some(libc::EEXIST),
    }
}
/// Zero-fill `len` bytes starting at `start` using libc `memset`.
/// Panics (via `unwrap`) if the wrapped call reports failure.
pub fn zero(start: Address, len: usize) {
    let dst = start.to_mut_ptr();
    // memset returns its destination pointer, so a successful call yields
    // exactly `dst` — that is the value we expect back.
    wrap_libc_call(&|| unsafe { libc::memset(dst, 0, len) }, dst).unwrap()
}
#[allow(clippy::let_and_return)] pub unsafe fn dzmmap(start: Address, size: usize) -> Result<()> {
let prot = PROT_READ | PROT_WRITE | PROT_EXEC;
let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED;
let ret = mmap_fixed(start, size, prot, flags);
#[cfg(not(target_os = "linux"))]
if ret.is_ok() {
zero(start, size)
}
ret
}
#[allow(clippy::let_and_return)] pub fn dzmmap_noreplace(start: Address, size: usize) -> Result<()> {
let prot = PROT_READ | PROT_WRITE | PROT_EXEC;
let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE;
let ret = mmap_fixed(start, size, prot, flags);
#[cfg(not(target_os = "linux"))]
if ret.is_ok() {
zero(start, size)
}
ret
}
/// Reserve an address range without committing memory: `PROT_NONE` makes the
/// pages inaccessible and `MAP_NORESERVE` skips swap-space reservation, while
/// `MAP_FIXED_NOREPLACE` still fails if the range is already mapped.
pub fn mmap_noreserve(start: Address, size: usize) -> Result<()> {
    let flags = libc::MAP_ANON
        | libc::MAP_PRIVATE
        | libc::MAP_FIXED_NOREPLACE
        | libc::MAP_NORESERVE;
    mmap_fixed(start, size, PROT_NONE, flags)
}
/// Anonymous mmap (fd = -1, offset = 0) at a fixed address with the supplied
/// protection and flag bits. Returns `Ok(())` only if the kernel placed the
/// mapping exactly at `start`.
pub fn mmap_fixed(
    start: Address,
    size: usize,
    prot: libc::c_int,
    flags: libc::c_int,
) -> Result<()> {
    let addr = start.to_mut_ptr();
    // A successful fixed-address mmap returns the requested address itself,
    // so `addr` doubles as the expected return value.
    wrap_libc_call(
        &|| unsafe { libc::mmap(addr, size, prot, flags, -1, 0) },
        addr,
    )
}
/// Unmap `[start, start + size)`. libc `munmap` returns 0 on success.
pub fn munmap(start: Address, size: usize) -> Result<()> {
    let addr = start.to_mut_ptr();
    wrap_libc_call(&|| unsafe { libc::munmap(addr, size) }, 0)
}
/// Turn an mmap failure into either an out-of-memory upcall to the VM or a
/// panic. This function never returns (`-> !`).
///
/// `error` is the I/O error produced by one of the mmap wrappers above; `tls`
/// is the thread handle forwarded to `Collection::out_of_memory`.
pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: VMThread) -> ! {
    use std::io::ErrorKind;
    match error.kind() {
        // Newer Rust toolchains classify ENOMEM as ErrorKind::OutOfMemory.
        ErrorKind::OutOfMemory => {
            trace!("Signal MmapOutOfMemory!");
            VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory);
            // out_of_memory is expected not to return control here.
            unreachable!()
        }
        // Older toolchains surface ENOMEM as ErrorKind::Other, so fall back
        // to inspecting the raw errno.
        ErrorKind::Other => {
            if let Some(os_errno) = error.raw_os_error() {
                if os_errno == libc::ENOMEM {
                    trace!("Signal MmapOutOfMemory!");
                    VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory);
                    unreachable!()
                }
            }
        }
        // EEXIST from MAP_FIXED_NOREPLACE: the caller mapped over a range that
        // was already in use — a usage bug, not a recoverable condition.
        // (Fixed typo: "quanrantine" -> "quarantine".)
        ErrorKind::AlreadyExists => panic!("Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?"),
        _ => {}
    }
    panic!("Unexpected mmap failure: {:?}", error)
}
/// Debug assertion that `[start, start + size)` is mapped: probe it with a
/// `MAP_FIXED_NOREPLACE` mmap. If the probe succeeds the range was NOT mapped
/// and we panic; if it fails we insist the failure is `AlreadyExists`.
///
/// Note: a successful probe leaves the range mapped as a side effect, but we
/// panic immediately afterwards anyway.
pub fn panic_if_unmapped(start: Address, size: usize) {
    let prot = PROT_READ | PROT_WRITE;
    let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE;
    if let Err(e) = mmap_fixed(start, size, prot, flags) {
        // Any error other than "already mapped" means the probe itself broke.
        assert!(
            e.kind() == std::io::ErrorKind::AlreadyExists,
            "Failed to check mapped: {:?}",
            e
        );
    } else {
        panic!("{} of size {} is not mapped", start, size);
    }
}
/// Restore full access (read/write/execute) to a previously protected range.
pub fn munprotect(start: Address, size: usize) -> Result<()> {
    let rwx = PROT_READ | PROT_WRITE | PROT_EXEC;
    wrap_libc_call(&|| unsafe { libc::mprotect(start.to_mut_ptr(), size, rwx) }, 0)
}
/// Revoke all access to the given range (`PROT_NONE`). Despite sharing the
/// libc name, this wrapper always protects; `munprotect` is its inverse.
pub fn mprotect(start: Address, size: usize) -> Result<()> {
    let revoke_all = PROT_NONE;
    wrap_libc_call(
        &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, revoke_all) },
        0,
    )
}
/// Run a libc-style call and translate its return value into an io `Result`:
/// `Ok(())` when the call returns `expect`, otherwise an error carrying the
/// current `errno` (`Error::last_os_error`).
fn wrap_libc_call<T: PartialEq>(f: &dyn Fn() -> T, expect: T) -> Result<()> {
    if f() == expect {
        Ok(())
    } else {
        // errno is only meaningful immediately after the failing call.
        Err(std::io::Error::last_os_error())
    }
}
#[cfg(debug_assertions)]
#[cfg(target_os = "linux")]
/// Debug-only helper: return this process's memory map listing, as read from
/// `/proc/self/maps`. Panics if the file cannot be read (debug diagnostics
/// only, so unwrap is acceptable here).
pub fn get_process_memory_maps() -> String {
    std::fs::read_to_string("/proc/self/maps").unwrap()
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::util::constants::BYTES_IN_PAGE;
    use crate::util::test_util::MEMORY_TEST_REGION;
    use crate::util::test_util::{serial_test, with_cleanup};

    // All tests map the same dedicated test region, so they run serially
    // (serial_test) and always unmap via a cleanup closure (with_cleanup)
    // that runs even when the test body panics.
    const START: Address = MEMORY_TEST_REGION.start;

    #[test]
    fn test_mmap() {
        serial_test(|| {
            with_cleanup(
                || {
                    // dzmmap uses MAP_FIXED, so mapping the same page twice
                    // succeeds: the second call replaces the first mapping.
                    let res = unsafe { dzmmap(START, BYTES_IN_PAGE) };
                    assert!(res.is_ok());
                    let res = unsafe { dzmmap(START, BYTES_IN_PAGE) };
                    assert!(res.is_ok());
                },
                || {
                    assert!(munmap(START, BYTES_IN_PAGE).is_ok());
                },
            );
        });
    }

    #[test]
    fn test_munmap() {
        serial_test(|| {
            with_cleanup(
                || {
                    let res = dzmmap_noreplace(START, BYTES_IN_PAGE);
                    assert!(res.is_ok());
                    let res = munmap(START, BYTES_IN_PAGE);
                    assert!(res.is_ok());
                },
                || {
                    // NOTE(review): cleanup munmaps a range the test body
                    // already unmapped — presumably relying on munmap of an
                    // unmapped range not being an error; confirm on target OS.
                    assert!(munmap(START, BYTES_IN_PAGE).is_ok());
                },
            )
        })
    }

    #[test]
    fn test_mmap_noreplace() {
        serial_test(|| {
            with_cleanup(
                || {
                    // Once the page is mapped, MAP_FIXED_NOREPLACE must refuse
                    // to map it again.
                    let res = unsafe { dzmmap(START, BYTES_IN_PAGE) };
                    assert!(res.is_ok());
                    let res = dzmmap_noreplace(START, BYTES_IN_PAGE);
                    assert!(res.is_err());
                },
                || {
                    assert!(munmap(START, BYTES_IN_PAGE).is_ok());
                },
            )
        });
    }

    #[test]
    fn test_mmap_noreserve() {
        serial_test(|| {
            with_cleanup(
                || {
                    let res = mmap_noreserve(START, BYTES_IN_PAGE);
                    assert!(res.is_ok());
                    // dzmmap uses MAP_FIXED, so it can overwrite the
                    // no-reserve mapping to commit the memory.
                    let res = unsafe { dzmmap(START, BYTES_IN_PAGE) };
                    assert!(res.is_ok());
                },
                || {
                    assert!(munmap(START, BYTES_IN_PAGE).is_ok());
                },
            )
        })
    }

    #[test]
    #[should_panic]
    fn test_check_is_mmapped_for_unmapped() {
        serial_test(|| {
            with_cleanup(
                || {
                    // Region is unmapped, so panic_if_unmapped's probe mmap
                    // succeeds and it panics (expected by should_panic).
                    panic_if_unmapped(START, BYTES_IN_PAGE);
                },
                || {
                    // The probe maps the page as a side effect, so cleanup
                    // still has something to unmap.
                    assert!(munmap(START, BYTES_IN_PAGE).is_ok());
                },
            )
        })
    }

    #[test]
    fn test_check_is_mmapped_for_mapped() {
        serial_test(|| {
            with_cleanup(
                || {
                    // Mapped region: the probe fails with AlreadyExists and
                    // panic_if_unmapped returns normally.
                    assert!(dzmmap_noreplace(START, BYTES_IN_PAGE).is_ok());
                    panic_if_unmapped(START, BYTES_IN_PAGE);
                },
                || {
                    assert!(munmap(START, BYTES_IN_PAGE).is_ok());
                },
            )
        })
    }

    #[test]
    #[should_panic]
    fn test_check_is_mmapped_for_unmapped_next_to_mapped() {
        serial_test(|| {
            with_cleanup(
                || {
                    // Only the first page is mapped; probing the adjacent
                    // (unmapped) page must panic.
                    assert!(dzmmap_noreplace(START, BYTES_IN_PAGE).is_ok());
                    panic_if_unmapped(START + BYTES_IN_PAGE, BYTES_IN_PAGE);
                },
                || {
                    // Unmap both pages: the original mapping plus the one the
                    // probe created before panicking.
                    assert!(munmap(START, BYTES_IN_PAGE * 2).is_ok());
                },
            )
        })
    }

    #[test]
    #[should_panic]
    #[ignore]
    fn test_check_is_mmapped_for_partial_mapped() {
        // NOTE(review): ignored — presumably because probing a range that is
        // only partially mapped has OS/kernel-version-dependent behavior with
        // MAP_FIXED_NOREPLACE; confirm before re-enabling.
        serial_test(|| {
            with_cleanup(
                || {
                    assert!(dzmmap_noreplace(START, BYTES_IN_PAGE).is_ok());
                    panic_if_unmapped(START, BYTES_IN_PAGE * 2);
                },
                || {
                    assert!(munmap(START, BYTES_IN_PAGE * 2).is_ok());
                },
            )
        })
    }
}