use super::HostAlignedByteCount;
use crate::prelude::*;
use crate::runtime::vm::sys::{mmap, vm::MemoryImageSource};
use alloc::sync::Arc;
use core::ops::Range;
use core::ptr::NonNull;
#[cfg(feature = "std")]
use std::fs::File;
/// Marker type for an [`Mmap`] whose length is an exact multiple of the
/// host page size, enabling [`Mmap::len_aligned`].
#[derive(Clone, Debug)]
pub struct AlignedLength {}
/// Marker type for an [`Mmap`] whose length is not necessarily
/// page-aligned, e.g. one created directly from a file.
#[derive(Clone, Debug)]
pub struct UnalignedLength {
    // The file this mapping was created from, if any; storing the `Arc`
    // keeps the file handle alive for as long as the mapping.
    #[cfg(feature = "std")]
    file: Option<Arc<File>>,
}
/// A platform memory mapping, parameterized over a marker type `T`
/// ([`AlignedLength`] or [`UnalignedLength`]) that records at compile time
/// whether the mapping's length is host-page-aligned.
#[derive(Debug)]
pub struct Mmap<T> {
    // The underlying platform-specific mapping.
    sys: mmap::Mmap,
    // Typestate marker (and, for `UnalignedLength`, the backing file).
    data: T,
}
impl Mmap<AlignedLength> {
    /// Creates a new read/write mapping of at least `size` bytes, rounding
    /// the request up to the host page size.
    pub fn with_at_least(size: usize) -> Result<Self> {
        let rounded_size = HostAlignedByteCount::new_rounded_up(size)?;
        Self::accessible_reserved(rounded_size, rounded_size)
    }

    /// Reserves `mapping_size` bytes of address space and makes the first
    /// `accessible_size` bytes of it accessible.
    ///
    /// # Panics
    ///
    /// Panics if `accessible_size > mapping_size`.
    pub fn accessible_reserved(
        accessible_size: HostAlignedByteCount,
        mapping_size: HostAlignedByteCount,
    ) -> Result<Self> {
        assert!(accessible_size <= mapping_size);
        if mapping_size.is_zero() {
            // Empty mapping: no allocation needed at all.
            Ok(Mmap {
                sys: mmap::Mmap::new_empty(),
                data: AlignedLength {},
            })
        } else if accessible_size == mapping_size {
            // Fully accessible: allocate the whole region up front.
            Ok(Mmap {
                sys: mmap::Mmap::new(mapping_size)
                    .context(format!("mmap failed to allocate {mapping_size:#x} bytes"))?,
                data: AlignedLength {},
            })
        } else {
            // Reserve the full span, then commit only the accessible prefix.
            let result = Mmap {
                sys: mmap::Mmap::reserve(mapping_size)
                    .context(format!("mmap failed to reserve {mapping_size:#x} bytes"))?,
                data: AlignedLength {},
            };
            if !accessible_size.is_zero() {
                // SAFETY: the range starts at 0 and `accessible_size <=
                // mapping_size` (asserted above), so it lies within the
                // freshly reserved mapping that nothing else references yet.
                unsafe {
                    result
                        .make_accessible(HostAlignedByteCount::ZERO, accessible_size)
                        .context(format!(
                            "mmap failed to allocate {accessible_size:#x} bytes"
                        ))?;
                }
            }
            Ok(result)
        }
    }

    /// Converts into an [`Mmap<UnalignedLength>`], discarding the
    /// compile-time knowledge that the length is page-aligned.
    pub fn into_unaligned(self) -> Mmap<UnalignedLength> {
        Mmap {
            sys: self.sys,
            data: UnalignedLength {
                #[cfg(feature = "std")]
                file: None,
            },
        }
    }

    /// Returns the length of this mapping as an aligned byte count.
    pub fn len_aligned(&self) -> HostAlignedByteCount {
        // SAFETY: the `AlignedLength` typestate means this mapping's length
        // is always a multiple of the host page size, so the unchecked
        // construction is valid.
        unsafe { HostAlignedByteCount::new_unchecked(self.sys.len()) }
    }

    /// Returns an [`MmapOffset`] at `offset` into this mapping.
    ///
    /// # Errors
    ///
    /// Returns an error if `offset` is beyond the end of the mapping.
    pub fn offset(self: &Arc<Self>, offset: HostAlignedByteCount) -> Result<MmapOffset> {
        if offset > self.len_aligned() {
            bail!(
                "offset {} is not in bounds for mmap: {}",
                offset,
                self.len_aligned()
            );
        }
        Ok(MmapOffset::new(self.clone(), offset))
    }

    /// Returns an [`MmapOffset`] pointing at the start of this mapping.
    pub fn zero_offset(self: &Arc<Self>) -> MmapOffset {
        MmapOffset::new(self.clone(), HostAlignedByteCount::ZERO)
    }

    /// Makes `len` bytes starting at `start` accessible (a zero-length
    /// request is a no-op).
    ///
    /// # Safety
    ///
    /// The caller must ensure changing the protection of this range is
    /// sound (e.g. no outstanding borrows of the affected memory); the
    /// precise contract is that of the underlying `sys.make_accessible`.
    ///
    /// # Panics
    ///
    /// Panics if `start + len` overflows or extends past the end of the
    /// mapping.
    pub unsafe fn make_accessible(
        &self,
        start: HostAlignedByteCount,
        len: HostAlignedByteCount,
    ) -> Result<()> {
        if len.is_zero() {
            // Nothing to do for an empty range.
            return Ok(());
        }
        let end = start
            .checked_add(len)
            .expect("start + len must not overflow");
        assert!(
            end <= self.len_aligned(),
            "start + len ({end}) must be <= mmap region {}",
            self.len_aligned()
        );
        unsafe { self.sys.make_accessible(start, len) }
    }
}
#[cfg(feature = "std")]
impl Mmap<UnalignedLength> {
    /// Maps the contents of `file` into memory, retaining a handle to the
    /// file alongside the mapping.
    pub fn from_file(file: Arc<File>) -> Result<Self> {
        let sys = mmap::Mmap::from_file(&file)?;
        let data = UnalignedLength { file: Some(file) };
        Ok(Mmap { sys, data })
    }

    /// Returns the file this mapping was created from, if any.
    pub fn original_file(&self) -> Option<&Arc<File>> {
        self.data.file.as_ref()
    }
}
impl<T> Mmap<T> {
    /// Returns a shared slice of the bytes in `range`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the bytes in `range` are accessible
    /// and not concurrently mutated for the lifetime of the returned slice.
    ///
    /// # Panics
    ///
    /// Panics if `range` is inverted or extends past the end of the
    /// mapping.
    #[inline]
    pub unsafe fn slice(&self, range: Range<usize>) -> &[u8] {
        assert!(range.start <= range.end);
        assert!(range.end <= self.len());
        // SAFETY: the asserts above keep the slice in bounds; accessibility
        // and aliasing are the caller's responsibility per the contract.
        unsafe {
            core::slice::from_raw_parts(self.as_ptr().add(range.start), range.end - range.start)
        }
    }

    /// Returns a mutable slice of the bytes in `range`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the bytes in `range` are writable
    /// and not otherwise aliased for the lifetime of the returned slice.
    ///
    /// # Panics
    ///
    /// Panics if `range` is inverted or extends past the end of the
    /// mapping.
    pub unsafe fn slice_mut(&mut self, range: Range<usize>) -> &mut [u8] {
        assert!(range.start <= range.end);
        assert!(range.end <= self.len());
        // SAFETY: in bounds per the asserts above; writability and aliasing
        // are the caller's responsibility per the contract.
        unsafe {
            core::slice::from_raw_parts_mut(
                self.as_mut_ptr().add(range.start),
                range.end - range.start,
            )
        }
    }

    /// Returns the base address of this mapping as a const pointer.
    #[inline]
    pub fn as_ptr(&self) -> *const u8 {
        self.sys.as_send_sync_ptr().as_ptr() as *const u8
    }

    /// Returns the base address of this mapping as a mutable pointer.
    ///
    /// Note this takes `&self`: writes through the returned pointer are
    /// only sound under the same conditions as [`Mmap::slice_mut`].
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.sys.as_send_sync_ptr().as_ptr()
    }

    /// Returns the base address of this mapping as a `NonNull` pointer.
    #[inline]
    pub fn as_non_null(&self) -> NonNull<u8> {
        self.sys.as_send_sync_ptr().as_non_null()
    }

    /// Returns the byte length of this mapping.
    #[inline]
    pub fn len(&self) -> usize {
        self.sys.len()
    }

    /// Validates a protection-change `range`: ordered, within bounds, and
    /// starting on a host page boundary. Shared by `make_executable`,
    /// `make_readonly`, and `make_readwrite`, which previously each
    /// duplicated these four asserts.
    fn validate_protection_range(&self, range: &Range<usize>) {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::runtime::vm::host_page_size() == 0,
            "changing of protections isn't page-aligned",
        );
    }

    /// Marks `range` as executable, optionally enabling branch protection.
    /// An empty range is a no-op.
    ///
    /// # Safety
    ///
    /// The caller must ensure nothing relies on the previous protections of
    /// `range`; the precise contract is that of the underlying
    /// `sys.make_executable`.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out of bounds, inverted, or does not start on a
    /// page boundary.
    pub unsafe fn make_executable(
        &self,
        range: Range<usize>,
        enable_branch_protection: bool,
    ) -> Result<()> {
        self.validate_protection_range(&range);
        if range.start == range.end {
            // Nothing to change for an empty range.
            return Ok(());
        }
        unsafe {
            self.sys
                .make_executable(range, enable_branch_protection)
                .context("failed to make memory executable")
        }
    }

    /// Marks `range` as read-only. An empty range is a no-op.
    ///
    /// # Safety
    ///
    /// The caller must ensure nothing relies on `range` remaining writable;
    /// the precise contract is that of the underlying `sys.make_readonly`.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out of bounds, inverted, or does not start on a
    /// page boundary.
    pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
        self.validate_protection_range(&range);
        if range.start == range.end {
            // Nothing to change for an empty range.
            return Ok(());
        }
        unsafe {
            self.sys
                .make_readonly(range)
                .context("failed to make memory readonly")
        }
    }

    /// Marks `range` as read-write. An empty range is a no-op.
    ///
    /// # Safety
    ///
    /// The caller must ensure making `range` writable does not violate any
    /// invariants held elsewhere; the precise contract is that of the
    /// underlying `sys.make_readwrite`.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out of bounds, inverted, or does not start on a
    /// page boundary.
    pub unsafe fn make_readwrite(&self, range: Range<usize>) -> Result<()> {
        self.validate_protection_range(&range);
        if range.start == range.end {
            // Nothing to change for an empty range.
            return Ok(());
        }
        unsafe {
            self.sys
                .make_readwrite(range)
                .context("failed to make memory read-write")
        }
    }
}
/// Compile-time assertion that `Mmap` is `Send + Sync` for both length
/// marker types.
fn _assert() {
    fn is_send_sync<T: Send + Sync>() {}
    is_send_sync::<Mmap<AlignedLength>>();
    is_send_sync::<Mmap<UnalignedLength>>();
}
impl From<Mmap<AlignedLength>> for Mmap<UnalignedLength> {
fn from(mmap: Mmap<AlignedLength>) -> Mmap<UnalignedLength> {
mmap.into_unaligned()
}
}
/// A page-aligned offset into a shared, page-aligned [`Mmap`].
#[derive(Clone, Debug)]
pub struct MmapOffset {
    // The mapping this offset points into.
    mmap: Arc<Mmap<AlignedLength>>,
    // Offset from the start of `mmap`; checked at construction to be
    // `<= mmap.len_aligned()`.
    offset: HostAlignedByteCount,
}
impl MmapOffset {
    /// Creates a new offset into `mmap`.
    ///
    /// # Panics
    ///
    /// Panics if `offset` is beyond the end of `mmap`.
    #[inline]
    fn new(mmap: Arc<Mmap<AlignedLength>>, offset: HostAlignedByteCount) -> Self {
        // The message previously stated the success condition ("offset {}
        // is in bounds (< {})") even though it only prints on failure, and
        // printed `<` while the check is `<=`; it now describes the
        // violated requirement.
        assert!(
            offset <= mmap.len_aligned(),
            "offset {} must be <= mmap length {}",
            offset,
            mmap.len_aligned(),
        );
        Self { mmap, offset }
    }

    /// Returns the underlying mmap.
    #[inline]
    pub fn mmap(&self) -> &Arc<Mmap<AlignedLength>> {
        &self.mmap
    }

    /// Returns the offset from the start of the mmap.
    #[inline]
    pub fn offset(&self) -> HostAlignedByteCount {
        self.offset
    }

    /// Returns a raw pointer to this offset within the mapping.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.as_non_null().as_ptr()
    }

    /// Returns a `NonNull` pointer to this offset within the mapping.
    #[inline]
    pub fn as_non_null(&self) -> NonNull<u8> {
        // SAFETY: `new` guarantees `offset <= mmap.len_aligned()`, so the
        // resulting pointer is within, or one past the end of, the mapping.
        unsafe { self.mmap().as_non_null().byte_add(self.offset.byte_count()) }
    }

    /// Maps `memory_len` bytes of `image_source` (starting at
    /// `source_offset`) into memory at this offset plus `memory_offset`.
    ///
    /// # Safety
    ///
    /// The caller must ensure nothing references the target memory range
    /// while its contents are replaced; the precise contract is that of the
    /// underlying `sys.map_image_at`.
    ///
    /// # Panics
    ///
    /// Panics if `self.offset + memory_offset` overflows.
    pub unsafe fn map_image_at(
        &self,
        image_source: &MemoryImageSource,
        source_offset: u64,
        memory_offset: HostAlignedByteCount,
        memory_len: HostAlignedByteCount,
    ) -> Result<()> {
        let total_offset = self
            .offset
            .checked_add(memory_offset)
            .expect("self.offset + memory_offset is in bounds");
        unsafe {
            self.mmap
                .sys
                .map_image_at(image_source, source_offset, total_offset, memory_len)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Zero-length protection changes must succeed as no-ops rather than
    /// erroring (the implementations skip the syscall for empty ranges).
    #[test]
    fn mprotect_zero_length() {
        let page_size = HostAlignedByteCount::host_page_size();
        let pagex2 = page_size.checked_mul(2).unwrap();
        let pagex3 = page_size.checked_mul(3).unwrap();
        let pagex4 = page_size.checked_mul(4).unwrap();
        // Two accessible pages inside a four-page reservation.
        let mem = Mmap::accessible_reserved(pagex2, pagex4).expect("allocated memory");
        unsafe {
            // All of these target empty ranges at the third page boundary.
            mem.make_accessible(pagex3, HostAlignedByteCount::ZERO)
                .expect("make_accessible succeeded");
            mem.make_executable(pagex3.byte_count()..pagex3.byte_count(), false)
                .expect("make_executable succeeded");
            mem.make_readonly(pagex3.byte_count()..pagex3.byte_count())
                .expect("make_readonly succeeded");
        };
    }
}