#![warn(static_mut_refs)]
use std::collections::BTreeMap;
use std::num::NonZeroUsize;
use std::os::fd::{AsRawFd, BorrowedFd, IntoRawFd, RawFd};
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
pub use nix::libc;
use nix::libc::c_void;
use nix::sys::mman::MapFlags;
pub use nix::sys::mman::ProtFlags;
use nix::sys::signal;
use nix::unistd;
use parking_lot::Mutex;
mod machdep;
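/// Errors that can be reported by segment creation, mapping, and paging operations.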
#[derive(Debug, PartialEq, Eq)]
pub enum Error {
NullBase,
ZeroSize,
BaseNotAligned,
SizeNotAligned,
PageSizeUnavail,
Unsupported,
SegmentOverlap,
SegmentOutOfBound,
UnixError(nix::errno::Errno),
}
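/// The kind of memory access that triggered a page fault.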
#[derive(Debug, PartialEq, Eq)]
pub enum AccessType {
Read,
Write,
}
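/// A backing store that supplies page contents on demand.
///
/// `page_fault` is invoked (from a dedicated manager thread) the first time a page is
/// touched. `offset` is the page-aligned offset of the faulting page within the segment
/// and `length` is the page size. Returning an iterator of byte chunks fills the page
/// front to back; returning `None` leaves the page contents untouched.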
pub trait PageStore {
fn page_fault(
&mut self, offset: usize, length: usize, access: AccessType,
) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>>;
}
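/// A contiguous memory mapping, either created by this crate (owned) or adopted from a
/// caller-supplied base address.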
pub struct Segment {
base: AtomicPtr<u8>,
size: usize,
owned: bool,
shared: Mutex<Vec<SharedMemory>>,
}
impl Segment {
pub fn new(base: Option<*mut u8>, mut size: usize, page_size: usize, flags: ProtFlags) -> Result<Self, Error> {
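// `page_size` is assumed to be a power of two; the alignment checks below rely on bit masks.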
let rem = size & (page_size - 1);
match base {
Some(base) => {
if (base as usize) & (page_size - 1) != 0 {
return Err(Error::BaseNotAligned);
}
if rem != 0 {
return Err(Error::SizeNotAligned);
}
}
None => {
// No fixed base requested: round the size up to a whole number of pages.
if rem != 0 {
size += page_size - rem;
}
}
}
let (base_ptr, map_flags) = match base {
Some(ptr) => (
Some(NonZeroUsize::new(ptr as usize).ok_or(Error::NullBase)?),
MapFlags::MAP_FIXED,
),
None => (None, MapFlags::empty()),
};
let new_base = unsafe {
nix::sys::mman::mmap_anonymous(
base_ptr,
NonZeroUsize::new(size).ok_or(Error::ZeroSize)?,
flags,
map_flags | MapFlags::MAP_PRIVATE,
)
.map_err(Error::UnixError)?
.cast::<u8>()
};
if let Some(base) = base {
if base != new_base.as_ptr() {
return Err(Error::Unsupported);
}
}
Ok(Self {
base: AtomicPtr::new(new_base.as_ptr()),
size,
owned: base.is_none(),
shared: Mutex::new(Vec::new()),
})
}
#[inline(always)]
pub fn base(&self) -> *mut u8 {
// The base pointer is only written during construction, so a relaxed atomic load suffices.
self.base.load(Ordering::Relaxed)
}
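// NOTE: hands out a mutable slice through a shared reference; callers must ensure they
// do not hold overlapping mutable views of the same segment.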
#[inline(always)]
pub fn as_slice(&self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self.base(), self.size) }
}
pub fn make_shared(&self, offset: usize, shm: &SharedMemory, flags: ProtFlags) -> Result<(), Error> {
let size = shm.0.size;
// The shared mapping must fit entirely within the segment.
if offset + size > self.size {
return Err(Error::SegmentOutOfBound);
}
unsafe {
nix::sys::mman::mmap(
Some(NonZeroUsize::new(self.base().add(offset) as usize).ok_or(Error::NullBase)?),
NonZeroUsize::new(size).ok_or(Error::ZeroSize)?,
flags,
MapFlags::MAP_FIXED | MapFlags::MAP_SHARED,
&shm.0.fd,
0,
)
.map_err(Error::UnixError)?;
}
self.shared.lock().push(shm.clone());
Ok(())
}
}
impl Drop for Segment {
fn drop(&mut self) {
if self.owned {
unsafe {
if let Some(ptr) = NonNull::new(self.base() as *mut c_void) {
if let Err(e) = nix::sys::mman::munmap(ptr, self.size) {
eprintln!("Segment: Failed to munmap: {e:?}.");
}
}
}
}
}
}
type SignalHandler = extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut c_void);
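// Pipes connecting the signal handler to the manager thread: `FROM_HANDLER` carries the
// faulting address and access flag out of the handler; `TO_HANDLER` carries the one-byte
// verdict back. The (0, 1) sentinels mean "not yet initialized". `HANDLER_SPIN` serializes
// concurrently faulting threads so that requests and replies stay paired.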
static HANDLER_SPIN: AtomicBool = AtomicBool::new(false);
static mut TO_HANDLER: (RawFd, RawFd) = (0, 1);
static mut FROM_HANDLER: (RawFd, RawFd) = (0, 1);
static mut FALLBACK_SIGSEGV_HANDLER: Option<SignalHandler> = None;
static mut FALLBACK_SIGBUS_HANDLER: Option<SignalHandler> = None;
static MANAGER: Mutex<PagedSegmentManager> = Mutex::new(PagedSegmentManager {
entries: BTreeMap::new(),
});
static MANAGER_THREAD: Mutex<Option<std::thread::JoinHandle<()>>> = Mutex::new(None);
static INITIALIZED: AtomicBool = AtomicBool::new(false);
const ADDR_SIZE: usize = std::mem::size_of::<usize>();
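// Forwards the fault to the manager thread and waits for its verdict. Returns `true` if
// the fault was NOT handled and should be passed on to the fallback handler (or SIG_DFL).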
#[inline]
fn handle_page_fault_(info: *mut libc::siginfo_t, ctx: *mut c_void) -> bool {
let (tx, rx, addr, ctx) = unsafe {
let (rx, _) = TO_HANDLER;
let (_, tx) = FROM_HANDLER;
(tx, rx, (*info).si_addr() as usize, &mut *(ctx as *mut libc::ucontext_t))
};
let flag = machdep::check_page_fault_rw_flag_from_context(*ctx);
let mut buff = [0; ADDR_SIZE + 1];
buff[..ADDR_SIZE].copy_from_slice(&addr.to_le_bytes());
buff[ADDR_SIZE] = flag;
while HANDLER_SPIN.swap(true, Ordering::Acquire) {
std::thread::yield_now();
}
if unistd::write(unsafe { BorrowedFd::borrow_raw(tx) }, &buff).is_err() {
HANDLER_SPIN.swap(false, Ordering::Release);
return true;
}
let _ = unistd::read(unsafe { BorrowedFd::borrow_raw(rx) }, &mut buff[..1]);
HANDLER_SPIN.swap(false, Ordering::Release);
buff[0] == 1
}
extern "C" fn handle_page_fault(signum: libc::c_int, info: *mut libc::siginfo_t, ctx: *mut c_void) {
if !handle_page_fault_(info, ctx) {
return;
}
unsafe {
let sig = signal::Signal::try_from(signum).expect("Invalid signum.");
let fallback_handler = match sig {
signal::SIGSEGV => FALLBACK_SIGSEGV_HANDLER,
signal::SIGBUS => FALLBACK_SIGBUS_HANDLER,
_ => panic!("Unknown signal: {}.", sig),
};
if let Some(handler) = fallback_handler {
handler(signum, info, ctx);
} else {
let sig_action = signal::SigAction::new(
signal::SigHandler::SigDfl,
signal::SaFlags::empty(),
signal::SigSet::empty(),
);
signal::sigaction(sig, &sig_action).expect("Fail to reset signal handler.");
signal::raise(sig).expect("Fail to raise SIG_DFL.");
unreachable!("SIG_DFL should have terminated the process");
}
}
}
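// Installs `handler` for SIGSEGV and SIGBUS, remembering any previously installed
// SA_SIGINFO handler so it can be used as a fallback for faults this crate does not own.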
unsafe fn register_signal_handlers(handler: SignalHandler) {
let register = |fallback_handler: *mut Option<SignalHandler>, sig: signal::Signal| {
let sig_action = signal::SigAction::new(
signal::SigHandler::SigAction(handler),
signal::SaFlags::SA_NODEFER | signal::SaFlags::SA_SIGINFO | signal::SaFlags::SA_ONSTACK,
signal::SigSet::empty(),
);
unsafe {
let old_action = signal::sigaction(sig, &sig_action).expect("Fail to register signal handler.");
// Keep the previous handler only if it was an SA_SIGINFO-style action handler.
*fallback_handler = match old_action.handler() {
signal::SigHandler::SigAction(h) if old_action.flags().contains(signal::SaFlags::SA_SIGINFO) => {
Some(h)
}
_ => None,
};
}
};
register(&raw mut FALLBACK_SIGSEGV_HANDLER, signal::SIGSEGV);
register(&raw mut FALLBACK_SIGBUS_HANDLER, signal::SIGBUS);
}
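// A registered segment: its mapping, backing store, address range, and page size.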
struct PagedSegmentEntry {
mem: Arc<Segment>,
store: Box<dyn PageStore + Send + 'static>,
start: usize,
len: usize,
page_size: usize,
}
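// Global registry of paged segments, keyed by start address.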
struct PagedSegmentManager {
entries: BTreeMap<usize, PagedSegmentEntry>,
}
impl PagedSegmentManager {
fn insert(&mut self, entry: PagedSegmentEntry) -> bool {
// Reject overlap with the neighbouring entries on either side of the new start address.
if let Some((start, e)) = self.entries.range(..=entry.start).next_back() {
if start == &entry.start || start + e.len > entry.start {
return false;
}
}
if let Some((start, _)) = self.entries.range(entry.start..).next() {
if entry.start + entry.len > *start {
return false;
}
}
assert!(self.entries.insert(entry.start, entry).is_none());
true
}
fn remove(&mut self, start: usize, len: usize) {
use std::collections::btree_map::Entry;
if let Entry::Occupied(e) = self.entries.entry(start) {
if e.get().len == len {
e.remove();
return;
}
}
panic!(
"Failed to locate PagedSegmentEntry (start = 0x{:x}, end = 0x{:x}).",
start,
start + len
)
}
fn hit(&mut self, addr: usize) -> Option<&mut PagedSegmentEntry> {
if let Some((start, e)) = self.entries.range_mut(..=addr).next_back() {
assert!(start <= &addr);
if start + e.len > addr {
return Some(e);
}
}
None
}
}
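// One-time setup: create the handler pipes, install the signal handlers, and spawn the
// manager thread that services page faults by consulting the registered `PageStore`s.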
fn init() {
let (to_read, to_write) = nix::unistd::pipe().expect("Fail to create pipe to the handler.");
let (from_read, from_write) = nix::unistd::pipe().expect("Fail to create pipe from the handler.");
let from_handler = unsafe { BorrowedFd::borrow_raw(from_read.as_raw_fd()) };
let to_handler = unsafe { BorrowedFd::borrow_raw(to_write.as_raw_fd()) };
unsafe {
TO_HANDLER = (to_read.into_raw_fd(), to_write.into_raw_fd());
FROM_HANDLER = (from_read.into_raw_fd(), from_write.into_raw_fd());
register_signal_handlers(handle_page_fault);
}
std::sync::atomic::fence(Ordering::SeqCst);
let handle = std::thread::spawn(move || {
let mut buff = [0; ADDR_SIZE + 1];
loop {
if unistd::read(&from_handler, &mut buff).is_err() {
break;
}
let addr = usize::from_le_bytes(buff[..ADDR_SIZE].try_into().unwrap());
let (access_type, mprotect_flag) = match buff[ADDR_SIZE] {
0 => (AccessType::Read, ProtFlags::PROT_READ),
_ => (AccessType::Write, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE),
};
let mut mgr = MANAGER.lock();
let mut fallback = 1;
if let Some(entry) = mgr.hit(addr) {
let page_mask = usize::MAX ^ (entry.page_size - 1);
let page_addr = addr & page_mask;
let page_ptr = unsafe { NonNull::new_unchecked(page_addr as *mut c_void) };
let slice = entry.mem.as_slice();
let base = slice.as_ptr() as usize;
let page_offset = page_addr - base;
if let Some(page) = entry.store.page_fault(page_offset, entry.page_size, access_type) {
unsafe {
nix::sys::mman::mprotect(
page_ptr,
entry.page_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
)
.expect("Failed to mprotect.");
}
let target = &mut slice[page_offset..page_offset + entry.page_size];
// Copy the chunks supplied by the store into the page, front to back.
let mut filled = 0;
for chunk in page {
let chunk = (*chunk).as_ref();
let chunk_len = chunk.len();
target[filled..filled + chunk_len].copy_from_slice(chunk);
filled += chunk_len;
}
}
unsafe {
nix::sys::mman::mprotect(page_ptr, entry.page_size, mprotect_flag).expect("Failed to mprotect.");
}
fallback = 0;
}
if unistd::write(&to_handler, &[fallback]).is_err() {
break;
}
}
});
*MANAGER_THREAD.lock() = Some(handle);
}
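/// A lazily populated memory region backed by a `PageStore`.
///
/// Pages start out `PROT_NONE`; the first access faults, the store supplies the page
/// contents, and the page is then mapped read-only (for reads) or read-write (for writes).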
pub struct PagedSegment<'a> {
mem: Arc<Segment>,
page_size: usize,
_phantom: std::marker::PhantomData<&'a ()>,
}
impl<'a> PagedSegment<'a> {
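/// Adopts an existing region as a paged segment.
///
/// # Safety
/// `base` must be page-aligned and point to `size` bytes that remain valid, and unused by
/// anything else, for the lifetime of the returned segment; the region is remapped with
/// `MAP_FIXED`.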
pub unsafe fn from_raw<S: PageStore + Send + 'static>(
base: *mut u8, size: usize, store: S, page_size: Option<usize>,
) -> Result<PagedSegment<'static>, Error> {
let mem: &'static mut [u8] = unsafe { std::slice::from_raw_parts_mut(base, size) };
Self::new_(Some(mem.as_ptr() as *mut u8), mem.len(), store, page_size)
}
pub fn new<S: PageStore + Send + 'static>(
length: usize, store: S, page_size: Option<usize>,
) -> Result<PagedSegment<'static>, Error> {
Self::new_(None, length, store, page_size)
}
fn new_<'b, S: PageStore + Send + 'static>(
base: Option<*mut u8>, length: usize, store: S, page_size: Option<usize>,
) -> Result<PagedSegment<'b>, Error> {
if !INITIALIZED.swap(true, Ordering::AcqRel) {
init();
}
let page_size = match page_size {
Some(s) => s,
None => get_page_size()?,
};
let mem = std::sync::Arc::new(Segment::new(base, length, page_size, ProtFlags::PROT_NONE)?);
let mut mgr = MANAGER.lock();
if !mgr.insert(PagedSegmentEntry {
mem: mem.clone(),
store: Box::new(store),
start: mem.base() as usize,
len: length,
page_size,
}) {
return Err(Error::SegmentOverlap);
}
Ok(PagedSegment {
mem,
page_size,
_phantom: std::marker::PhantomData,
})
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
self.mem.as_slice()
}
pub fn as_slice(&self) -> &[u8] {
self.mem.as_slice()
}
pub fn as_raw_parts(&self) -> (*mut u8, usize) {
let s = self.mem.as_slice();
(s.as_mut_ptr(), s.len())
}
pub fn page_size(&self) -> usize {
self.page_size
}
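/// Remaps `[offset, offset + size)` read-only so that the next write faults again and is
/// reported to the store as a write access.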
pub fn reset_write_detection(&self, offset: usize, size: usize) -> Result<(), Error> {
assert!(offset + size <= self.mem.size);
unsafe {
let ptr = NonNull::new_unchecked(self.mem.base().add(offset) as *mut c_void);
nix::sys::mman::mprotect(ptr, size, ProtFlags::PROT_READ).map_err(Error::UnixError)?;
}
Ok(())
}
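/// Replaces the page at `page_offset` with a fresh `PROT_NONE` anonymous mapping, so its
/// contents are discarded and the next access reloads it from the store.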
pub fn release_page(&self, page_offset: usize) -> Result<(), Error> {
if page_offset & (self.page_size - 1) != 0 {
return Err(Error::BaseNotAligned);
}
if page_offset >= self.mem.size {
return Err(Error::SegmentOutOfBound);
}
let page_addr = self.mem.base() as usize + page_offset;
unsafe {
nix::sys::mman::mmap_anonymous(
Some(NonZeroUsize::new(page_addr).ok_or(Error::NullBase)?),
NonZeroUsize::new(self.page_size).ok_or(Error::ZeroSize)?,
ProtFlags::PROT_NONE,
MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
)
.map_err(Error::UnixError)?;
}
Ok(())
}
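/// Discards every page in the segment and drops any shared-memory mappings attached to it.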
pub fn release_all_pages(&self) -> Result<(), Error> {
unsafe {
nix::sys::mman::mmap_anonymous(
Some(NonZeroUsize::new(self.mem.base() as usize).ok_or(Error::NullBase)?),
NonZeroUsize::new(self.mem.size).ok_or(Error::ZeroSize)?,
ProtFlags::PROT_NONE,
MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
)
.map_err(Error::UnixError)?;
}
self.mem.shared.lock().clear();
Ok(())
}
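/// Maps `shm` into the segment at `offset` with `PROT_NONE`, so accesses to the shared
/// window still go through the page-fault machinery before the window becomes accessible.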
pub fn make_shared(&self, offset: usize, shm: &SharedMemory) -> Result<(), Error> {
self.mem.make_shared(offset, shm, ProtFlags::PROT_NONE)
}
}
impl<'a> Drop for PagedSegment<'a> {
fn drop(&mut self) {
let mut mgr = MANAGER.lock();
mgr.remove(self.mem.base() as usize, self.mem.size);
}
}
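/// An anonymous shared-memory object that can be mapped into one or more segments.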
#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);
struct SharedMemoryInner {
fd: std::os::fd::OwnedFd,
size: usize,
}
impl SharedMemory {
pub fn new(size: usize) -> Result<Self, Error> {
let fd = machdep::get_shared_memory()?;
nix::unistd::ftruncate(&fd, size as libc::off_t).map_err(Error::UnixError)?;
Ok(Self(Arc::new(SharedMemoryInner { fd, size })))
}
}
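/// Queries the system page size via `sysconf`.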
pub fn get_page_size() -> Result<usize, Error> {
Ok(unistd::sysconf(unistd::SysconfVar::PAGE_SIZE)
.map_err(Error::UnixError)?
.ok_or(Error::PageSizeUnavail)? as usize)
}
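/// A simple `PageStore` backed by an in-memory `Vec<u8>`.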
pub struct VecPageStore(Vec<u8>);
impl VecPageStore {
pub fn new(vec: Vec<u8>) -> Self {
Self(vec)
}
}
impl PageStore for VecPageStore {
fn page_fault(
&mut self, offset: usize, length: usize, _access: AccessType,
) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>> {
#[cfg(debug_assertions)]
println!(
"{:?} loading page at 0x{:x} access={:?}",
self as *mut Self, offset, _access,
);
Some(Box::new(std::iter::once(
Box::new(&self.0[offset..offset + length]) as Box<dyn AsRef<[u8]>>
)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use lazy_static::lazy_static;
use parking_lot::Mutex;
lazy_static! {
static ref PAGE_SIZE: usize = unistd::sysconf(unistd::SysconfVar::PAGE_SIZE).unwrap().unwrap() as usize;
}
static TEST_MUTEX: Mutex<()> = Mutex::new(());
#[test]
fn test1() {
let _guard = TEST_MUTEX.lock();
for _ in 0..100 {
let mut v = Vec::new();
v.resize(*PAGE_SIZE * 100, 0);
v[0] = 42;
v[*PAGE_SIZE * 10 + 1] = 43;
v[*PAGE_SIZE * 20 + 1] = 44;
let pm = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
let m = pm.as_slice();
assert_eq!(m[0], 42);
assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
assert_eq!(m[*PAGE_SIZE * 20 + 1], 44);
}
}
#[test]
fn test2() {
let _guard = TEST_MUTEX.lock();
for _ in 0..100 {
let mut v = Vec::new();
v.resize(*PAGE_SIZE * 100, 0);
v[0] = 1;
v[*PAGE_SIZE * 10 + 1] = 2;
v[*PAGE_SIZE * 20 + 1] = 3;
let pm1 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
let mut v = Vec::new();
v.resize(*PAGE_SIZE * 100, 0);
for (i, v) in v.iter_mut().enumerate() {
*v = i as u8;
}
let mut pm2 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
let m2 = pm2.as_slice_mut();
let m1 = pm1.as_slice();
assert_eq!(m2[100], 100);
m2[100] = 0;
assert_eq!(m2[100], 0);
assert_eq!(m1[0], 1);
assert_eq!(m1[*PAGE_SIZE * 10 + 1], 2);
assert_eq!(m1[*PAGE_SIZE * 20 + 1], 3);
}
}
#[test]
fn test_shared_memory() {
let _guard = TEST_MUTEX.lock();
let mut v = Vec::new();
v.resize(*PAGE_SIZE * 100, 0);
v[0] = 42;
v[*PAGE_SIZE * 10 + 1] = 43;
v[*PAGE_SIZE * 20 + 1] = 44;
let shm = SharedMemory::new(*PAGE_SIZE).unwrap();
let mut pm1 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v.clone()), None).unwrap();
let pm2 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
pm1.make_shared(*PAGE_SIZE * 10, &shm).unwrap();
pm2.make_shared(*PAGE_SIZE * 10, &shm).unwrap();
assert_eq!(pm1.as_slice()[*PAGE_SIZE * 10 + 1], 43);
assert_eq!(pm2.as_slice()[*PAGE_SIZE * 10 + 1], 43);
pm1.as_slice_mut()[*PAGE_SIZE * 10 + 1] = 99;
assert_eq!(pm2.as_slice()[*PAGE_SIZE * 10 + 1], 99);
assert_eq!(pm1.as_slice()[*PAGE_SIZE * 10 + 1], 99);
let m = pm1.as_slice();
assert_eq!(m[0], 42);
assert_eq!(m[*PAGE_SIZE * 20 + 1], 44);
}
#[test]
fn test_release_page() {
let _guard = TEST_MUTEX.lock();
let mut v = Vec::new();
v.resize(*PAGE_SIZE * 20, 0);
v[0] = 42;
v[*PAGE_SIZE * 10 + 1] = 43;
let pm = PagedSegment::new(*PAGE_SIZE * 20, VecPageStore::new(v), None).unwrap();
let m = pm.as_slice();
assert_eq!(m[0], 42);
assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
for _ in 0..5 {
pm.release_page(0).unwrap();
pm.release_page(*PAGE_SIZE * 10).unwrap();
assert_eq!(m[0], 42);
assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
}
}
#[test]
fn out_of_order_scan() {
let _guard = TEST_MUTEX.lock();
let mut v = Vec::new();
v.resize(*PAGE_SIZE * 100, 0);
for (i, v) in v.iter_mut().enumerate() {
*v = i as u8;
}
let store = VecPageStore::new(v);
let pm = PagedSegment::new(*PAGE_SIZE * 100, store, None).unwrap();
use rand::{SeedableRng, seq::SliceRandom};
use rand_chacha::ChaChaRng;
let seed = [0; 32];
let mut rng = ChaChaRng::from_seed(seed);
let m = pm.as_slice();
let mut idxes = Vec::new();
for i in 0..m.len() {
idxes.push(i);
}
idxes.shuffle(&mut rng);
for i in idxes.into_iter() {
#[cfg(debug_assertions)]
{
let x = m[i];
println!("m[0x{:08x}] = {}", i, x);
}
assert_eq!(m[i], i as u8);
}
}
use signal::{SaFlags, SigAction, SigHandler, SigSet, Signal};
unsafe fn handler_reset_init() {
unsafe {
let (to_read, to_write) = TO_HANDLER;
let (from_read, from_write) = FROM_HANDLER;
if to_read != 0 {
let _ = nix::unistd::close(to_read);
}
if to_write != 1 {
let _ = nix::unistd::close(to_write);
}
if from_read != 0 {
let _ = nix::unistd::close(from_read);
}
if from_write != 1 {
let _ = nix::unistd::close(from_write);
}
if let Some(handle) = MANAGER_THREAD.lock().take() {
let _ = handle.join();
}
let sig_dfl = SigAction::new(SigHandler::SigDfl, SaFlags::empty(), SigSet::empty());
let _ = signal::sigaction(Signal::SIGSEGV, &sig_dfl);
let _ = signal::sigaction(Signal::SIGBUS, &sig_dfl);
FALLBACK_SIGSEGV_HANDLER = None;
FALLBACK_SIGBUS_HANDLER = None;
TO_HANDLER = (0, 1);
FROM_HANDLER = (0, 1);
INITIALIZED.store(false, Ordering::Release);
}
}
static SIGSEGV_CALLED: AtomicBool = AtomicBool::new(false);
static SIGBUS_CALLED: AtomicBool = AtomicBool::new(false);
fn make_test_mem_valid(info: *mut libc::siginfo_t) {
unsafe {
let addr = (*info).si_addr();
let page_size = nix::unistd::sysconf(nix::unistd::SysconfVar::PAGE_SIZE)
.unwrap()
.unwrap() as usize;
let page_addr = (addr as usize) & !(page_size - 1);
nix::sys::mman::mprotect(
NonNull::new_unchecked(page_addr as *mut c_void),
page_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
)
.expect("mprotect failed in handler");
}
}
extern "C" fn test_sigsegv_handler(_signum: libc::c_int, info: *mut libc::siginfo_t, _ctx: *mut c_void) {
SIGSEGV_CALLED.store(true, Ordering::SeqCst);
make_test_mem_valid(info);
}
extern "C" fn test_sigbus_handler(_signum: libc::c_int, info: *mut libc::siginfo_t, _ctx: *mut c_void) {
SIGBUS_CALLED.store(true, Ordering::SeqCst);
make_test_mem_valid(info);
}
#[test]
fn test_fallback_handlers_set_and_called() {
let _guard = TEST_MUTEX.lock();
unsafe {
handler_reset_init();
let sigsegv_action = SigAction::new(
SigHandler::SigAction(test_sigsegv_handler),
SaFlags::SA_SIGINFO | SaFlags::SA_NODEFER,
SigSet::empty(),
);
signal::sigaction(Signal::SIGSEGV, &sigsegv_action).expect("failed to set SIGSEGV handler");
let sigbus_action = SigAction::new(
SigHandler::SigAction(test_sigbus_handler),
SaFlags::SA_SIGINFO | SaFlags::SA_NODEFER,
SigSet::empty(),
);
signal::sigaction(Signal::SIGBUS, &sigbus_action).expect("failed to set SIGBUS handler");
let _pm1 = PagedSegment::new(*PAGE_SIZE, VecPageStore::new(vec![0u8; *PAGE_SIZE]), None).unwrap();
let saved_sigsegv = FALLBACK_SIGSEGV_HANDLER.map(|f| f as usize);
let saved_sigbus = FALLBACK_SIGBUS_HANDLER.map(|f| f as usize);
assert!(saved_sigsegv.is_some(), "SIGSEGV fallback handler should be saved");
assert!(saved_sigbus.is_some(), "SIGBUS fallback handler should be saved");
let _pm2 = PagedSegment::new(*PAGE_SIZE, VecPageStore::new(vec![0u8; *PAGE_SIZE]), None).unwrap();
let current_sigsegv = FALLBACK_SIGSEGV_HANDLER.map(|f| f as usize);
let current_sigbus = FALLBACK_SIGBUS_HANDLER.map(|f| f as usize);
assert_eq!(
current_sigsegv, saved_sigsegv,
"SIGSEGV fallback handler should not change"
);
assert_eq!(
current_sigbus, saved_sigbus,
"SIGBUS fallback handler should not change"
);
SIGSEGV_CALLED.store(false, Ordering::SeqCst);
SIGBUS_CALLED.store(false, Ordering::SeqCst);
let test_mem = nix::sys::mman::mmap_anonymous(
None,
NonZeroUsize::new(*PAGE_SIZE).unwrap(),
ProtFlags::PROT_NONE,
MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
)
.expect("mmap failed");
std::ptr::write_volatile(test_mem.cast::<u8>().as_ptr(), 42);
assert!(
SIGSEGV_CALLED.load(Ordering::SeqCst) || SIGBUS_CALLED.load(Ordering::SeqCst),
"SIGSEGV or SIGBUS fallback handler should have been called"
);
nix::sys::mman::munmap(test_mem.cast(), *PAGE_SIZE).expect("munmap failed");
handler_reset_init();
}
}
}