#![warn(static_mut_refs)]

use std::collections::BTreeMap;
use std::num::NonZeroUsize;
use std::os::fd::{AsRawFd, BorrowedFd, IntoRawFd, RawFd};
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering};

pub use nix::libc;
use nix::libc::c_void;
use nix::sys::mman::MapFlags;
pub use nix::sys::mman::ProtFlags;
use nix::sys::signal;
use nix::unistd;
use parking_lot::Mutex;

mod machdep;

#[derive(Debug, PartialEq, Eq)]
pub enum Error {
    NullBase,
    ZeroSize,
    BaseNotAligned,
    LengthNotAligned,
    PageSizeNotAvail,
    Unsupported,
    UnixError(nix::errno::Errno),
    SegmentOverlap,
    SegmentOutOfBound,
}

#[derive(Debug, PartialEq, Eq)]
pub enum AccessType {
    Read,
    Write,
}

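/// A demand-paging backing store.
///
/// `page_fault` is invoked from the fault-handling thread with the
/// page-aligned `offset` of the faulting page within the segment, the page
/// `length`, and the observed [`AccessType`]. Returning an iterator of byte
/// chunks fills the page with their concatenation; returning `None` leaves
/// the freshly mapped page zero-filled.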
pub trait PageStore {
    fn page_fault(
        &mut self, offset: usize, length: usize, access: AccessType,
    ) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>>;
}

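/// A contiguous private anonymous mapping, either placed by the kernel or at
/// a caller-supplied fixed base. The mapping is unmapped on drop only if it
/// was allocated here (`owned`); shared-memory objects mapped into the
/// segment are kept alive in `shared`.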
pub struct Segment {
    base: AtomicPtr<u8>,
    size: usize,
    owned: bool,
    shared: Mutex<Vec<SharedMemory>>,
}

impl Segment {
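    /// Maps `size` bytes of private anonymous memory with protection `flags`.
    ///
    /// With an explicit `base`, both `base` and `size` must be multiples of
    /// `page_size` (assumed to be a power of two) and the mapping is placed
    /// with `MAP_FIXED`; otherwise `size` is rounded up to the next page
    /// boundary and the kernel picks the address.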
    pub fn new(base: Option<*mut u8>, mut size: usize, page_size: usize, flags: ProtFlags) -> Result<Self, Error> {
        let rem = size & (page_size - 1);
        match base {
            Some(base) => {
                if (base as usize) & (page_size - 1) != 0 {
                    return Err(Error::BaseNotAligned);
                }
                if rem != 0 {
                    return Err(Error::LengthNotAligned);
                }
            }
            None => {
                if rem != 0 {
                    size += page_size - rem;
                }
            }
        }

        let (base_ptr, map_flags) = match base {
            Some(ptr) => (
                Some(NonZeroUsize::new(ptr as usize).ok_or(Error::NullBase)?),
                MapFlags::MAP_FIXED,
            ),
            None => (None, MapFlags::empty()),
        };

        let new_base = unsafe {
            nix::sys::mman::mmap_anonymous(
                base_ptr,
                NonZeroUsize::new(size).ok_or(Error::ZeroSize)?,
                flags,
                map_flags | MapFlags::MAP_PRIVATE,
            )
            .map_err(Error::UnixError)?
            .cast::<u8>()
        };

        if let Some(base) = base {
            if base != new_base.as_ptr() {
                return Err(Error::Unsupported);
            }
        }

        Ok(Self {
            base: AtomicPtr::new(new_base.as_ptr()),
            size,
            owned: base.is_none(),
            shared: Mutex::new(Vec::new()),
        })
    }

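    /// Returns the base address of the mapping.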
    #[inline(always)]
    pub fn base(&self) -> *mut u8 {
        unsafe { *self.base.as_ptr() }
    }

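    /// Views the whole segment as a byte slice.
    ///
    /// Note that this hands out `&mut [u8]` from `&self`; callers must not
    /// create overlapping mutable views.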
    #[inline(always)]
    pub fn as_slice(&self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base(), self.size) }
    }

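    /// Maps `shm` over the segment at `offset` (assumed page-aligned) with
    /// `MAP_SHARED | MAP_FIXED`, replacing whatever was mapped there; the
    /// window must fit entirely inside the segment.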
    pub fn make_shared(&self, offset: usize, shm: &SharedMemory, flags: ProtFlags) -> Result<(), Error> {
        let size = shm.0.size;
        // The shared window must lie entirely within the segment.
        if offset + size > self.size {
            return Err(Error::SegmentOutOfBound);
        }
        unsafe {
            nix::sys::mman::mmap(
                Some(NonZeroUsize::new(self.base().add(offset) as usize).ok_or(Error::NullBase)?),
                NonZeroUsize::new(size).ok_or(Error::ZeroSize)?,
                flags,
                MapFlags::MAP_FIXED | MapFlags::MAP_SHARED,
                &shm.0.fd,
                0,
            )
            .map_err(Error::UnixError)?;
        }
        // Keep the SharedMemory alive for as long as this segment maps it.
        self.shared.lock().push(shm.clone());
        Ok(())
    }
}

impl Drop for Segment {
    fn drop(&mut self) {
        if self.owned {
            unsafe {
                if let Some(ptr) = NonNull::new(self.base() as *mut c_void) {
                    if let Err(e) = nix::sys::mman::munmap(ptr, self.size) {
                        eprintln!("Segment: Failed to munmap: {e:?}.");
                    }
                }
            }
        }
    }
}

type SignalHandler = extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut c_void);

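// A signal handler may only perform async-signal-safe work (no allocation, no
// locking of a parking_lot Mutex), so faults are shipped over a pipe to a
// dedicated manager thread: the handler writes the faulting address plus an
// R/W flag to FROM_HANDLER.1 and blocks reading the one-byte verdict from
// TO_HANDLER.0. HANDLER_SPIN serializes concurrently faulting threads. The fd
// pairs hold the placeholder values (0, 1) until init() fills them in.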
static HANDLER_SPIN: AtomicBool = AtomicBool::new(false);
static mut TO_HANDLER: (RawFd, RawFd) = (0, 1);
static mut FROM_HANDLER: (RawFd, RawFd) = (0, 1);
static mut FALLBACK_SIGSEGV_HANDLER: Option<SignalHandler> = None;
static mut FALLBACK_SIGBUS_HANDLER: Option<SignalHandler> = None;

static MANAGER: Mutex<PagedSegmentManager> = Mutex::new(PagedSegmentManager {
    entries: BTreeMap::new(),
});
static MANAGER_THREAD: Mutex<Option<std::thread::JoinHandle<()>>> = Mutex::new(None);
static INITIALIZED: AtomicBool = AtomicBool::new(false);
const ADDR_SIZE: usize = std::mem::size_of::<usize>();

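// Forwards one fault to the manager thread and waits for its verdict. Returns
// true when the fault was NOT handled and the caller should fall back to the
// previously installed handler (or SIG_DFL).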
#[inline]
fn handle_page_fault_(info: *mut libc::siginfo_t, ctx: *mut c_void) -> bool {
    let (tx, rx, addr, ctx) = unsafe {
        let (rx, _) = TO_HANDLER;
        let (_, tx) = FROM_HANDLER;
        (tx, rx, (*info).si_addr() as usize, &mut *(ctx as *mut libc::ucontext_t))
    };
    let flag = machdep::check_page_fault_rw_flag_from_context(*ctx);
    let mut buff = [0; ADDR_SIZE + 1];
    buff[..ADDR_SIZE].copy_from_slice(&addr.to_le_bytes());
    buff[ADDR_SIZE] = flag;
    // Spin until this thread owns the handler channel.
    while HANDLER_SPIN.swap(true, Ordering::Acquire) {
        std::thread::yield_now();
    }
    if unistd::write(unsafe { BorrowedFd::borrow_raw(tx) }, &buff).is_err() {
        HANDLER_SPIN.swap(false, Ordering::Release);
        return true;
    }
    let _ = unistd::read(unsafe { BorrowedFd::borrow_raw(rx) }, &mut buff[..1]);
    HANDLER_SPIN.swap(false, Ordering::Release);
    buff[0] == 1
}

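// Signal entry point for SIGSEGV/SIGBUS: try the paging path first; otherwise
// chain to the saved fallback handler, or reset to SIG_DFL and re-raise.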
extern "C" fn handle_page_fault(signum: libc::c_int, info: *mut libc::siginfo_t, ctx: *mut c_void) {
    if !handle_page_fault_(info, ctx) {
        return;
    }
    unsafe {
        let sig = signal::Signal::try_from(signum).expect("Invalid signum.");
        let fallback_handler = match sig {
            signal::SIGSEGV => FALLBACK_SIGSEGV_HANDLER,
            signal::SIGBUS => FALLBACK_SIGBUS_HANDLER,
            _ => panic!("Unknown signal: {}.", sig),
        };

        if let Some(handler) = fallback_handler {
            handler(signum, info, ctx);
        } else {
            // No previous handler to chain to: restore SIG_DFL and re-raise,
            // which terminates the process with the original signal.
            let sig_action = signal::SigAction::new(
                signal::SigHandler::SigDfl,
                signal::SaFlags::empty(),
                signal::SigSet::empty(),
            );
            signal::sigaction(sig, &sig_action).expect("Fail to reset signal handler.");
            signal::raise(sig).expect("Fail to raise SIG_DFL.");
            unreachable!("SIG_DFL should have terminated the process");
        }
    }
}

unsafe fn register_signal_handlers(handler: SignalHandler) {
    let register = |fallback_handler: *mut Option<SignalHandler>, sig: signal::Signal| {
        let sig_action = signal::SigAction::new(
            signal::SigHandler::SigAction(handler),
            signal::SaFlags::SA_NODEFER | signal::SaFlags::SA_SIGINFO | signal::SaFlags::SA_ONSTACK,
            signal::SigSet::empty(),
        );

        unsafe {
            let old_action = signal::sigaction(sig, &sig_action).expect("Fail to register signal handler.");
            // Save the previous handler so faults we do not own can be chained to it.
            *fallback_handler = match old_action.handler() {
                signal::SigHandler::SigAction(h)
                    if old_action.flags() & signal::SaFlags::SA_SIGINFO == signal::SaFlags::SA_SIGINFO =>
                {
                    Some(h)
                }
                _ => None,
            };
        }
    };

    register(&raw mut FALLBACK_SIGSEGV_HANDLER, signal::SIGSEGV);
    register(&raw mut FALLBACK_SIGBUS_HANDLER, signal::SIGBUS);
}

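/// A live paged segment registered with the global `MANAGER`: its memory, its
/// backing store, and its address range. Entries are keyed by `start` in a
/// `BTreeMap` so a faulting address can be resolved with a range query.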
struct PagedSegmentEntry {
    mem: Arc<Segment>,
    store: Box<dyn PageStore + Send + 'static>,
    start: usize,
    len: usize,
    page_size: usize,
}

struct PagedSegmentManager {
    entries: BTreeMap<usize, PagedSegmentEntry>,
}

impl PagedSegmentManager {
    fn insert(&mut self, entry: PagedSegmentEntry) -> bool {
        // Reject the new entry if it overlaps its closest predecessor.
        if let Some((start, e)) = self.entries.range(..=entry.start).next_back() {
            if start == &entry.start || start + e.len > entry.start {
                return false;
            }
        }
        // Also reject it if its range would swallow a successor's start.
        if let Some((start, _)) = self.entries.range(entry.start..).next() {
            if *start < entry.start + entry.len {
                return false;
            }
        }
        assert!(self.entries.insert(entry.start, entry).is_none());
        true
    }

    fn remove(&mut self, start: usize, len: usize) {
        use std::collections::btree_map::Entry;
        if let Entry::Occupied(e) = self.entries.entry(start) {
            if e.get().len == len {
                e.remove();
                return;
            }
        }
        panic!(
            "Failed to locate PagedSegmentEntry (start = 0x{:x}, end = 0x{:x}).",
            start,
            start + len
        )
    }

    fn hit(&mut self, addr: usize) -> Option<&mut PagedSegmentEntry> {
        if let Some((start, e)) = self.entries.range_mut(..=addr).next_back() {
            assert!(start <= &addr);
            if start + e.len > addr {
                return Some(e);
            }
        }
        None
    }
}

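/// One-time setup: creates the handler pipes, installs the SIGSEGV/SIGBUS
/// handlers, and spawns the manager thread. For each request the thread
/// locates the owning segment, asks its `PageStore` for the page contents,
/// adjusts the page protection, and replies with 0 (handled) or 1 (fall
/// back).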
fn init() {
    let (to_read, to_write) = nix::unistd::pipe().expect("Fail to create pipe to the handler.");
    let (from_read, from_write) = nix::unistd::pipe().expect("Fail to create pipe from the handler.");
    let from_handler = unsafe { BorrowedFd::borrow_raw(from_read.as_raw_fd()) };
    let to_handler = unsafe { BorrowedFd::borrow_raw(to_write.as_raw_fd()) };
    unsafe {
        TO_HANDLER = (to_read.into_raw_fd(), to_write.into_raw_fd());
        FROM_HANDLER = (from_read.into_raw_fd(), from_write.into_raw_fd());
        register_signal_handlers(handle_page_fault);
    }

    std::sync::atomic::fence(Ordering::SeqCst);

    let handle = std::thread::spawn(move || {
        let mut buff = [0; ADDR_SIZE + 1];
        loop {
            if unistd::read(&from_handler, &mut buff).is_err() {
                break;
            }
            let addr = usize::from_le_bytes(buff[..ADDR_SIZE].try_into().unwrap());
            let (access_type, mprotect_flag) = match buff[ADDR_SIZE] {
                0 => (AccessType::Read, ProtFlags::PROT_READ),
                _ => (AccessType::Write, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE),
            };
            let mut mgr = MANAGER.lock();
            let mut fallback = 1;
            if let Some(entry) = mgr.hit(addr) {
                let page_mask = usize::MAX ^ (entry.page_size - 1);
                let page_addr = addr & page_mask;
                let page_ptr = unsafe { NonNull::new_unchecked(page_addr as *mut c_void) };
                let slice = entry.mem.as_slice();
                let base = slice.as_ptr() as usize;
                let page_offset = page_addr - base;
                if let Some(page) = entry.store.page_fault(page_offset, entry.page_size, access_type) {
                    // Make the page writable while its contents are filled in.
                    unsafe {
                        nix::sys::mman::mprotect(
                            page_ptr,
                            entry.page_size,
                            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
                        )
                        .expect("Failed to mprotect.");
                    }
                    let target = &mut slice[page_offset..page_offset + entry.page_size];
                    let mut base = 0;
                    for chunk in page {
                        let chunk = (*chunk).as_ref();
                        let chunk_len = chunk.len();
                        target[base..base + chunk_len].copy_from_slice(chunk);
                        base += chunk_len;
                    }
                }
                // Drop to the protection implied by the access type so that a
                // later write to a read-faulted page still traps.
                unsafe {
                    nix::sys::mman::mprotect(page_ptr, entry.page_size, mprotect_flag).expect("Failed to mprotect.");
                }
                fallback = 0;
            }
            if unistd::write(&to_handler, &[fallback]).is_err() {
                break;
            }
        }
    });
    *MANAGER_THREAD.lock() = Some(handle);
}

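/// A demand-paged memory segment: every page starts `PROT_NONE` and is
/// materialized from the associated [`PageStore`] on first access.
///
/// Illustrative sketch (not a doctest; error handling elided):
///
/// ```ignore
/// let page = get_page_size().unwrap();
/// let store = VecPageStore::new(vec![7u8; page * 4]);
/// let seg = PagedSegment::new(page * 4, store, None).unwrap();
/// assert_eq!(seg.as_slice()[page + 1], 7); // first touch faults the page in
/// ```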
pub struct PagedSegment<'a> {
    mem: Arc<Segment>,
    page_size: usize,
    _phantom: std::marker::PhantomData<&'a ()>,
}

impl<'a> PagedSegment<'a> {
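    /// Adopts an existing memory region as a paged segment without
    /// allocating.
    ///
    /// # Safety
    /// `base..base + size` must be a valid, page-aligned address range that
    /// may be remapped for the lifetime of the returned segment.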
    pub unsafe fn from_raw<S: PageStore + Send + 'static>(
        base: *mut u8, size: usize, store: S, page_size: Option<usize>,
    ) -> Result<PagedSegment<'static>, Error> {
        let mem: &'static mut [u8] = unsafe { std::slice::from_raw_parts_mut(base, size) };
        Self::new_(Some(mem.as_ptr() as *mut u8), mem.len(), store, page_size)
    }

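    /// Creates a kernel-placed segment of `length` bytes backed by `store`.
    /// `page_size` defaults to the system page size and is assumed to be a
    /// power of two.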
    pub fn new<S: PageStore + Send + 'static>(
        length: usize, store: S, page_size: Option<usize>,
    ) -> Result<PagedSegment<'static>, Error> {
        Self::new_(None, length, store, page_size)
    }

    fn new_<'b, S: PageStore + Send + 'static>(
        base: Option<*mut u8>, length: usize, store: S, page_size: Option<usize>,
    ) -> Result<PagedSegment<'b>, Error> {
        // One-time process-wide setup: signal handlers, pipes, manager thread.
        if !INITIALIZED.swap(true, Ordering::AcqRel) {
            init();
        }
        let page_size = match page_size {
            Some(s) => s,
            None => get_page_size()?,
        };
        let mem = std::sync::Arc::new(Segment::new(base, length, page_size, ProtFlags::PROT_NONE)?);
        let mut mgr = MANAGER.lock();
        if !mgr.insert(PagedSegmentEntry {
            mem: mem.clone(),
            store: Box::new(store),
            start: mem.base() as usize,
            len: length,
            page_size,
        }) {
            return Err(Error::SegmentOverlap);
        }

        Ok(PagedSegment {
            mem,
            page_size,
            _phantom: std::marker::PhantomData,
        })
    }

    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        self.mem.as_slice()
    }

    pub fn as_slice(&self) -> &[u8] {
        self.mem.as_slice()
    }

    pub fn as_raw_parts(&self) -> (*mut u8, usize) {
        let s = self.mem.as_slice();
        (s.as_mut_ptr(), s.len())
    }

    pub fn page_size(&self) -> usize {
        self.page_size
    }

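    /// Re-protects `[offset, offset + size)` as read-only so that the next
    /// write faults again and is reported to the store as
    /// `AccessType::Write`.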
    pub fn reset_write_detection(&self, offset: usize, size: usize) -> Result<(), Error> {
        assert!(offset + size <= self.mem.size);
        unsafe {
            let ptr = NonNull::new_unchecked(self.mem.base().add(offset) as *mut c_void);
            nix::sys::mman::mprotect(ptr, size, ProtFlags::PROT_READ).map_err(Error::UnixError)?;
        }
        Ok(())
    }

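    /// Discards one resident page by remapping it to fresh `PROT_NONE`
    /// anonymous memory; the next access faults it in from the store again.
    /// Panics if `page_offset` is unaligned or out of range.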
    pub fn release_page(&self, page_offset: usize) -> Result<(), Error> {
        if page_offset & (self.page_size - 1) != 0 || page_offset >= self.mem.size {
            panic!("Invalid page offset: {:x}.", page_offset);
        }
        let page_addr = self.mem.base() as usize + page_offset;
        unsafe {
            nix::sys::mman::mmap_anonymous(
                Some(NonZeroUsize::new(page_addr).ok_or(Error::NullBase)?),
                NonZeroUsize::new(self.page_size).ok_or(Error::ZeroSize)?,
                ProtFlags::PROT_NONE,
                MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
            )
            .map_err(Error::UnixError)?;
        }
        Ok(())
    }

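    /// Discards every resident page, along with any shared-memory windows, by
    /// remapping the whole segment as `PROT_NONE` anonymous memory.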
    pub fn release_all_pages(&self) -> Result<(), Error> {
        unsafe {
            nix::sys::mman::mmap_anonymous(
                Some(NonZeroUsize::new(self.mem.base() as usize).ok_or(Error::NullBase)?),
                NonZeroUsize::new(self.mem.size).ok_or(Error::ZeroSize)?,
                ProtFlags::PROT_NONE,
                MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
            )
            .map_err(Error::UnixError)?;
        }
        self.mem.shared.lock().clear();
        Ok(())
    }

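    /// Maps `shm` into this segment at `offset`; the window starts out
    /// `PROT_NONE` and is faulted in through the usual paging machinery.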
    pub fn make_shared(&self, offset: usize, shm: &SharedMemory) -> Result<(), Error> {
        self.mem.make_shared(offset, shm, ProtFlags::PROT_NONE)
    }
}

impl<'a> Drop for PagedSegment<'a> {
    fn drop(&mut self) {
        let mut mgr = MANAGER.lock();
        mgr.remove(self.mem.base() as usize, self.mem.size);
    }
}

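/// A reference-counted shared-memory object (a file descriptor plus its size)
/// that can be mapped into several segments via `make_shared`, making writes
/// through one mapping visible through the others.
///
/// Illustrative sketch (mirrors `test_shared_memory`; not a doctest):
///
/// ```ignore
/// let shm = SharedMemory::new(page).unwrap();
/// pm1.make_shared(page * 10, &shm).unwrap();
/// pm2.make_shared(page * 10, &shm).unwrap();
/// pm1.as_slice_mut()[page * 10 + 1] = 99; // visible through pm2 as well
/// ```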
#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);

struct SharedMemoryInner {
    fd: std::os::fd::OwnedFd,
    size: usize,
}

impl SharedMemory {
    pub fn new(size: usize) -> Result<Self, Error> {
        let fd = machdep::get_shared_memory()?;
        nix::unistd::ftruncate(&fd, size as libc::off_t).map_err(Error::UnixError)?;
        Ok(Self(Arc::new(SharedMemoryInner { fd, size })))
    }
}

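/// Returns the system page size as reported by `sysconf(_SC_PAGE_SIZE)`.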
pub fn get_page_size() -> Result<usize, Error> {
    Ok(unistd::sysconf(unistd::SysconfVar::PAGE_SIZE)
        .map_err(Error::UnixError)?
        .ok_or(Error::PageSizeNotAvail)? as usize)
}

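/// A trivial [`PageStore`] that serves pages directly out of an in-memory
/// `Vec<u8>`. The vector must cover every page that is actually touched, or
/// the slice index in `page_fault` will panic.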
pub struct VecPageStore(Vec<u8>);

impl VecPageStore {
    pub fn new(vec: Vec<u8>) -> Self {
        Self(vec)
    }
}

impl PageStore for VecPageStore {
    fn page_fault(
        &mut self, offset: usize, length: usize, _access: AccessType,
    ) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>> {
        #[cfg(debug_assertions)]
        println!(
            "{:?} loading page at 0x{:x} access={:?}",
            self as *mut Self, offset, _access,
        );
        Some(Box::new(std::iter::once(
            Box::new(&self.0[offset..offset + length]) as Box<dyn AsRef<[u8]>>,
        )))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use lazy_static::lazy_static;
    use parking_lot::Mutex;

    lazy_static! {
        static ref PAGE_SIZE: usize = unistd::sysconf(unistd::SysconfVar::PAGE_SIZE).unwrap().unwrap() as usize;
    }

    static TEST_MUTEX: Mutex<()> = Mutex::new(());

    #[test]
    fn test1() {
        let _guard = TEST_MUTEX.lock();
        for _ in 0..100 {
            let mut v = Vec::new();
            v.resize(*PAGE_SIZE * 100, 0);
            v[0] = 42;
            v[*PAGE_SIZE * 10 + 1] = 43;
            v[*PAGE_SIZE * 20 + 1] = 44;

            let pm = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
            let m = pm.as_slice();
            assert_eq!(m[0], 42);
            assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
            assert_eq!(m[*PAGE_SIZE * 20 + 1], 44);
        }
    }

    #[test]
    fn test2() {
        let _guard = TEST_MUTEX.lock();
        for _ in 0..100 {
            let mut v = Vec::new();
            v.resize(*PAGE_SIZE * 100, 0);
            v[0] = 1;
            v[*PAGE_SIZE * 10 + 1] = 2;
            v[*PAGE_SIZE * 20 + 1] = 3;

            let pm1 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();

            let mut v = Vec::new();
            v.resize(*PAGE_SIZE * 100, 0);
            for (i, v) in v.iter_mut().enumerate() {
                *v = i as u8;
            }
            let mut pm2 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();

            let m2 = pm2.as_slice_mut();
            let m1 = pm1.as_slice();

            assert_eq!(m2[100], 100);
            m2[100] = 0;
            assert_eq!(m2[100], 0);

            assert_eq!(m1[0], 1);
            assert_eq!(m1[*PAGE_SIZE * 10 + 1], 2);
            assert_eq!(m1[*PAGE_SIZE * 20 + 1], 3);
        }
    }

    #[test]
    fn test_shared_memory() {
        let _guard = TEST_MUTEX.lock();
        let mut v = Vec::new();
        v.resize(*PAGE_SIZE * 100, 0);
        v[0] = 42;
        v[*PAGE_SIZE * 10 + 1] = 43;
        v[*PAGE_SIZE * 20 + 1] = 44;

        let shm = SharedMemory::new(*PAGE_SIZE).unwrap();
        let mut pm1 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v.clone()), None).unwrap();
        let pm2 = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
        pm1.make_shared(*PAGE_SIZE * 10, &shm).unwrap();
        pm2.make_shared(*PAGE_SIZE * 10, &shm).unwrap();

        assert_eq!(pm1.as_slice()[*PAGE_SIZE * 10 + 1], 43);
        assert_eq!(pm2.as_slice()[*PAGE_SIZE * 10 + 1], 43);
        pm1.as_slice_mut()[*PAGE_SIZE * 10 + 1] = 99;
        assert_eq!(pm2.as_slice()[*PAGE_SIZE * 10 + 1], 99);
        assert_eq!(pm1.as_slice()[*PAGE_SIZE * 10 + 1], 99);

        let m = pm1.as_slice();
        assert_eq!(m[0], 42);
        assert_eq!(m[*PAGE_SIZE * 20 + 1], 44);
    }

    #[test]
    fn test_release_page() {
        let _guard = TEST_MUTEX.lock();
        let mut v = Vec::new();
        v.resize(*PAGE_SIZE * 20, 0);
        v[0] = 42;
        v[*PAGE_SIZE * 10 + 1] = 43;

        let pm = PagedSegment::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
        let m = pm.as_slice();
        assert_eq!(m[0], 42);
        assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
        for _ in 0..5 {
            pm.release_page(0).unwrap();
            pm.release_page(*PAGE_SIZE * 10).unwrap();
            assert_eq!(m[0], 42);
            assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
        }
    }

    #[test]
    fn out_of_order_scan() {
        let _guard = TEST_MUTEX.lock();
        let mut v = Vec::new();
        v.resize(*PAGE_SIZE * 100, 0);
        for (i, v) in v.iter_mut().enumerate() {
            *v = i as u8;
        }
        let store = VecPageStore::new(v);
        let pm = PagedSegment::new(*PAGE_SIZE * 100, store, None).unwrap();
        use rand::{SeedableRng, seq::SliceRandom};
        use rand_chacha::ChaChaRng;
        let seed = [0; 32];
        let mut rng = ChaChaRng::from_seed(seed);

        let m = pm.as_slice();
        let mut idxes = Vec::new();
        for i in 0..m.len() {
            idxes.push(i);
        }
        idxes.shuffle(&mut rng);
        for i in idxes.into_iter() {
            #[cfg(debug_assertions)]
            {
                let x = m[i];
                println!("m[0x{:08x}] = {}", i, x);
            }
            assert_eq!(m[i], i as u8);
        }
    }

    use signal::{SaFlags, SigAction, SigHandler, SigSet, Signal};

    // Tears down the global handler state so a test can re-run init() from
    // scratch: close the pipes, join the manager thread, restore SIG_DFL, and
    // clear the saved fallback handlers.
    unsafe fn handler_reset_init() {
        unsafe {
            let (to_read, to_write) = TO_HANDLER;
            let (from_read, from_write) = FROM_HANDLER;

            // The (0, 1) placeholders are stdin/stdout; never close those.
            if to_read != 0 {
                let _ = nix::unistd::close(to_read);
            }
            if to_write != 1 {
                let _ = nix::unistd::close(to_write);
            }
            if from_read != 0 {
                let _ = nix::unistd::close(from_read);
            }
            if from_write != 1 {
                let _ = nix::unistd::close(from_write);
            }

            if let Some(handle) = MANAGER_THREAD.lock().take() {
                let _ = handle.join();
            }

            let sig_dfl = SigAction::new(SigHandler::SigDfl, SaFlags::empty(), SigSet::empty());
            let _ = signal::sigaction(Signal::SIGSEGV, &sig_dfl);
            let _ = signal::sigaction(Signal::SIGBUS, &sig_dfl);

            FALLBACK_SIGSEGV_HANDLER = None;
            FALLBACK_SIGBUS_HANDLER = None;

            TO_HANDLER = (0, 1);
            FROM_HANDLER = (0, 1);

            INITIALIZED.store(false, Ordering::Release);
        }
    }

    static SIGSEGV_CALLED: AtomicBool = AtomicBool::new(false);
    static SIGBUS_CALLED: AtomicBool = AtomicBool::new(false);

    fn make_test_mem_valid(info: *mut libc::siginfo_t) {
        unsafe {
            let addr = (*info).si_addr();
            let page_size = nix::unistd::sysconf(nix::unistd::SysconfVar::PAGE_SIZE)
                .unwrap()
                .unwrap() as usize;
            let page_addr = (addr as usize) & !(page_size - 1);
            nix::sys::mman::mprotect(
                NonNull::new_unchecked(page_addr as *mut c_void),
                page_size,
                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
            )
            .expect("mprotect failed in handler");
        }
    }

    extern "C" fn test_sigsegv_handler(_signum: libc::c_int, info: *mut libc::siginfo_t, _ctx: *mut c_void) {
        SIGSEGV_CALLED.store(true, Ordering::SeqCst);
        make_test_mem_valid(info);
    }

    extern "C" fn test_sigbus_handler(_signum: libc::c_int, info: *mut libc::siginfo_t, _ctx: *mut c_void) {
        SIGBUS_CALLED.store(true, Ordering::SeqCst);
        make_test_mem_valid(info);
    }

    #[test]
    fn test_fallback_handlers_set_and_called() {
        let _guard = TEST_MUTEX.lock();

        unsafe {
            handler_reset_init();

            let sigsegv_action = SigAction::new(
                SigHandler::SigAction(test_sigsegv_handler),
                SaFlags::SA_SIGINFO | SaFlags::SA_NODEFER,
                SigSet::empty(),
            );
            signal::sigaction(Signal::SIGSEGV, &sigsegv_action).expect("failed to set SIGSEGV handler");

            let sigbus_action = SigAction::new(
                SigHandler::SigAction(test_sigbus_handler),
                SaFlags::SA_SIGINFO | SaFlags::SA_NODEFER,
                SigSet::empty(),
            );
            signal::sigaction(Signal::SIGBUS, &sigbus_action).expect("failed to set SIGBUS handler");

            let _pm1 = PagedSegment::new(*PAGE_SIZE, VecPageStore::new(vec![0u8; *PAGE_SIZE]), None).unwrap();

            let saved_sigsegv = FALLBACK_SIGSEGV_HANDLER.map(|f| f as usize);
            let saved_sigbus = FALLBACK_SIGBUS_HANDLER.map(|f| f as usize);

            assert!(saved_sigsegv.is_some(), "SIGSEGV fallback handler should be saved");
            assert!(saved_sigbus.is_some(), "SIGBUS fallback handler should be saved");

            let _pm2 = PagedSegment::new(*PAGE_SIZE, VecPageStore::new(vec![0u8; *PAGE_SIZE]), None).unwrap();

            let current_sigsegv = FALLBACK_SIGSEGV_HANDLER.map(|f| f as usize);
            let current_sigbus = FALLBACK_SIGBUS_HANDLER.map(|f| f as usize);
            assert_eq!(
                current_sigsegv, saved_sigsegv,
                "SIGSEGV fallback handler should not change"
            );
            assert_eq!(
                current_sigbus, saved_sigbus,
                "SIGBUS fallback handler should not change"
            );

            SIGSEGV_CALLED.store(false, Ordering::SeqCst);
            SIGBUS_CALLED.store(false, Ordering::SeqCst);

            let test_mem = nix::sys::mman::mmap_anonymous(
                None,
                NonZeroUsize::new(*PAGE_SIZE).unwrap(),
                ProtFlags::PROT_NONE,
                MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
            )
            .expect("mmap failed");
            std::ptr::write_volatile(test_mem.cast::<u8>().as_ptr(), 42);
            assert!(
                SIGSEGV_CALLED.load(Ordering::SeqCst) || SIGBUS_CALLED.load(Ordering::SeqCst),
                "SIGSEGV or SIGBUS fallback handler should have been called"
            );
            nix::sys::mman::munmap(test_mem.cast(), *PAGE_SIZE).expect("munmap failed");
            handler_reset_init();
        }
    }
}