Struct userfaultfd::Uffd
pub struct Uffd { /* private fields */ }
The userfaultfd object.
The userspace representation of the object is a file descriptor, so this type implements
AsRawFd, FromRawFd, and IntoRawFd. These methods should be used with caution, but can be
essential for using functions like poll on a worker thread.
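For example, the raw descriptor can be handed to a polling loop on a dedicated thread. A minimal sketch, assuming the same nix API as the repository example further down this page (where PollFd::new accepts a RawFd and poll takes a millisecond timeout):

use std::os::unix::io::AsRawFd;

use nix::poll::{poll, PollFd, PollFlags};
use userfaultfd::Uffd;

// Block (with a 1-second timeout) until the userfaultfd is readable.
// Assumes the nix version used in the repository example below.
fn uffd_is_readable(uffd: &Uffd) -> nix::Result<bool> {
    let pollfd = PollFd::new(uffd.as_raw_fd(), PollFlags::POLLIN);
    let nready = poll(&mut [pollfd], 1000)?;
    Ok(nready > 0)
}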
Implementations
impl Uffd
pub fn register(&self, start: *mut c_void, len: usize) -> Result<IoctlFlags>
Register a memory address range with the userfaultfd object, and return the IoctlFlags
that are available for the selected range.
This method only registers the given range for missing page faults.
Examples found in repository
fn main() {
    let num_pages = env::args()
        .nth(1)
        .expect("Usage: manpage <num_pages>")
        .parse::<usize>()
        .unwrap();

    let page_size = sysconf(SysconfVar::PAGE_SIZE).unwrap().unwrap() as usize;
    let len = num_pages * page_size;

    // Create and enable userfaultfd object
    let uffd = UffdBuilder::new()
        .close_on_exec(true)
        .non_blocking(true)
        .user_mode_only(true)
        .create()
        .expect("uffd creation");

    // Create a private anonymous mapping. The memory will be demand-zero paged--that is, not yet
    // allocated. When we actually touch the memory, it will be allocated via the userfaultfd.
    let addr = unsafe {
        mmap(
            None,
            len.try_into().unwrap(),
            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
            MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
            -1,
            0,
        )
        .expect("mmap")
    };

    println!("Address returned by mmap() = {:p}", addr);

    // Register the memory range of the mapping we just created for handling by the userfaultfd
    // object. In this mode, we request to track missing pages (i.e., pages that have not yet been
    // faulted in).
    uffd.register(addr, len).expect("uffd.register()");

    // Create a thread that will process the userfaultfd events
    let _s = std::thread::spawn(move || fault_handler_thread(uffd));

    // Main thread now touches memory in the mapping, touching locations 1024 bytes apart. This will
    // trigger userfaultfd events for all pages in the region.
    // Ensure that faulting address is not on a page boundary, in order to test that we correctly
    // handle that case in fault_handler_thread()
    let mut l = 0xf;
    while l < len {
        let ptr = (addr as usize + l) as *mut u8;
        let c = unsafe { *ptr };
        println!("Read address {:p} in main(): {:?}", ptr, c as char);
        l += 1024;
        std::thread::sleep(std::time::Duration::from_micros(100000));
    }
}

pub fn register_with_mode(
    &self,
    start: *mut c_void,
    len: usize,
    mode: RegisterMode
) -> Result<IoctlFlags>
Register a memory address range with the userfaultfd object for the given mode, and
return the IoctlFlags that are available for the selected range.
pub fn unregister(&self, start: *mut c_void, len: usize) -> Result<()>
Unregister a memory address range from the userfaultfd object.
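A hedged sketch combining register_with_mode and unregister; RegisterMode::MISSING is assumed here to be the missing-page mode flag, and track_then_release is a hypothetical helper, not part of the crate:

use libc::c_void;
use userfaultfd::{RegisterMode, Uffd};

// Register `len` bytes at `addr` for missing-page tracking, inspect the
// ioctls available for the range, and unregister once handling is done.
// RegisterMode::MISSING is assumed; other modes (e.g. write-protect)
// depend on kernel support.
fn track_then_release(uffd: &Uffd, addr: *mut c_void, len: usize) -> userfaultfd::Result<()> {
    let ioctls = uffd.register_with_mode(addr, len, RegisterMode::MISSING)?;
    println!("ioctls available for this range: {:?}", ioctls);

    // ... serve faults for the range on a worker thread ...

    uffd.unregister(addr, len)?;
    Ok(())
}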
pub unsafe fn copy(
    &self,
    src: *const c_void,
    dst: *mut c_void,
    len: usize,
    wake: bool
) -> Result<usize>
Atomically copy a contiguous memory chunk into the userfaultfd-registered range, and return the number of bytes that were successfully copied.
If wake is true, wake up the thread waiting for page fault resolution on the memory
range.
Examples found in repository
fn fault_handler_thread(uffd: Uffd) {
    let page_size = sysconf(SysconfVar::PAGE_SIZE).unwrap().unwrap() as usize;

    // Create a page that will be copied into the faulting region
    let page = unsafe {
        mmap(
            None,
            page_size.try_into().unwrap(),
            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
            MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
            -1,
            0,
        )
        .expect("mmap")
    };

    // Loop, handling incoming events on the userfaultfd file descriptor
    let mut fault_cnt = 0;
    loop {
        // See what poll() tells us about the userfaultfd
        let pollfd = PollFd::new(uffd.as_raw_fd(), PollFlags::POLLIN);
        let nready = poll(&mut [pollfd], -1).expect("poll");

        println!("\nfault_handler_thread():");
        let revents = pollfd.revents().unwrap();
        println!(
            " poll() returns: nready = {}; POLLIN = {}; POLLERR = {}",
            nready,
            revents.contains(PollFlags::POLLIN),
            revents.contains(PollFlags::POLLERR),
        );

        // Read an event from the userfaultfd
        let event = uffd
            .read_event()
            .expect("read uffd_msg")
            .expect("uffd_msg ready");

        // We expect only one kind of event; verify that assumption
        if let Event::Pagefault { addr, .. } = event {
            // Display info about the page-fault event
            println!(" UFFD_EVENT_PAGEFAULT event: {:?}", event);

            // Copy the page pointed to by 'page' into the faulting region. Vary the contents that are
            // copied in, so that it is more obvious that each fault is handled separately.
            for c in unsafe { std::slice::from_raw_parts_mut(page as *mut u8, page_size) } {
                *c = b'A' + fault_cnt % 20;
            }
            fault_cnt += 1;

            let dst = (addr as usize & !(page_size - 1)) as *mut c_void;
            let copy = unsafe { uffd.copy(page, dst, page_size, true).expect("uffd copy") };

            println!(" (uffdio_copy.copy returned {})", copy);
        } else {
            panic!("Unexpected event on userfaultfd");
        }
    }
}

pub unsafe fn zeropage(
    &self,
    start: *mut c_void,
    len: usize,
    wake: bool
) -> Result<usize>
Zero out a memory address range registered with userfaultfd, and return the number of bytes that were successfully zeroed.
If wake is true, wake up the thread waiting for page fault resolution on the memory
address range.
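A minimal sketch of resolving a missing-page fault with zeroed memory rather than copied data; the page-alignment step mirrors the copy example above, and resolve_with_zeropage is a hypothetical helper:

use libc::c_void;
use userfaultfd::Uffd;

// Resolve a fault at `fault_addr` by supplying a zeroed page and waking the
// faulting thread. `page_size` is the system page size, obtained as in the
// repository example above.
fn resolve_with_zeropage(uffd: &Uffd, fault_addr: *mut c_void, page_size: usize) -> userfaultfd::Result<()> {
    // Like copy, zeropage must target the start of the faulting page.
    let dst = (fault_addr as usize & !(page_size - 1)) as *mut c_void;
    let zeroed = unsafe { uffd.zeropage(dst, page_size, true)? };
    assert_eq!(zeroed, page_size);
    Ok(())
}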
pub fn wake(&self, start: *mut c_void, len: usize) -> Result<()>
Wake up the thread waiting for page fault resolution on the specified memory address range.
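This is mainly useful together with copy or zeropage calls that pass wake = false. A hedged sketch (fill_range_then_wake is a hypothetical helper) that populates several pages first and wakes the whole range once:

use libc::c_void;
use userfaultfd::Uffd;

// Populate a multi-page registered range from `src_page` without waking the
// faulting thread after each page, then wake the whole range in one call.
// Assumes `range_len` is a multiple of `page_size` and `src_page` points to
// at least `page_size` readable bytes.
fn fill_range_then_wake(
    uffd: &Uffd,
    src_page: *const c_void,
    range_start: *mut c_void,
    range_len: usize,
    page_size: usize,
) -> userfaultfd::Result<()> {
    let mut offset = 0;
    while offset < range_len {
        let dst = (range_start as usize + offset) as *mut c_void;
        // wake = false: defer waking until every page is in place.
        unsafe { uffd.copy(src_page, dst, page_size, false)? };
        offset += page_size;
    }
    // Wake whichever threads are blocked anywhere in the range.
    uffd.wake(range_start, range_len)?;
    Ok(())
}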
pub fn read_event(&self) -> Result<Option<Event>>
Read an Event from the userfaultfd object.
If the Uffd object was created with non_blocking set to false, this will block until
an event is successfully read (returning Some(event)) or an error is returned.
If non_blocking was true, this will immediately return None if no event is ready to
read.
Note that while this method doesn’t require a mutable reference to the Uffd object, it
does consume bytes (thread-safely) from the underlying file descriptor.
Examples
fn read_event(uffd: &Uffd) -> Result<()> {
    // Read a single event
    match uffd.read_event()? {
        Some(e) => {
            // Do something with the event
        },
        None => {
            // This was a non-blocking read and the descriptor was not ready for read
        },
    }
    Ok(())
}

Examples found in repository: see the fault_handler_thread example under copy above.
pub fn read_events<'a>(
    &self,
    buf: &'a mut EventBuffer
) -> Result<impl Iterator<Item = Result<Event>> + 'a>
Read multiple events from the userfaultfd object using the given event buffer.
If the Uffd object was created with non_blocking set to false, this will block until
an event is successfully read or an error is returned.
If non_blocking was true, this will immediately return an empty iterator if the file
descriptor is not ready for reading.
Examples
fn read_events(uffd: &Uffd) -> userfaultfd::Result<()> {
    // Read up to 100 events at a time
    let mut buf = EventBuffer::new(100);
    for event in uffd.read_events(&mut buf)? {
        let event = event?;
        // Do something with the event...
    }
    Ok(())
}