userspace_pagefault/
lib.rs

pub use nix::libc;
use nix::libc::c_void;
use nix::sys::mman::MapFlags;
pub use nix::sys::mman::ProtFlags;
use nix::sys::signal;
use nix::unistd;
use parking_lot::Mutex;
use std::mem;
use std::num::NonZeroUsize;
use std::os::fd::RawFd;
use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use std::sync::Arc;

mod machdep;

const ADDR_SIZE: usize = std::mem::size_of::<usize>();

#[derive(Debug, PartialEq, Eq)]
pub enum AccessType {
    Read,
    Write,
}

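/// A [PageStore] supplies page contents on demand when the paged memory faults.
///
/// A minimal illustrative sketch of an implementor that fills every faulting page with zeros
/// (the `ZeroPageStore` name is not part of this crate, and the import assumes the crate is
/// named `userspace_pagefault` as the directory suggests):
///
/// ```
/// use userspace_pagefault::{AccessType, PageStore};
///
/// struct ZeroPageStore;
///
/// impl PageStore for ZeroPageStore {
///     fn page_fault(
///         &mut self, _offset: usize, length: usize, _access: AccessType,
///     ) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>> {
///         // Hand back a single chunk covering the whole page.
///         Some(Box::new(std::iter::once(
///             Box::new(vec![0u8; length]) as Box<dyn AsRef<[u8]>>,
///         )))
///     }
/// }
/// ```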
pub trait PageStore {
    /// Callback that is triggered upon a page fault. Return `Some(..)` with the data chunks that
    /// should be copied into the faulting page, or `None` if the page only needs to be made
    /// accessible without loading any data.
    fn page_fault(
        &mut self, offset: usize, length: usize, access: AccessType,
    ) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>>;
}

pub struct MappedMemory {
    base: AtomicPtr<u8>,
    length: usize,
    unmap: bool,
    shared: Mutex<Vec<SharedMemory>>,
}

impl MappedMemory {
    pub fn new(base: Option<*mut u8>, mut length: usize, page_size: usize, flags: ProtFlags) -> Result<Self, Error> {
        let rem = length & (page_size - 1);
        match base {
            Some(base) => {
                if (base as usize) % page_size != 0 {
                    return Err(Error::BaseNotAligned);
                }
                if rem != 0 {
                    return Err(Error::LengthNotAligned);
                }
            }
            None => {
                if rem != 0 {
                    length += page_size - rem
                }
            }
        }

        let new_base = unsafe {
            nix::sys::mman::mmap(
                match base {
                    Some(b) => Some(NonZeroUsize::new(b as usize).ok_or(Error::NullBase)?),
                    None => None,
                },
                NonZeroUsize::new(length).ok_or(Error::LengthIsZero)?,
                flags,
                match base {
                    Some(_) => MapFlags::MAP_FIXED,
                    None => MapFlags::empty(),
                } | MapFlags::MAP_PRIVATE |
                    MapFlags::MAP_ANONYMOUS,
                Option::<std::fs::File>::None,
                0,
            )
            .map_err(Error::UnixError)?
        } as *mut u8;

        if let Some(base) = base {
            if base != new_base {
                return Err(Error::NotSupported);
            }
        }

        Ok(Self {
            base: new_base.into(),
            length,
            unmap: base.is_none(),
            shared: Mutex::new(Vec::new()),
        })
    }
    #[inline(always)]
    pub fn base(&self) -> *mut u8 {
        self.base.load(Ordering::Relaxed)
    }

    #[inline(always)]
    pub fn as_slice(&self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.base(), self.length) }
    }

    pub fn make_shared(&self, offset: usize, shm: &SharedMemory, flags: ProtFlags) -> Result<(), Error> {
        let len = shm.0.size;
        if offset + len > self.length {
            return Err(Error::MemoryOverflow);
        }
        unsafe {
            nix::sys::mman::mmap(
                Some(NonZeroUsize::new(self.base().add(offset) as usize).unwrap()),
                NonZeroUsize::new(len).unwrap(),
                flags,
                MapFlags::MAP_FIXED | MapFlags::MAP_SHARED,
                Some(&shm.0.fd),
                0,
            )
            .map_err(Error::UnixError)?;
        }
        // keep a reference to the shared memory so it is not deallocated
        self.shared.lock().push(shm.clone());
        Ok(())
    }
}

impl Drop for MappedMemory {
    fn drop(&mut self) {
        if self.unmap {
            unsafe {
                nix::sys::mman::munmap(self.base() as *mut c_void, self.length).unwrap();
            }
        }
    }
}

pub struct PagedMemory<'a> {
    mem: Arc<MappedMemory>,
    page_size: usize,
    _phantom: std::marker::PhantomData<&'a ()>,
}

struct PagedMemoryEntry {
    start: usize,
    len: usize,
    mem: Arc<MappedMemory>,
    store: Box<dyn PageStore + Send + 'static>,
    page_size: usize,
}

#[derive(Debug, PartialEq, Eq)]
pub enum Error {
    BaseNotAligned,
    NullBase,
    LengthNotAligned,
    LengthIsZero,
    PageSizeNotAvail,
    NotSupported,
    UnixError(nix::errno::Errno),
    MemoryOverlap,
    MemoryOverflow,
}

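// Pipes used by the fault handling machinery: the signal handler writes the faulting address and
// access type into FROM_HANDLER and waits for a one-byte verdict on TO_HANDLER; the helper thread
// spawned in handler_init() sits on the other ends. HANDLER_SPIN serializes concurrent faults.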
static HANDLER_SPIN: AtomicBool = AtomicBool::new(false);
static mut TO_HANDLER: (RawFd, RawFd) = (0, 1);
static mut FROM_HANDLER: (RawFd, RawFd) = (0, 1);
static mut PREV_SIGSEGV: mem::MaybeUninit<signal::SigAction> = mem::MaybeUninit::uninit();
static mut PREV_SIGBUS: mem::MaybeUninit<signal::SigAction> = mem::MaybeUninit::uninit();

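/// Forward the fault described by `info`/`ctx` to the helper thread and wait for its verdict.
/// Returns `true` when the fault was not handled there, i.e. the caller should fall through to
/// the previously installed signal handler.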
#[inline]
fn handle_page_fault_(info: *mut libc::siginfo_t, ctx: *mut c_void) -> bool {
    let (tx, rx, addr, ctx) = unsafe {
        let (rx, _) = TO_HANDLER;
        let (_, tx) = FROM_HANDLER;
        (tx, rx, (*info).si_addr() as usize, &mut *(ctx as *mut libc::ucontext_t))
    };
    let flag = machdep::check_page_fault_rw_flag_from_context(*ctx);
    let mut buff = [0; ADDR_SIZE + 1];
    buff[..ADDR_SIZE].copy_from_slice(&addr.to_le_bytes());
    buff[ADDR_SIZE] = flag;
    // use a spin lock to avoid ABA (another thread could interfere in-between read and write)
    while HANDLER_SPIN.swap(true, Ordering::Acquire) {
        std::thread::yield_now();
    }
    if unistd::write(tx, &buff).is_err() {
        HANDLER_SPIN.swap(false, Ordering::Release);
        return true;
    }
    unistd::read(rx, &mut buff[..1]).ok();
    HANDLER_SPIN.swap(false, Ordering::Release);
    buff[0] == 1
}

// The fallback signal handling is inspired by wasmtime trap handlers:
// https://github.com/bytecodealliance/wasmtime/blob/v22.0.0/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs
extern "C" fn handle_page_fault(signum: libc::c_int, info: *mut libc::siginfo_t, ctx: *mut c_void) {
    if !handle_page_fault_(info, ctx) {
        return;
    }

    unsafe {
        // not hitting a managed memory region; fall back to the previously installed handler
        let previous_signal = signal::Signal::try_from(signum).expect("invalid signum");
        let previous = *(match previous_signal {
            signal::SIGSEGV => PREV_SIGSEGV.as_ptr(),
            signal::SIGBUS => PREV_SIGBUS.as_ptr(),
            _ => panic!("unknown signal: {}", previous_signal),
        });

        match previous.handler() {
            signal::SigHandler::SigDfl => {
                signal::signal(previous_signal, signal::SigHandler::SigDfl).expect("fail to reset signal handler");
                let _ = signal::raise(previous_signal);
            }
            signal::SigHandler::SigIgn => {}
            signal::SigHandler::SigAction(handler)
                if previous.flags() & signal::SaFlags::SA_SIGINFO == signal::SaFlags::SA_SIGINFO =>
            {
                handler(signum, info, ctx);
            }
            signal::SigHandler::Handler(handler) => handler(signum),
            _ => panic!("unexpected signal handler"),
        }
    }
}

unsafe fn register_signal_handlers_(handler: extern "C" fn(i32, *mut libc::siginfo_t, *mut c_void)) {
    let register = |slot: *mut signal::SigAction, signal: signal::Signal| {
        // The flags here are chosen carefully:
        //
        // SA_SIGINFO gives us access to information like the program
        // counter from where the fault happened.
        //
        // SA_ONSTACK allows us to handle signals on an alternate stack,
        // so that the handler can run in response to running out of
        // stack space on the main stack. Rust installs an alternate
        // stack with sigaltstack, so we rely on that.
        //
        // SA_NODEFER allows us to reenter the signal handler if we
        // crash while handling the signal, so that a nested fault can
        // fall through to the previously installed handler.
        let sig_action = signal::SigAction::new(
            signal::SigHandler::SigAction(handler),
            signal::SaFlags::SA_NODEFER | signal::SaFlags::SA_SIGINFO | signal::SaFlags::SA_ONSTACK,
            signal::SigSet::empty(),
        );

        *slot = signal::sigaction(signal, &sig_action).expect("fail to register signal handler");
    };

    register(PREV_SIGSEGV.as_mut_ptr(), signal::SIGSEGV);
    register(PREV_SIGBUS.as_mut_ptr(), signal::SIGBUS);
}

pub unsafe fn register_signal_handlers() {
    register_signal_handlers_(handle_page_fault);
}

struct PagedMemoryManager {
    entries: Vec<PagedMemoryEntry>,
}

impl PagedMemoryManager {
    fn insert(&mut self, entry: PagedMemoryEntry) -> bool {
        for (i, PagedMemoryEntry { start, len, .. }) in self.entries.iter().enumerate() {
            if entry.start + entry.len <= *start {
                // insert before this entry
                self.entries.insert(i, entry);
                return true;
            }
            if entry.start < *start + *len {
                // overlapping space
                return false;
            }
        }
        self.entries.push(entry);
        true
    }

    fn remove(&mut self, start_: usize, len_: usize) {
        for (i, PagedMemoryEntry { start, len, .. }) in self.entries.iter().enumerate() {
            if *start == start_ && *len == len_ {
                self.entries.remove(i);
                return;
            }
        }
        panic!(
            "failed to locate PagedMemoryEntry (start = 0x{:x}, end = 0x{:x})",
            start_,
            start_ + len_
        )
    }
}

static MANAGER: Mutex<PagedMemoryManager> = Mutex::new(PagedMemoryManager { entries: Vec::new() });

fn handler_init() {
    let to_handler = nix::unistd::pipe().expect("fail to create pipe to the handler");
    let from_handler = nix::unistd::pipe().expect("fail to create pipe from the handler");
    unsafe {
        TO_HANDLER = to_handler;
        FROM_HANDLER = from_handler;
        register_signal_handlers();
    }
    std::sync::atomic::fence(Ordering::SeqCst);
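    // Helper thread protocol: each request from the signal handler is ADDR_SIZE little-endian
    // bytes of the faulting address followed by one access byte (0 = read, nonzero = write); the
    // reply is a single byte, 0 if the fault was handled here and 1 if it should fall through.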
    std::thread::spawn(move || {
        let from_handler = from_handler.0;
        let to_handler = to_handler.1;
        let mut buff = [0; ADDR_SIZE + 1];
        loop {
            unistd::read(from_handler, &mut buff).unwrap();
            let addr = usize::from_le_bytes(buff[..ADDR_SIZE].try_into().unwrap());
            let (access_type, mprotect_flag) = match buff[ADDR_SIZE] {
                0 => (AccessType::Read, ProtFlags::PROT_READ),
                _ => (AccessType::Write, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE),
            };
            let mut mgr = MANAGER.lock();
            let mut fallback = 1;
            for entry in mgr.entries.iter_mut() {
                if entry.start <= addr && addr < entry.start + entry.len {
                    let page_mask = usize::MAX ^ (entry.page_size - 1);
                    let page_addr = addr & page_mask;
                    // load the page data
                    let slice = entry.mem.as_slice();
                    let base = slice.as_ptr() as usize;
                    let page_offset = page_addr - base;
                    if let Some(page) = entry.store.page_fault(page_offset, entry.page_size, access_type) {
                        unsafe {
                            nix::sys::mman::mprotect(
                                page_addr as *mut c_void,
                                entry.page_size,
                                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
                            )
                            .expect("mprotect failed");
                        }
                        let target = &mut slice[page_offset..page_offset + entry.page_size];
                        let mut base = 0;
                        for chunk in page {
                            let chunk = (*chunk).as_ref();
                            let chunk_len = chunk.len();
                            target[base..base + chunk_len].copy_from_slice(&chunk);
                            base += chunk_len;
                        }
                    }
                    // apply the final protection for this access (read-only for a read fault,
                    // read/write for a write fault)
                    unsafe {
                        nix::sys::mman::mprotect(page_addr as *mut c_void, entry.page_size, mprotect_flag)
                            .expect("mprotect failed");
                    }
                    fallback = 0;
                    break;
                }
            }
            // no matching region: tell the signal handler to fall through to the previous handler
            unistd::write(to_handler, &[fallback]).unwrap();
        }
    });
}

impl<'a> PagedMemory<'a> {
    /// Make the memory paged in userspace from a raw base pointer and length. Note that the range
    /// of addresses must stay valid throughout the lifetime of the [PagedMemory], and the memory
    /// should not be accessed other than through this [PagedMemory].
    pub unsafe fn from_raw<S: PageStore + Send + 'static>(
        base: *mut u8, length: usize, store: S, page_size: Option<usize>,
    ) -> Result<PagedMemory<'static>, Error> {
        let mem: &'static mut [u8] = std::slice::from_raw_parts_mut(base, length);
        Self::new_(Some(mem.as_ptr() as *mut u8), mem.len(), store, page_size)
    }

    /// Create a region of memory of `length` bytes that is paged in userspace.
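    ///
    /// A minimal usage sketch (illustrative only; it assumes the crate is named
    /// `userspace_pagefault` as the directory suggests):
    ///
    /// ```no_run
    /// use userspace_pagefault::{get_page_size, PagedMemory, VecPageStore};
    ///
    /// let page_size = get_page_size().unwrap();
    /// // Back a 100-page region with a Vec-based store; pages are loaded lazily on first access.
    /// let backing = vec![7u8; page_size * 100];
    /// let pm = PagedMemory::new(page_size * 100, VecPageStore::new(backing), None).unwrap();
    /// assert_eq!(pm.as_slice()[page_size * 10], 7);
    /// ```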
    pub fn new<S: PageStore + Send + 'static>(
        length: usize, store: S, page_size: Option<usize>,
    ) -> Result<PagedMemory<'static>, Error> {
        Self::new_(None, length, store, page_size)
    }

    fn new_<'b, S: PageStore + Send + 'static>(
        base: Option<*mut u8>, length: usize, store: S, page_size: Option<usize>,
    ) -> Result<PagedMemory<'b>, Error> {
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| handler_init());
        let page_size = match page_size {
            Some(s) => s,
            None => get_page_size()?,
        };
        let mem = std::sync::Arc::new(MappedMemory::new(base, length, page_size, ProtFlags::PROT_NONE)?);
        let mut mgr = MANAGER.lock();
        if !mgr.insert(PagedMemoryEntry {
            start: mem.base() as usize,
            len: length,
            mem: mem.clone(),
            store: Box::new(store),
            page_size,
        }) {
            return Err(Error::MemoryOverlap);
        }

        Ok(PagedMemory {
            mem,
            page_size,
            _phantom: std::marker::PhantomData,
        })
    }

    /*
    /// Run code that possibly accesses the paged memory. Because an access to the memory could
    /// wait upon the [PageStore] to load the page in case of a page fault, to avoid deadlock,
    /// make sure the resources the store will acquire will not be held by this code before
    /// accessing the paged memory. For example, using `println!("{}", mem[0])` could deadlock the
    /// system if the `page_fault()` implementation of [PageStore] also uses `println`: the I/O is
    /// first locked before the dereference of `mem[0]`, which could possibly induce a page fault
    /// that invokes `page_fault()` to bring in the page content; then when the page store tries to
    /// invoke `println`, it gets stuck (the dereference is incomplete). As a good practice, always
    /// make sure [PageStore] grabs the least resources that do not overlap with the code here.
    pub fn run<F, T>(&mut self, f: F) -> std::thread::JoinHandle<T>
    where
        F: FnOnce(&mut [u8]) -> T + Send + 'static,
        T: Send + 'static,
    {
        let mem = self.mem.clone();
        std::thread::spawn(move || f(mem.as_slice()))
    }
    */

    /// Because an access to the memory may have to wait for the [PageStore] to load a page when a
    /// page fault occurs, make sure that no resource the store needs is already held by the code
    /// accessing the paged memory, otherwise the two can deadlock. For example,
    /// `println!("{}", mem.as_slice()[0])` could deadlock if the [PageStore::page_fault]
    /// implementation also uses `println`: stdout is locked before `mem.as_slice()[0]` is read,
    /// the read may trigger a page fault that invokes `page_fault()` to bring in the page content,
    /// and when the store then tries to use `println` it blocks forever (the original access is
    /// still incomplete). As a good practice, make sure the [PageStore] acquires as few resources
    /// as possible and none that overlap with the code accessing the memory.
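    ///
    /// A sketch of the safe pattern (illustrative only; it assumes the crate is named
    /// `userspace_pagefault` as the directory suggests):
    ///
    /// ```no_run
    /// # use userspace_pagefault::{PagedMemory, VecPageStore};
    /// # let mut pm = PagedMemory::new(4096, VecPageStore::new(vec![0u8; 4096]), None).unwrap();
    /// // Copy the byte out first (this may fault and call into the store), then print it, so the
    /// // store is never invoked while this thread holds the stdout lock.
    /// let b = pm.as_slice_mut()[0];
    /// println!("{}", b);
    /// ```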
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        self.mem.as_slice()
    }

    pub fn as_slice(&self) -> &[u8] {
        self.mem.as_slice()
    }

    pub fn as_raw_parts(&self) -> (*mut u8, usize) {
        let s = self.mem.as_slice();
        (s.as_mut_ptr(), s.len())
    }

    /// Return the configured page size.
    pub fn page_size(&self) -> usize {
        self.page_size
    }

    /// Mark a range of the PagedMemory as read-only; a future write to the range will trigger a
    /// write-access page fault again.
    pub fn mark_read_only(&self, offset: usize, length: usize) {
        assert!(offset + length <= self.mem.length);
        unsafe {
            nix::sys::mman::mprotect(self.mem.base().add(offset) as *mut c_void, length, ProtFlags::PROT_READ)
                .expect("mprotect failed");
        }
    }

    /// Release the page content loaded from the [PageStore]. The next access to an address within
    /// this page will trigger a page fault again. `page_offset` must be page-aligned (i.e. one of
    /// the offsets passed to [PageStore::page_fault]).
    pub fn release_page(&self, page_offset: usize) {
        if page_offset & (self.page_size - 1) != 0 || page_offset >= self.mem.length {
            panic!("invalid page offset: {:x}", page_offset);
        }
        let page_addr = self.mem.base() as usize + page_offset;
        unsafe {
            nix::sys::mman::mmap(
                Some(NonZeroUsize::new(page_addr).unwrap()),
                NonZeroUsize::new(self.page_size).unwrap(),
                ProtFlags::PROT_NONE,
                MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
                Option::<std::fs::File>::None,
                0,
            )
            .expect("mmap failed");
        }
    }

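    /// Release every page in the region (dropping any references to attached shared memory), so
    /// that subsequent accesses trigger page faults again.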
    pub fn release_all_pages(&self) {
        unsafe {
            nix::sys::mman::mmap(
                Some(NonZeroUsize::new(self.mem.base() as usize).unwrap()),
                NonZeroUsize::new(self.mem.length).unwrap(),
                ProtFlags::PROT_NONE,
                MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
                Option::<std::fs::File>::None,
                0,
            )
            .expect("mmap failed");
        }
        self.mem.shared.lock().clear();
    }

    pub fn make_shared(&self, offset: usize, shm: &SharedMemory) -> Result<(), Error> {
        self.mem.make_shared(offset, shm, ProtFlags::PROT_NONE)
    }
}

impl<'a> Drop for PagedMemory<'a> {
    fn drop(&mut self) {
        let mut mgr = MANAGER.lock();
        mgr.remove(self.mem.base() as usize, self.mem.length);
    }
}

pub struct VecPageStore(Vec<u8>);

impl VecPageStore {
    pub fn new(vec: Vec<u8>) -> Self {
        Self(vec)
    }
}

impl PageStore for VecPageStore {
    fn page_fault(
        &mut self, offset: usize, length: usize, _access: AccessType,
    ) -> Option<Box<dyn Iterator<Item = Box<dyn AsRef<[u8]> + '_>> + '_>> {
        #[cfg(debug_assertions)]
        println!(
            "{:?} loading page at 0x{:x} access={:?}",
            self as *mut Self, offset, _access,
        );
        Some(Box::new(std::iter::once(
            Box::new(&self.0[offset..offset + length]) as Box<dyn AsRef<[u8]>>
        )))
    }
}

#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);

struct SharedMemoryInner {
    fd: std::os::fd::OwnedFd,
    size: usize,
}

impl SharedMemory {
    pub fn new(size: usize) -> Result<Self, Error> {
        let fd = machdep::get_shared_memory()?;
        nix::unistd::ftruncate(&fd, size as libc::off_t).map_err(Error::UnixError)?;
        Ok(Self(Arc::new(SharedMemoryInner { fd, size })))
    }
}

pub fn get_page_size() -> Result<usize, Error> {
    Ok(unistd::sysconf(unistd::SysconfVar::PAGE_SIZE)
        .map_err(Error::UnixError)?
        .ok_or(Error::PageSizeNotAvail)? as usize)
}

#[cfg(test)]
mod tests {
    use super::*;
    use lazy_static::lazy_static;

    lazy_static! {
        static ref PAGE_SIZE: usize = unistd::sysconf(unistd::SysconfVar::PAGE_SIZE).unwrap().unwrap() as usize;
    }

    #[test]
    fn test1() {
        for _ in 0..100 {
            let mut v = Vec::new();
            v.resize(*PAGE_SIZE * 100, 0);
            v[0] = 42;
            v[*PAGE_SIZE * 10 + 1] = 43;
            v[*PAGE_SIZE * 20 + 1] = 44;

            let pm = PagedMemory::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
            let m = pm.as_slice();
            assert_eq!(m[0], 42);
            assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
            assert_eq!(m[*PAGE_SIZE * 20 + 1], 44);
        }
    }

    #[test]
    fn test2() {
        for _ in 0..100 {
            let mut v = Vec::new();
            v.resize(*PAGE_SIZE * 100, 0);
            v[0] = 1;
            v[*PAGE_SIZE * 10 + 1] = 2;
            v[*PAGE_SIZE * 20 + 1] = 3;

            let pm1 = PagedMemory::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();

            let mut v = Vec::new();
            v.resize(*PAGE_SIZE * 100, 0);
            for (i, v) in v.iter_mut().enumerate() {
                *v = i as u8;
            }
            let mut pm2 = PagedMemory::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();

            let m2 = pm2.as_slice_mut();
            let m1 = pm1.as_slice();

            assert_eq!(m2[100], 100);
            m2[100] = 0;
            assert_eq!(m2[100], 0);

            assert_eq!(m1[0], 1);
            assert_eq!(m1[*PAGE_SIZE * 10 + 1], 2);
            assert_eq!(m1[*PAGE_SIZE * 20 + 1], 3);
        }
    }

    #[test]
    fn test_shared_memory() {
        let mut v = Vec::new();
        v.resize(*PAGE_SIZE * 100, 0);
        v[0] = 42;
        v[*PAGE_SIZE * 10 + 1] = 43;
        v[*PAGE_SIZE * 20 + 1] = 44;

        let shm = SharedMemory::new(*PAGE_SIZE).unwrap();
        let mut pm1 = PagedMemory::new(*PAGE_SIZE * 100, VecPageStore::new(v.clone()), None).unwrap();
        let pm2 = PagedMemory::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
        pm1.make_shared(*PAGE_SIZE * 10, &shm).unwrap();
        pm2.make_shared(*PAGE_SIZE * 10, &shm).unwrap();

        assert_eq!(pm1.as_slice()[*PAGE_SIZE * 10 + 1], 43);
        assert_eq!(pm2.as_slice()[*PAGE_SIZE * 10 + 1], 43);
        pm1.as_slice_mut()[*PAGE_SIZE * 10 + 1] = 99;
        assert_eq!(pm2.as_slice()[*PAGE_SIZE * 10 + 1], 99);
        assert_eq!(pm1.as_slice()[*PAGE_SIZE * 10 + 1], 99);

        let m = pm1.as_slice();
        assert_eq!(m[0], 42);
        assert_eq!(m[*PAGE_SIZE * 20 + 1], 44);
    }

    #[test]
    fn test_release_page() {
        let mut v = Vec::new();
        v.resize(*PAGE_SIZE * 20, 0);
        v[0] = 42;
        v[*PAGE_SIZE * 10 + 1] = 43;

        let pm = PagedMemory::new(*PAGE_SIZE * 100, VecPageStore::new(v), None).unwrap();
        let m = pm.as_slice();
        assert_eq!(m[0], 42);
        assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
        for _ in 0..5 {
            pm.release_page(0);
            pm.release_page(*PAGE_SIZE * 10);
            assert_eq!(m[0], 42);
            assert_eq!(m[*PAGE_SIZE * 10 + 1], 43);
        }
    }

    #[test]
    fn out_of_order_scan() {
        let mut v = Vec::new();
        v.resize(*PAGE_SIZE * 100, 0);
        for (i, v) in v.iter_mut().enumerate() {
            *v = i as u8;
        }
        let store = VecPageStore::new(v);
        let pm = PagedMemory::new(*PAGE_SIZE * 100, store, None).unwrap();
        use rand::{seq::SliceRandom, SeedableRng};
        use rand_chacha::ChaChaRng;
        let seed = [0; 32];
        let mut rng = ChaChaRng::from_seed(seed);

        let m = pm.as_slice();
        let mut idxes = Vec::new();
        for i in 0..m.len() {
            idxes.push(i);
        }
        idxes.shuffle(&mut rng);
        for i in idxes.into_iter() {
            #[cfg(debug_assertions)]
            {
                let x = m[i];
                println!("m[0x{:08x}] = {}", i, x);
            }
            assert_eq!(m[i], i as u8);
        }
    }
}