// uringy/runtime/stack.rs

//! Call stacks for the runtime, backed by anonymous `mmap` regions and protected against overflow by guard pages.
2
3use std::num::NonZeroUsize;
4use std::{ffi, io, ptr};
5
/// An `mmap`-backed memory region used as a call stack.
/// Unmapped in `Drop`.
#[derive(Debug)]
pub(super) struct Stack {
    // lowest address of the mapping (the guard pages live here; the stack
    // grows downward toward them from `base()`)
    pub(super) pointer: *mut u8,
    // total size of the mapping in bytes, guard pages included
    pub(super) length: usize,
}
11
12impl Stack {
13    /// Allocates a general purpose stack.
14    /// Demand paging ensures that physical memory is allocated only as necessary, during a page fault.
15    /// The stack is protected from overflow using guard pages.
16    pub(super) fn new(guard_pages: NonZeroUsize, usable_pages: NonZeroUsize) -> io::Result<Self> {
17        let (guard_pages, usable_pages) = (guard_pages.get(), usable_pages.get());
18
19        // page aligned sizes
20        let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
21        assert_eq!(page_size, 4096);
22        let length = (guard_pages + usable_pages) * page_size;
23
24        // kernel allocates an unused block of virtual memory
25        let pointer = unsafe {
26            libc::mmap(
27                ptr::null_mut(),
28                length,
29                libc::PROT_READ | libc::PROT_WRITE,
30                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
31                -1,
32                0,
33            )
34        };
35        if pointer == libc::MAP_FAILED {
36            let error = io::Error::last_os_error();
37            return Err(error);
38        }
39
40        // if guarding memory goes wrong then mmap gets cleaned up in Stack's drop
41        let stack = Stack {
42            pointer: pointer as *mut u8,
43            length,
44        };
45
46        // located at the lowest addresses since the stack grows downward
47        let result = unsafe { libc::mprotect(pointer, guard_pages * page_size, libc::PROT_NONE) };
48        if result == -1 {
49            let error = io::Error::last_os_error();
50            return Err(error);
51        }
52
53        Ok(stack)
54    }
55
56    /// ...
57    pub(super) fn base(&self) -> *mut u8 {
58        // safety: part of same allocation, can't overflow
59        unsafe { self.pointer.add(self.length) }
60    }
61}
62
63impl Drop for Stack {
64    fn drop(&mut self) {
65        let result = unsafe { libc::munmap(self.pointer as *mut ffi::c_void, self.length) };
66        assert_eq!(result, 0);
67    }
68}
69
#[cfg(test)]
mod tests {
    use super::*;

    /// Host page size, mirroring the lookup in `Stack::new` so the tests
    /// track the actual page size instead of assuming 4096.
    fn page_size() -> usize {
        unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
    }

    /// The usable region accepts plain reads and writes.
    #[test]
    fn reads_and_writes() {
        let stack = Stack::new(NonZeroUsize::MIN, NonZeroUsize::MIN).unwrap();
        // base() already returns *mut u8; no cast needed
        let pointer = stack.base();
        unsafe {
            // highest usable byte, just below the base
            let pointer = pointer.sub(1);
            pointer.write(123);
            assert_eq!(pointer.read(), 123);
        }
    }

    #[test]
    fn cant_execute() {
        // TODO
    }

    /// Touching memory below the usable page lands in the guard page and faults.
    #[test]
    #[ignore = "aborts process"] // TODO: test with fork()
    fn overflow() {
        let stack = Stack::new(NonZeroUsize::MIN, NonZeroUsize::MIN).unwrap();
        let pointer = stack.base();
        unsafe {
            // one byte past the single usable page, inside the guard page
            let pointer = pointer.sub(page_size() + 1);
            pointer.write(123);
        }
    }
}