use libc::{c_int, getpagesize, getrlimit, mmap, mprotect, munmap, off_t, rlim_t, rlimit, MAP_ANON, MAP_FAILED, MAP_NORESERVE, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, RLIMIT_STACK, RLIM_INFINITY};
#[cfg(any(target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "linux", target_os = "openbsd"))] use libc::MAP_STACK;
use std::cmp::min;
use std::io;
use std::mem::zeroed;
use std::ptr::null_mut;
use std::sync::atomic::{AtomicUsize, Ordering};
// `likely!` and `unlikely!` are branch-prediction hint macros, and `Stack` and
// `StackPointer` a trait and type alias, defined elsewhere in this crate; the
// crate is also assumed to allow `non_upper_case_globals` for the CamelCase
// constants and statics below.
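/// A stack backed by an anonymous mmap() mapping whose lowest page is a
/// PROT_NONE guard page, so that overflowing the stack faults instead of
/// silently corrupting adjacent memory.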
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ProtectedStack
{
bottom_including_guard_page: usize,
size_including_guard_page: usize,
}
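// Dropping the stack unmaps the entire mapping, guard page included.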
impl Drop for ProtectedStack
{
#[inline(always)]
fn drop(&mut self)
{
unsafe { munmap(self.bottom_including_guard_page as *mut _, self.size_including_guard_page) };
}
}
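// top() returns the highest address of the mapping, since stacks grow
// downwards on supported architectures; size() reports only the usable
// portion, excluding the guard page.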
impl Stack for ProtectedStack
{
#[inline(always)]
fn top(&self) -> StackPointer
{
(self.bottom_including_guard_page + self.size_including_guard_page) as StackPointer
}
#[inline(always)]
fn size(&self) -> usize
{
self.size_including_guard_page - Self::page_size()
}
}
impl ProtectedStack
{
#[inline(always)]
pub fn allocate(size: usize) -> Result<Self, io::Error>
{
const NoFileDescriptor: c_int = -1;
const NoOffset: off_t = 0;
let page_size = Self::page_size();
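// Round the requested size up to a whole number of pages, then add one page
// for the guard; the total is clamped below to the RLIMIT_STACK hard limit,
// so a caller asking for more than the limit silently receives a smaller stack.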
let size_including_guard_page_but_might_be_bigger_than_maximum_stack_size = Self::round_up_to_page_size(size, page_size) + page_size;
let size_including_guard_page = min(size_including_guard_page_but_might_be_bigger_than_maximum_stack_size, Self::maximum_stack_size());
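// On targets whose libc does not define MAP_STACK, substitute 0 so that the
// flags expression below compiles unchanged.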
#[cfg(not(any(target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "linux", target_os = "openbsd")))] const MAP_STACK: i32 = 0;
let bottom_including_guard_page = unsafe { mmap(null_mut(), size_including_guard_page, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_STACK | MAP_NORESERVE, NoFileDescriptor, NoOffset) };
if unlikely!(bottom_including_guard_page == MAP_FAILED)
{
Err(io::Error::last_os_error())
}
else
{
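// Revoke all access to the lowest page: a read or write that runs off the
// bottom of the stack now raises SIGSEGV instead of corrupting adjacent memory.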
let result = unsafe { mprotect(bottom_including_guard_page, page_size, PROT_NONE) };
if likely!(result == 0)
{
Ok
(
Self
{
bottom_including_guard_page: (bottom_including_guard_page as usize),
size_including_guard_page,
}
)
}
else if likely!(result == -1)
{
Err(io::Error::last_os_error())
}
else
{
unreachable!()
}
}
}
#[inline(always)]
fn round_up_to_page_size(size: usize, page_size: usize) -> usize
{
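// Classic power-of-two round-up; the page size is assumed (and in practice
// guaranteed by getpagesize()) to be a power of two.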
(size + page_size - 1) & !(page_size - 1)
}
#[inline(always)]
fn page_size() -> usize
{
(unsafe { getpagesize() }) as usize
}
#[inline(always)]
fn maximum_stack_size() -> usize
{
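// Uses the hard limit (rlim_max) rather than the soft limit (rlim_cur), as it
// is the most the process could ever raise the soft limit to.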
#[inline(always)]
fn uncached_maximum_stack_size() -> usize
{
// rlimit is a plain pair of integers, so zero-initialization is sound.
let mut limit: rlimit = unsafe { zeroed() };
let result = unsafe { getrlimit(RLIMIT_STACK, &mut limit) };
if likely!(result == 0)
{
let maximum = limit.rlim_max;
if maximum == RLIM_INFINITY || maximum > (::std::usize::MAX as rlim_t)
{
::std::usize::MAX
}
else
{
maximum as usize
}
}
else if likely!(result == -1)
{
panic!("getrlimit() failed with `{:?}`", io::Error::last_os_error())
}
else
{
unreachable!()
}
}
use self::Ordering::Relaxed;
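// Benign race: competing threads may each call uncached_maximum_stack_size()
// once, but all store the same value, so Relaxed ordering suffices; zero
// doubles as the "not yet computed" sentinel.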
static MaximumStackSize: AtomicUsize = AtomicUsize::new(0);
let potential_maximum_stack_size = MaximumStackSize.load(Relaxed);
if unlikely!(potential_maximum_stack_size == 0)
{
let maximum_stack_size = uncached_maximum_stack_size();
MaximumStackSize.store(maximum_stack_size, Relaxed);
maximum_stack_size
}
else
{
potential_maximum_stack_size
}
}
}
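// A minimal usage sketch, not part of the original source. It assumes the
// `Stack` trait (supplying top() and size()) is importable via `use super::*`,
// that the requested size is a multiple of the page size, and that the
// RLIMIT_STACK hard limit comfortably exceeds the request.
#[cfg(test)]
mod tests
{
use super::*;

#[test]
fn allocate_reserves_an_extra_guard_page()
{
let requested = 64 * 1024;
let stack = ProtectedStack::allocate(requested).expect("allocate() failed");

// The usable size excludes the guard page added by allocate().
assert_eq!(stack.size(), requested);
assert_eq!(stack.size_including_guard_page, requested + ProtectedStack::page_size());

// top() is one-past-the-end of the mapping; the stack grows down from it.
assert_eq!(stack.top() as usize, stack.bottom_including_guard_page + stack.size_including_guard_page);
}
}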