//! This module provides a `MmapAllocator` type that implements the `Allocator` trait by creating
//! an anonymous and private memory mapping for every allocation.
//!
//! It is inefficient both in terms of memory usage, since allocations are rounded up to multiples
//! of `PAGE_SIZE`, and in terms of CPU usage, since every allocation requires calling into the
//! kernel and modifying the process's memory map.
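//!
//! A minimal usage sketch, assuming a nightly toolchain with the `allocator_api` feature and
//! `Vec` available from `alloc`/`std` (the import path below is illustrative):
//!
//! ```ignore
//! #![feature(allocator_api)]
//!
//! use your_crate::mmap_allocator::MmapAllocator; // illustrative path
//!
//! // Every reallocation goes through `mremap` (or the mmap/copy/munmap fallback in `resize`)
//! // instead of a general-purpose heap allocator.
//! let mut v: Vec<u8, MmapAllocator> = Vec::new_in(MmapAllocator);
//! v.extend_from_slice(b"hello, mmap");
//! assert_eq!(v.len(), 11);
//! ```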
use core::{
alloc::{
AllocError,
Allocator,
Layout,
},
mem,
ptr::{
self,
NonNull,
},
};
use crate::{
mmap_anonymous,
mprotect,
mremap,
Mmap,
ENOMEM,
MAP_PRIVATE,
MREMAP_MAYMOVE,
PAGE_SIZE,
PROT_NONE,
PROT_READ,
PROT_WRITE,
};
/// An `Allocator` that uses `mmap` and `mremap` to allocate memory.
pub struct MmapAllocator;
impl MmapAllocator {
fn round_up_size(size: usize) -> Option<usize> {
        // The kernel rounds up the size to a whole page, so we might as well return the end of
        // the page to the caller.
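        // For example, assuming `PAGE_SIZE == 4096`: 1 -> 4096, 4096 -> 4096, and 4097 -> 8192;
        // `None` is returned only if adding `PAGE_SIZE - 1` overflows `usize`.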
size.checked_add(PAGE_SIZE - 1)
.map(|s| s & !(PAGE_SIZE - 1))
}
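    /// Rebuilds the owning `Mmap` for an allocation previously handed out by this allocator, so
    /// that it can be resized with `mremap` or unmapped on drop. The caller must pass the same
    /// pointer and layout that were used for the original allocation.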
unsafe fn mmap_from_allocation(mut ptr: NonNull<u8>, layout: Layout) -> Mmap {
// SAFETY: `layout`'s size was already checked.
let size = Self::round_up_size(layout.size()).unwrap_unchecked();
let slice = ptr::slice_from_raw_parts_mut(ptr.as_mut(), size);
Mmap::from_raw(slice)
}
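    /// Shared implementation of `grow` and `shrink`. It first tries to resize the mapping in
    /// place with `mremap` (allowing the kernel to move it only when growing and when page
    /// alignment satisfies the new layout), and otherwise falls back to allocating a new
    /// mapping, copying the contents over, and unmapping the old one.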
unsafe fn resize(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: `old_layout`'s size was already checked.
let old_size = Self::round_up_size(old_layout.size()).unwrap_unchecked();
let new_size = Self::round_up_size(new_layout.size()).ok_or(AllocError)?;
let still_aligned = (ptr.as_ptr() as usize & (new_layout.align() - 1)) == 0;
if still_aligned {
if new_size == old_size {
// Nothing to do.
let mem = ptr::slice_from_raw_parts_mut(ptr.as_ptr(), old_size);
return Ok(NonNull::new_unchecked(mem));
}
let mut mmap = Self::mmap_from_allocation(ptr, old_layout);
let mut flags = 0;
            let new_align_lower_than_or_equal_to_page_size = new_layout.align() <= PAGE_SIZE;
            if new_size > old_size && new_align_lower_than_or_equal_to_page_size {
                // `mremap` always returns addresses aligned to `PAGE_SIZE`, which is greater
                // than or equal to the requested alignment, so moving the mapping is fine.
flags |= MREMAP_MAYMOVE;
}
match mremap(&mut mmap, new_size, flags, ptr::null_mut()) {
Ok(_) => return Ok(Self::slice_from_mmap(mmap)),
Err(e) => {
mem::forget(mmap);
                    // `mremap` also returns `-ENOMEM` when `MREMAP_MAYMOVE` is not set and the
                    // mapping cannot grow in place due to a lack of space nearby. In that case,
                    // we can still fall back to the slow path below.
if e.code() != ENOMEM || (flags & MREMAP_MAYMOVE) != 0 {
return Err(AllocError);
}
}
}
}
// This is the slow path: try to get a new memory mapping, copy the memory there and unmap
// the old one.
let new_ptr = self.allocate(new_layout)?;
let bytes_in_common = old_layout.size().min(new_layout.size());
// SAFETY: because both `new_layout.size()` and `old_layout.size()` are greater than or
// equal to `bytes_in_common` by definition of the minimum, both the old and new memory
// allocations are valid for reads and writes for `bytes_in_common` bytes. Also, because
// the old allocation wasn't yet deallocated, it cannot overlap `new_ptr`. Thus, the call
// to `copy_nonoverlapping` is safe. The safety contract for `deallocate` must be upheld by
// the caller.
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, bytes_in_common);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
}
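    /// Leaks the `Mmap` so that its `Drop` implementation does not unmap the pages being handed
    /// to the caller, and returns them as the `NonNull<[u8]>` expected by the `Allocator` API.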
fn slice_from_mmap(mut mmap: Mmap) -> NonNull<[u8]> {
let slice = mmap.as_mut();
let slice = unsafe { NonNull::new_unchecked(slice as *mut [u8]) };
mem::forget(mmap);
slice
}
}
unsafe impl Allocator for MmapAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = Self::round_up_size(layout.size()).ok_or(AllocError)?;
        let align_greater_than_page_size = layout.align() > PAGE_SIZE;
let mmap = if align_greater_than_page_size {
            // An example scenario is represented below. The application wants to allocate
            // `9 * PAGE_SIZE` bytes aligned to `8 * PAGE_SIZE`.
            //
            // Every character represents a page of size `PAGE_SIZE`. "a"s represent padding pages
            // that are mapped only so the alignment can be respected and "s"s represent pages
            // that will be returned and thus must respect the sizing constraint.
            //
            // Positions that respect the alignment are marked with a "#" above. Note that the
            // alignment is always a power of two, so in this case it is also a multiple of
            // `PAGE_SIZE`.
            //
            //       #       #
            // aaaaaasssssssssa
let padded_size = size + layout.align() - PAGE_SIZE;
            // Use `PROT_NONE` so that nothing is actually committed until the padding is
            // removed, even when overcommit is disabled.
let mmap = mmap_anonymous(ptr::null_mut(), padded_size, PROT_NONE, MAP_PRIVATE)
.map_err(|_| AllocError)?;
// Unmap padding at the left.
let align_mask = layout.align() - 1;
let addr = mmap.as_ref().as_ptr() as usize;
let align_offset = addr.wrapping_neg() & align_mask;
let mmap = if align_offset != 0 {
// SAFETY: the memory mapping represented by `mmap` does not employ huge pages so
// the only safety requirement is that `align_offset` is a multiple of `PAGE_SIZE`.
// `addr` is non-null and a multiple of `PAGE_SIZE` since it comes from a
// successful call to `mmap`. Because `addr.wrapping_neg()` equals `2^N - addr`
// where `N` is the number of bits in a `usize` and `PAGE_SIZE` equals `2^M` where
// `M < N`, then `addr.wrapping_neg()` is also a multiple of `PAGE_SIZE` and has
// its lowest `M` bits unset. `align_offset` thus also has its lowest `M` bits
// unset and is therefore a multiple of `PAGE_SIZE`.
unsafe { mmap.split_at(align_offset).1 }
} else {
mmap
};
// Unmap padding at the right.
let mmap = if mmap.as_ref().len() != size {
                // SAFETY: the memory mapping represented by `mmap` does not employ huge pages so
                // the only safety requirement is that the split offset, `size`, is a multiple of
                // `PAGE_SIZE`. `round_up_size` returns multiples of `PAGE_SIZE` so that
                // requirement is fulfilled.
unsafe { mmap.split_at(size).0 }
} else {
mmap
};
mprotect(&mmap, PROT_READ | PROT_WRITE).map_err(|_| AllocError)?;
mmap
} else {
            // `mmap` already returns addresses aligned to `PAGE_SIZE`, which is greater than or
            // equal to the requested alignment.
mmap_anonymous(ptr::null_mut(), size, PROT_READ | PROT_WRITE, MAP_PRIVATE)
.map_err(|_| AllocError)?
};
Ok(Self::slice_from_mmap(mmap))
}
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
// Anonymous memory mappings are already zero-initialized.
self.allocate(layout)
}
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
        self.resize(ptr, old_layout, new_layout)
}
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// Anonymous memory mappings are already zero-initialized.
self.grow(ptr, old_layout, new_layout)
}
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
        self.resize(ptr, old_layout, new_layout)
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
// An `Mmap` object calls `munmap` when it is dropped.
let _mmap = Self::mmap_from_allocation(ptr, layout);
}
}