#[cfg(windows)]
extern crate kernel32;
#[cfg(windows)]
extern crate winapi;
#[cfg(all(unix, not(target_os = "android")))]
extern crate libc;

use std::alloc::{handle_alloc_error, GlobalAlloc, Layout};
use std::cell::UnsafeCell;
use std::ptr::null_mut;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

// Round `size` up to the next multiple of `align` (`align` must be a power of two).
fn align_to(size: usize, align: usize) -> usize {
    (size + align - 1) & !(align - 1)
}

// Mutable allocator state: the current bump offset, the base of the mmap'd
// arena, and a flag marking whether the arena still needs to be mapped.
struct Inner {
    offset: AtomicUsize,
    mmap: *mut u8,
    initializing: AtomicBool,
}

pub struct BumpAlloc {
    inner: UnsafeCell<Inner>,
    size: usize,
}

// The raw pointer in `Inner` makes this type `!Sync` by default; the design
// relies on the atomics coordinating initialization and `offset` instead.
unsafe impl Sync for BumpAlloc {}

impl BumpAlloc {
    // Default to a 1 GiB arena.
    pub const fn new() -> BumpAlloc {
        BumpAlloc::with_size(1024 * 1024 * 1024)
    }

    pub const fn with_size(size: usize) -> BumpAlloc {
        BumpAlloc {
            inner: UnsafeCell::new(Inner {
                initializing: AtomicBool::new(true),
                mmap: null_mut(),
                offset: AtomicUsize::new(0),
            }),
            size,
        }
    }
}

#[cfg(all(windows, target_pointer_width = "32"))]
type WindowsSize = u32;
#[cfg(all(windows, target_pointer_width = "64"))]
type WindowsSize = u64;

// Reserve and commit `size` bytes of zero-initialized, read/write memory.
#[cfg(windows)]
unsafe fn mmap_wrapper(size: usize) -> *mut u8 {
    kernel32::VirtualAlloc(
        null_mut(),
        size as WindowsSize,
        winapi::um::winnt::MEM_COMMIT | winapi::um::winnt::MEM_RESERVE,
        winapi::um::winnt::PAGE_READWRITE,
    ) as *mut u8
}

// Map `size` bytes of zero-initialized, private, read/write memory.
#[cfg(all(unix, not(target_os = "android")))]
unsafe fn mmap_wrapper(size: usize) -> *mut u8 {
    libc::mmap(
        null_mut(),
        size,
        libc::PROT_READ | libc::PROT_WRITE,
        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
        -1,
        0,
    ) as *mut u8
}

unsafe impl GlobalAlloc for BumpAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let inner = &mut *self.inner.get();

        // The first caller to flip `initializing` maps the arena; everyone
        // else waits below until the mapping has been published.
        if inner.initializing.swap(false, Ordering::Relaxed) {
            inner.mmap = mmap_wrapper(self.size);
            // `mmap` reports failure as MAP_FAILED (-1) and `VirtualAlloc`
            // as a null pointer, so reject both.
            if inner.mmap.is_null() || inner.mmap as isize == -1 {
                handle_alloc_error(layout);
            }
        } else {
            // Spin until the initializing thread finishes its first
            // allocation, which bumps `offset` away from zero.
            while 0 == inner.offset.load(Ordering::Relaxed) {}
        }

        // Over-reserve by `align` bytes so that the aligned offset still
        // fits inside the region claimed by this call.
        let bytes_required = align_to(layout.size() + layout.align(), layout.align());
        let my_offset = inner.offset.fetch_add(bytes_required, Ordering::Relaxed);
        let aligned_offset = align_to(my_offset, layout.align());

        if (aligned_offset + layout.size()) > self.size {
            handle_alloc_error(layout);
        }

        inner.mmap.offset(aligned_offset as isize)
    }

    // A bump allocator never frees; the arena is reclaimed when the process exits.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}
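
// Usage sketch (not part of the original listing): assuming this file is the
// root of a binary crate, registering a `BumpAlloc` with `#[global_allocator]`
// routes every heap allocation in the program through `alloc` above. The
// static name `GLOBAL` and the demo `main` below are illustrative.
#[global_allocator]
static GLOBAL: BumpAlloc = BumpAlloc::new();

fn main() {
    // The Vec's buffer (and any temporaries created by `println!`) come out
    // of the single mmap'd arena; since `dealloc` is a no-op, nothing is
    // ever handed back.
    let v: Vec<u64> = (0..1_000).collect();
    println!("allocated {} elements starting at {:p}", v.len(), v.as_ptr());
}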