dumpalo/
lib.rs

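//! A bump allocator that carves allocations out of large per-thread chunks
//! and never frees them.
//!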
//! Usage:
//! ```rust
//! #[global_allocator]
//! static DUMP: dumpalo::DumpAlloc = dumpalo::DumpAlloc;
//! ```
use std::{
    alloc::{GlobalAlloc, Layout},
    cell::UnsafeCell,
    cmp::max,
    ptr::null_mut,
};

/// Size of each chunk requested from the system allocator: a shade under
/// 64 MiB, presumably so the request plus malloc's bookkeeping stays within
/// a round number of pages.
const AREA_SIZE: usize = 64 * 1024 * 1024 - 4096;
/// Requests at least this large bypass the bump chunk and get a dedicated
/// allocation of their own.
const MAX_ALLOC_SIZE: usize = AREA_SIZE / 4;

thread_local! {
    /// Per-thread bump state: `next` is the first free byte of the current
    /// chunk, `end` is one past its last usable byte.
    static DUMP: UnsafeCell<Dump> = UnsafeCell::new(Dump {
        next: null_mut(),
        end: null_mut(),
    });
}

struct Dump {
    next: *mut u8,
    end: *mut u8,
}

/// An allocator that only ever moves a pointer forward; `dealloc` never
/// returns memory to the system.
pub struct DumpAlloc;

unsafe impl GlobalAlloc for DumpAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        if MAX_ALLOC_SIZE <= layout.size() {
            // Large requests get a dedicated chunk. Over-allocate by the
            // alignment so the returned pointer can satisfy `layout.align()`
            // even when it exceeds malloc's guarantee; the chunk is never
            // freed, so losing the original malloc pointer does not matter.
            let a = new_area(layout.size() + layout.align());
            if a.is_null() {
                return null_mut();
            }
            return a.add(a.align_offset(layout.align()));
        }
        DUMP.with(|dump| alloc_local(layout, dump.get().as_mut().unwrap()))
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if cfg!(feature = "zero-on-dealloc") {
            // Maybe zram will have an easier time as your machine inevitably
            // descends into swap-induced sluggishness.
            ptr.write_bytes(0, layout.size());
        }
    }
}

/// Bump-allocate from the current thread's chunk, grabbing a fresh chunk
/// when the remaining space cannot satisfy the request. Whatever is left in
/// the old chunk is simply abandoned.
unsafe fn alloc_local(layout: Layout, dump: &mut Dump) -> *mut u8 {
    // Worst case we need the size plus enough slack to reach the alignment.
    let require = layout.size() + layout.align();
    if require > dump.end as usize - dump.next as usize {
        let size = max(AREA_SIZE, require);
        let a = new_area(size);
        if a.is_null() {
            return null_mut();
        }
        dump.next = a;
        dump.end = a.add(size);
    }
    let at = dump.next.add(dump.next.align_offset(layout.align()));
    dump.next = at.add(layout.size());
    at
}

/// Ask the system allocator for a fresh chunk. Nothing returned from here is
/// ever freed.
unsafe fn new_area(size: usize) -> *mut u8 {
    libc::malloc(size) as *mut u8
}
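
// A minimal smoke-test sketch: it only checks that allocations come back
// non-null and correctly aligned, once through the thread-local bump path
// and once through the dedicated-chunk path for large requests.
#[cfg(test)]
mod tests {
    use super::{DumpAlloc, MAX_ALLOC_SIZE};
    use std::alloc::{GlobalAlloc, Layout};

    #[test]
    fn allocations_are_aligned_and_non_null() {
        unsafe {
            // Small request: served by the current thread's bump chunk.
            let small = Layout::from_size_align(24, 8).unwrap();
            let p = DumpAlloc.alloc(small);
            assert!(!p.is_null());
            assert_eq!(p as usize % small.align(), 0);

            // Request at MAX_ALLOC_SIZE or above: served by a dedicated chunk.
            let big = Layout::from_size_align(MAX_ALLOC_SIZE, 64).unwrap();
            let q = DumpAlloc.alloc(big);
            assert!(!q.is_null());
            assert_eq!(q as usize % big.align(), 0);
        }
    }
}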