//! # dumpalo 0.1.0
//!
//! A minimalistic global bump allocator.
//!
//! ## Documentation
//! Usage:
//! ```rust
//! #[global_allocator]
//! static DUMP: dumpalo::DumpAlloc = dumpalo::DumpAlloc;
//! ```
use std::{
    alloc::{GlobalAlloc, Layout},
    cell::UnsafeCell,
    cmp::max,
    ptr::null_mut,
};

// Size of each bump area requested from `malloc`: 64 MiB minus 4096 —
// presumably to land just under a round allocation-size boundary once
// the C allocator adds its own header; TODO confirm the intent.
const AREA_SIZE: usize = 64 * 1024 * 1024 - 4096;
// Requests at or above this threshold bypass the thread-local bump area
// and get their own dedicated `malloc` region (see `DumpAlloc::alloc`).
const MAX_ALLOC_SIZE: usize = AREA_SIZE / 4;

thread_local! {
    // Per-thread bump state. Starts empty (`next == end == null`), so the
    // very first allocation on each thread always grabs a fresh area.
    // `UnsafeCell` is sound here because the value is thread-local and the
    // only accesses go through `DUMP.with` in `DumpAlloc::alloc`.
    static DUMP: UnsafeCell<Dump> = UnsafeCell::new(Dump {
        next: null_mut(),
        end: null_mut(),
    });
}

/// Bump-pointer state for one thread's current allocation area.
struct Dump {
    /// Next free byte in the current area (null before the first area
    /// has been obtained).
    next: *mut u8,
    /// One past the last usable byte of the current area.
    end: *mut u8,
}

pub struct DumpAlloc;

unsafe impl GlobalAlloc for DumpAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        if MAX_ALLOC_SIZE <= layout.size() {
            return new_area(layout.size());
        }
        DUMP.with(|dump| alloc_local(layout, dump.get().as_mut().unwrap()))
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if cfg!(feature = "zero-on-dealloc") {
            // Maybe zram will have an easier time as your machine inevitably descends into swap-induced slugishness
            for o in 0..layout.size() {
                ptr.add(o).write_unaligned(0);
            }
        }
    }
}

/// Bump-allocates `layout` from the thread-local `dump`, replenishing
/// from `new_area` when the current area cannot satisfy the request.
///
/// Returns null when a fresh area cannot be obtained. Any space left in
/// the exhausted old area is simply abandoned — nothing is ever freed.
unsafe fn alloc_local(layout: Layout, dump: &mut Dump) -> *mut u8 {
    // Worst case we need `size` bytes plus up to `align - 1` bytes of
    // padding, so reserving `size + align` always suffices.
    let require = layout.size() + layout.align();
    if require > dump.end as usize - dump.next as usize {
        let area_size = max(AREA_SIZE, require);
        let a = new_area(area_size);
        if a.is_null() {
            return null_mut();
        }
        dump.next = a;
        // BUG FIX: the old code always recorded `end = a + AREA_SIZE`,
        // undercounting the area when `require > AREA_SIZE` (possible
        // with a huge alignment, since only `size` is checked against
        // MAX_ALLOC_SIZE in `alloc`). Record the actual allocated size.
        dump.end = a.add(area_size);
    }
    // Round `next` up to the requested alignment, then bump past the
    // allocation. The `require` check above guarantees this stays
    // within `dump.end`.
    let at = dump.next.add(dump.next.align_offset(layout.align()));
    dump.next = at.add(layout.size());
    at
}

/// Obtains a fresh `size`-byte region from the C heap, or null on
/// failure. The region is never freed; alignment is whatever `malloc`
/// provides, so callers needing more must align within the region.
unsafe fn new_area(size: usize) -> *mut u8 {
    libc::malloc(size).cast()
}