//! This module provides a `MmapAllocator` type that implements the `Allocator` trait by creating
//! an anonymous and private memory mapping for every allocation.
//!
//! It is inefficient both in terms of memory usage, as allocations are rounded up to multiples of
//! `PAGE_SIZE`, and in terms of CPU usage, as every allocation requires calling into the kernel
//! and modifying the process' memory map.
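//!
//! A minimal usage sketch (assuming a nightly toolchain with the unstable `allocator_api`
//! feature, and assuming the allocator is re-exported as `lx::MmapAllocator`):
//!
//! ```ignore
//! #![feature(allocator_api)]
//!
//! use lx::MmapAllocator;
//!
//! // Every (re)allocation below maps, remaps or unmaps whole pages.
//! let mut v: Vec<u32, MmapAllocator> = Vec::with_capacity_in(4, MmapAllocator);
//! v.extend([1, 2, 3, 4]);
//! assert_eq!(v.iter().sum::<u32>(), 10);
//! ```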

use core::{
    alloc::{
        AllocError,
        Allocator,
        Layout,
    },
    mem,
    ptr::{
        self,
        NonNull,
    },
};

use crate::{
    mmap_anonymous,
    mprotect,
    mremap,
    Mmap,
    ENOMEM,
    MAP_PRIVATE,
    MREMAP_MAYMOVE,
    PAGE_SIZE,
    PROT_NONE,
    PROT_READ,
    PROT_WRITE,
};

/// An `Allocator` that uses `mmap` and `mremap` to allocate memory.
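///
/// A sketch of direct use through the `Allocator` trait (again assuming the nightly
/// `allocator_api` feature):
///
/// ```ignore
/// use core::alloc::{Allocator, Layout};
///
/// let layout = Layout::from_size_align(64, 8).unwrap();
/// let block = MmapAllocator.allocate(layout).expect("out of memory");
/// // The returned slice covers whole pages, so it is longer than the 64 requested bytes.
/// assert!(block.len() >= 64);
/// // SAFETY: `block` was allocated by this allocator and `layout` fits it.
/// unsafe { MmapAllocator.deallocate(block.cast(), layout) };
/// ```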
pub struct MmapAllocator;

impl MmapAllocator {
    fn round_up_size(size: usize) -> Option<usize> {
        // The kernel rounds the size up to a whole number of pages, so we might as well report
        // the rounded size back to the caller.
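        // For example, with `PAGE_SIZE == 4096` a 5000-byte request is reported as 8192 bytes
        // (5000 + 4095 == 9095, which masks down to 8192). `None` is returned only when `size`
        // is within `PAGE_SIZE - 1` bytes of `usize::MAX`.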
        size.checked_add(PAGE_SIZE - 1)
            .map(|s| s & !(PAGE_SIZE - 1))
    }

    unsafe fn mmap_from_allocation(ptr: NonNull<u8>, layout: Layout) -> Mmap {
        // SAFETY: `layout`'s size was already checked.
        let size = Self::round_up_size(layout.size()).unwrap_unchecked();

        let slice = ptr::slice_from_raw_parts_mut(ptr.as_ptr(), size);
        Mmap::from_raw(slice)
    }

    unsafe fn resize(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: `old_layout`'s size was already checked.
        let old_size = Self::round_up_size(old_layout.size()).unwrap_unchecked();

        let new_size = Self::round_up_size(new_layout.size()).ok_or(AllocError)?;

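        // A block only has to move when `new_layout` imposes a stricter alignment than the
        // current address happens to satisfy: for example, an address ending in `0x3000`
        // satisfies an alignment of `0x1000` but not one of `0x4000`.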
        let still_aligned = (ptr.as_ptr() as usize & (new_layout.align() - 1)) == 0;
        if still_aligned {
            if new_size == old_size {
                // Nothing to do.
                let mem = ptr::slice_from_raw_parts_mut(ptr.as_ptr(), old_size);
                return Ok(NonNull::new_unchecked(mem));
            }
            let mut mmap = Self::mmap_from_allocation(ptr, old_layout);
            let mut flags = 0;
            if new_size > old_size && new_layout.align() <= PAGE_SIZE {
                // `mremap` always returns addresses aligned to `PAGE_SIZE`, which is greater
                // than or equal to the requested alignment, so moving the mapping is fine.
                flags |= MREMAP_MAYMOVE;
            }
            match mremap(&mut mmap, new_size, flags, ptr::null_mut()) {
                Ok(_) => return Ok(Self::slice_from_mmap(mmap)),
                Err(e) => {
                    mem::forget(mmap);
                    // `mremap` also returns `-ENOMEM` when `MREMAP_MAYMOVE` is not set and the
                    // mapping cannot grow in place due to a lack of space nearby. In that case, we
                    // can still fall back to the slow path below.
                    if e.code() != ENOMEM || (flags & MREMAP_MAYMOVE) != 0 {
                        return Err(AllocError);
                    }
                }
            }
        }

        // This is the slow path: try to get a new memory mapping, copy the memory there and unmap
        // the old one.

        let new_ptr = self.allocate(new_layout)?;

        let bytes_in_common = old_layout.size().min(new_layout.size());

        // SAFETY: because both `new_layout.size()` and `old_layout.size()` are greater than or
        // equal to `bytes_in_common` by definition of the minimum, both the old and new memory
        // allocations are valid for reads and writes for `bytes_in_common` bytes. Also, because
        // the old allocation wasn't yet deallocated, it cannot overlap `new_ptr`. Thus, the call
        // to `copy_nonoverlapping` is safe. The safety contract for `deallocate` must be upheld by
        // the caller.
        ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, bytes_in_common);
        self.deallocate(ptr, old_layout);

        Ok(new_ptr)
    }

    fn slice_from_mmap(mut mmap: Mmap) -> NonNull<[u8]> {
        let slice = NonNull::from(mmap.as_mut());
        mem::forget(mmap);
        slice
    }
}

unsafe impl Allocator for MmapAllocator {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let size = Self::round_up_size(layout.size()).ok_or(AllocError)?;

        let align_greater_than_page_size = layout.align() > PAGE_SIZE;
        let mmap = if align_greater_than_page_size {
            // An example scenario is represented below. The application wants to allocate `9 *
            // PAGE_SIZE` bytes aligned to `8 * PAGE_SIZE`.
            //
            // Every character represents a page of size `PAGE_SIZE`. "a"s represent pages that are
            // mapped to respect alignment and "s"s represent pages that will be returned and thus
            // must respect the sizing constraint.
            //
            // Positions that respect the alignment are marked with a "#" above. Note that the
            // alignment is always a power of two so in this case also a multiple of `PAGE_SIZE`.
            //
            // #       #       #
            //   aaaaaasssssssaaa

            // `layout.align() > PAGE_SIZE` in this branch, so the subtraction cannot
            // underflow; the addition is checked because the padded size can overflow for
            // huge alignments.
            let padded_size = size
                .checked_add(layout.align() - PAGE_SIZE)
                .ok_or(AllocError)?;

            // Use `PROT_NONE` to not actually commit anything until the padding is removed, even
            // when overcommit is disabled.
            let mmap = mmap_anonymous(ptr::null_mut(), padded_size, PROT_NONE, MAP_PRIVATE)
                .map_err(|_| AllocError)?;

            // Unmap padding at the left.
            let align_mask = layout.align() - 1;
            let addr = mmap.as_ref().as_ptr() as usize;
            let align_offset = addr.wrapping_neg() & align_mask;
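            // For example, with `PAGE_SIZE == 0x1000`, an alignment of `0x8000` and
            // `addr == 0x7f55_5555_1000`: `addr.wrapping_neg() & 0x7fff == 0x7000`, so seven
            // pages are unmapped from the front and the mapping then starts at the aligned
            // address `0x7f55_5555_8000`.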
            let mmap = if align_offset != 0 {
                // SAFETY: the memory mapping represented by `mmap` does not employ huge pages so
                // the only safety requirement is that `align_offset` is a multiple of `PAGE_SIZE`.
                // `addr` is non-null and a multiple of `PAGE_SIZE` since it comes from a
                // successful call to `mmap`. Because `addr.wrapping_neg()` equals `2^N - addr`
                // where `N` is the number of bits in a `usize` and `PAGE_SIZE` equals `2^M` where
                // `M < N`, then `addr.wrapping_neg()` is also a multiple of `PAGE_SIZE` and has
                // its lowest `M` bits unset. `align_offset` thus also has its lowest `M` bits
                // unset and is therefore a multiple of `PAGE_SIZE`.
                unsafe { mmap.split_at(align_offset).1 }
            } else {
                mmap
            };

            // Unmap padding at the right.
            let mmap = if mmap.as_ref().len() != size {
                // SAFETY: the memory mapping represented by `mmap` does not employ huge pages so
                // the only safety requirement is that `size` is a multiple of `PAGE_SIZE`.
                // `round_up_size` returns multiples of `PAGE_SIZE` so that requirement is
                // fulfilled.
                unsafe { mmap.split_at(size).0 }
            } else {
                mmap
            };

            mprotect(&mmap, PROT_READ | PROT_WRITE).map_err(|_| AllocError)?;

            mmap
        } else {
            // `mmap` already returns addresses aligned to `PAGE_SIZE`, which is greater than or
            // equal to the requested alignment.
            mmap_anonymous(ptr::null_mut(), size, PROT_READ | PROT_WRITE, MAP_PRIVATE)
                .map_err(|_| AllocError)?
        };

        Ok(Self::slice_from_mmap(mmap))
    }

    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Anonymous memory mappings are already zero-initialized.
        self.allocate(layout)
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );
        self.resize(ptr, old_layout, new_layout)
    }

    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let new_ptr = self.grow(ptr, old_layout, new_layout)?;

        // Freshly mapped anonymous pages are zero-filled by the kernel, but after an in-place
        // or moved `mremap` the tail of the last old page keeps whatever the caller wrote
        // there, so zero it explicitly.
        // SAFETY: `old_layout`'s size was already checked when the allocation was created.
        let old_size = Self::round_up_size(old_layout.size()).unwrap_unchecked();
        ptr::write_bytes(
            new_ptr.cast::<u8>().as_ptr().add(old_layout.size()),
            0,
            old_size - old_layout.size(),
        );

        Ok(new_ptr)
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );
        self.resize(ptr, old_layout, new_layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // An `Mmap` object calls `munmap` when it is dropped.
        let _mmap = Self::mmap_from_allocation(ptr, layout);
    }
}