/// Allocates memory by asking the operating system for fresh mappings with `mmap()`.
///
/// Configuration (locking, pre-faulting, swap reservation, huge pages, NUMA policy) is
/// folded into flags once at construction; see `Self::new()`.
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub struct MemoryMapAllocator
{
// Flags passed to every `mmap()` call; computed once by `Self::map_flags()`.
map_flags: i32,
// Whether to `mlock()` after mapping; only stored on platforms whose `mmap()` has no lock flag of its own.
#[cfg(not(any(target_os = "android", target_os = "netbsd", target_os = "linux")))] lock: bool,
// Flags passed to `madvise()` on each new mapping; computed once by `Self::madvise_flags()`.
#[cfg(any(target_os = "android", target_os = "linux"))] madvise_flags: i32,
// Optional NUMA policy applied to each new mapping via `NumaSettings::post_allocate()`.
#[cfg(any(target_os = "android", target_os = "linux"))] numa_settings: Option<NumaSettings>,
}
/// Defaults to locked, pre-faulted memory with no swap-space reservation, no low-address
/// restriction, the default huge page size and no NUMA settings.
impl Default for MemoryMapAllocator
{
	#[inline(always)]
	fn default() -> Self
	{
		let lock = true;
		let prefault = true;
		let do_not_reserve_swap_space = true;
		let allocate_within_first_32gb = false;
		Self::new(lock, prefault, do_not_reserve_swap_space, allocate_within_first_32gb, HugePageSize::default(), None)
	}
}
impl MemorySource for MemoryMapAllocator
{
#[inline(always)]
fn obtain(&self, non_zero_size: NonZeroUsize) -> Result<MemoryAddress, AllocErr>
{
self.mmap_memory(non_zero_size.get())
}
#[inline(always)]
fn release(&self, non_zero_size: NonZeroUsize, current_memory: MemoryAddress)
{
Self::munmap_memory(current_memory, non_zero_size.get())
}
}
impl Allocator for MemoryMapAllocator
{
#[inline(always)]
fn allocate(&self, non_zero_size: NonZeroUsize, non_zero_power_of_two_alignment: NonZeroUsize) -> Result<MemoryAddress, AllocErr>
{
const AssumedPageSize: usize = 4096;
if unlikely!(non_zero_power_of_two_alignment.get() > AssumedPageSize)
{
return Err(AllocErr)
}
self.mmap_memory(non_zero_size.get())
}
#[inline(always)]
fn deallocate(&self, non_zero_size: NonZeroUsize, _non_zero_power_of_two_alignment: NonZeroUsize, current_memory: MemoryAddress)
{
Self::munmap_memory(current_memory, non_zero_size.get())
}
#[inline(always)]
fn growing_reallocate(&self, non_zero_new_size: NonZeroUsize, _non_zero_power_of_two_alignment: NonZeroUsize, non_zero_current_size: NonZeroUsize, current_memory: MemoryAddress) -> Result<MemoryAddress, AllocErr>
{
self.mremap_memory(current_memory, non_zero_current_size.get(), non_zero_new_size.get())
}
#[inline(always)]
fn shrinking_reallocate(&self, non_zero_new_size: NonZeroUsize, _non_zero_power_of_two_alignment: NonZeroUsize, non_zero_current_size: NonZeroUsize, current_memory: MemoryAddress) -> Result<MemoryAddress, AllocErr>
{
self.mremap_memory(current_memory, non_zero_current_size.get(), non_zero_new_size.get())
}
}
impl MemoryMapAllocator
{
	/// Creates a new allocator.
	///
	/// * `lock` - try to lock (wire) allocated pages into physical memory.
	/// * `prefault` - try to pre-fault pages at map time so first access does not page fault.
	/// * `do_not_reserve_swap_space` - map with `MAP_NORESERVE` where supported.
	/// * `allocate_within_first_32Gb` - map with `MAP_32BIT` where supported. NOTE(review): Linux documents `MAP_32BIT` as restricting mappings to the low 2Gb, not 32Gb - confirm the intended semantics.
	/// * `huge_page_size` - huge page preference (only honoured on Android and Linux).
	/// * `numa_settings` - NUMA policy applied to each new mapping (only honoured on Android and Linux).
	#[allow(unused_variables)]
	#[inline(always)]
	pub fn new(lock: bool, prefault: bool, do_not_reserve_swap_space: bool, allocate_within_first_32Gb: bool, huge_page_size: HugePageSize, numa_settings: Option<NumaSettings>) -> Self
	{
		Self
		{
			map_flags: Self::map_flags(lock, prefault, do_not_reserve_swap_space, allocate_within_first_32Gb, huge_page_size),

			// Platforms without `MAP_LOCKED` / `MAP_WIRED` lock after mapping using `mlock()`, so the intent is remembered.
			#[cfg(not(any(target_os = "android", target_os = "netbsd", target_os = "linux")))] lock,

			#[cfg(any(target_os = "android", target_os = "linux"))] madvise_flags: Self::madvise_flags(huge_page_size),

			#[cfg(any(target_os = "android", target_os = "linux"))] numa_settings,
		}
	}

	/// Maps `size` bytes of private, anonymous, read-write memory.
	///
	/// Any post-mapping step (`madvise()`, NUMA policy, `mlock()`) that fails unmaps the memory before returning `Err(AllocErr)`.
	#[inline(always)]
	fn mmap_memory(&self, size: usize) -> Result<MemoryAddress, AllocErr>
	{
		// Portable anonymous mappings use a file descriptor of `-1` and an offset of zero.
		const UnusedFileDescriptor: i32 = -1;
		const NoOffset: i64 = 0;

		let result = unsafe { mmap(null_mut(), size, PROT_READ | PROT_WRITE, self.map_flags, UnusedFileDescriptor, NoOffset) };
		if unlikely!(result == MAP_FAILED)
		{
			Err(AllocErr)
		}
		else
		{
			#[cfg(any(target_os = "android", target_os = "linux"))] self.madvise_memory(result, size)?;
			#[cfg(any(target_os = "android", target_os = "linux"))] self.numa_memory(result, size)?;
			#[cfg(not(any(target_os = "android", target_os = "netbsd", target_os = "linux")))] self.mlock_memory(result, size)?;
			Ok(Self::cast_address(result))
		}
	}

	/// Applies `self.madvise_flags` to a fresh mapping; on failure, unmaps it and returns `Err(AllocErr)`.
	#[cfg(any(target_os = "android", target_os = "linux"))]
	#[inline(always)]
	fn madvise_memory(&self, address: *mut c_void, size: usize) -> Result<(), AllocErr>
	{
		let result = unsafe { madvise(address, size, self.madvise_flags) };
		if likely!(result == 0)
		{
		}
		else if likely!(result == -1)
		{
			// Do not leak the mapping on failure.
			Self::munmap_memory(Self::cast_address(address), size);
			return Err(AllocErr)
		}
		else
		{
			// `madvise()` only ever returns `0` or `-1`.
			unreachable!()
		}
		Ok(())
	}

	/// Applies the configured NUMA policy, if any, to a fresh mapping; on failure, unmaps it and returns `Err(AllocErr)`.
	#[cfg(any(target_os = "android", target_os = "linux"))]
	#[inline(always)]
	fn numa_memory(&self, address: *mut c_void, size: usize) -> Result<(), AllocErr>
	{
		match self.numa_settings
		{
			None => Ok(()),
			Some(ref numa_settings) =>
			{
				let outcome = numa_settings.post_allocate(address, size);
				if unlikely!(outcome.is_err())
				{
					// Do not leak the mapping on failure.
					Self::munmap_memory(Self::cast_address(address), size);
					return Err(AllocErr)
				}
				Ok(())
			}
		}
	}

	/// Locks a fresh mapping into physical memory if `self.lock` is set; on failure, unmaps it and returns `Err(AllocErr)`.
	#[cfg(not(any(target_os = "android", target_os = "netbsd", target_os = "linux")))]
	#[inline(always)]
	fn mlock_memory(&self, address: *mut c_void, size: usize) -> Result<(), AllocErr>
	{
		if self.lock
		{
			let result = unsafe { mlock(address, size) };
			if likely!(result == 0)
			{
			}
			else if likely!(result == -1)
			{
				// Do not leak the mapping on failure.
				Self::munmap_memory(Self::cast_address(address), size);
				return Err(AllocErr)
			}
			else
			{
				// `mlock()` only ever returns `0` or `-1`.
				unreachable!()
			}
		}
		Ok(())
	}

	/// Resizes a mapping in place if possible, otherwise lets the kernel move it (`MREMAP_MAYMOVE`).
	///
	/// NOTE(review): NetBSD's native `mremap()` takes five arguments (`oldp, oldsize, newp, newsize, flags`), not four - confirm the `mremap` binding in scope here matches this call.
	#[cfg(any(target_os = "android", target_os = "linux", target_os = "netbsd"))]
	#[inline(always)]
	fn mremap_memory(&self, memory_address: MemoryAddress, old_size: usize, new_size: usize) -> Result<MemoryAddress, AllocErr>
	{
		#[cfg(target_os = "netbsd")] const MREMAP_MAYMOVE: i32 = 0;

		let result = unsafe { mremap(memory_address.as_ptr() as *mut _, old_size, new_size, MREMAP_MAYMOVE) };
		if unlikely!(result == MAP_FAILED)
		{
			Err(AllocErr)
		}
		else
		{
			Ok(Self::cast_address(result))
		}
	}

	/// Fallback for platforms without `mremap()`: map new memory, copy the contents across, then unmap the old mapping.
	#[cfg(not(any(target_os = "android", target_os = "linux", target_os = "netbsd")))]
	#[inline(always)]
	fn mremap_memory(&self, memory_address: MemoryAddress, old_size: usize, new_size: usize) -> Result<MemoryAddress, AllocErr>
	{
		let new_memory_address = self.mmap_memory(new_size)?;

		// Fixed: copy no more than fits in the new mapping; copying `old_size` bytes when shrinking (`new_size < old_size`) wrote past the end of the new mapping.
		let bytes_to_copy = old_size.min(new_size);
		unsafe { new_memory_address.as_ptr().copy_from_nonoverlapping(memory_address.as_ptr() as *const _, bytes_to_copy) };

		Self::munmap_memory(memory_address, old_size);
		Ok(new_memory_address)
	}

	/// Unmaps memory; the result of `munmap()` is deliberately ignored as there is no useful recovery from failure here.
	#[inline(always)]
	fn munmap_memory(memory_address: MemoryAddress, size: usize)
	{
		unsafe { munmap(memory_address.as_ptr() as *mut _, size) };
	}

	/// Converts a raw `mmap()` result (already known not to be `MAP_FAILED`) into a `MemoryAddress`.
	#[inline(always)]
	fn cast_address(address: *mut c_void) -> MemoryAddress
	{
		address.non_null().cast::<u8>()
	}

	/// Computes the `mmap()` flags for this platform from the requested behaviours.
	#[allow(unused_variables)]
	#[inline(always)]
	fn map_flags(lock: bool, prefault: bool, do_not_reserve_swap_space: bool, allocate_within_first_32Gb: bool, huge_page_size: HugePageSize) -> i32
	{
		#[cfg(target_os = "netbsd")] const MAP_ANONYMOUS: i32 = MAP_ANON;

		// Fixed: was `all(...)`, which can never be true (a target has exactly one `target_os`), so on DragonFly, FreeBSD and OpenBSD *neither* definition applied and compilation failed; `MAP_NOCORE` was never used.
		#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))] const OmitFromCoreDumps: i32 = MAP_NOCORE;
		#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd")))] const OmitFromCoreDumps: i32 = 0;

		#[cfg(any(target_os = "android", target_os = "linux"))] const Locked: i32 = MAP_LOCKED;
		#[cfg(target_os = "netbsd")] const Locked: i32 = MAP_WIRED;
		#[cfg(not(any(target_os = "android", target_os = "netbsd", target_os = "linux")))] const Locked: i32 = 0;

		#[cfg(any(target_os = "android", target_os = "linux"))] const Prefault: i32 = MAP_POPULATE;
		#[cfg(target_os = "freebsd")] const Prefault: i32 = MAP_PREFAULT_READ;
		#[cfg(not(any(target_os = "android", target_os = "freebsd", target_os = "linux")))] const Prefault: i32 = 0;

		#[cfg(any(target_os = "android", target_os = "linux", target_os = "netbsd"))] const DoNotReserveSwapSpace: i32 = MAP_NORESERVE;
		#[cfg(not(any(target_os = "android", target_os = "linux", target_os = "netbsd")))] const DoNotReserveSwapSpace: i32 = 0;

		// NOTE(review): Linux documents `MAP_32BIT` as restricting mappings to the low 2Gb, despite this parameter's name - confirm intent.
		#[cfg(all(target_pointer_width = "64", any(target_os = "android", target_os = "freebsd", target_os = "linux")))] const AllocateWithinFirst32Gb: i32 = MAP_32BIT;
		#[cfg(not(all(target_pointer_width = "64", any(target_os = "android", target_os = "freebsd", target_os = "linux"))))] const AllocateWithinFirst32Gb: i32 = 0;

		let mut map_flags = MAP_PRIVATE | MAP_ANONYMOUS | OmitFromCoreDumps;
		if lock
		{
			map_flags |= Locked
		}
		if prefault
		{
			map_flags |= Prefault
		}
		if do_not_reserve_swap_space
		{
			map_flags |= DoNotReserveSwapSpace
		}
		if allocate_within_first_32Gb
		{
			map_flags |= AllocateWithinFirst32Gb
		}

		// Huge page flags are folded directly into the map flags on Android and Linux only.
		if cfg!(any(target_os = "android", target_os = "linux"))
		{
			map_flags | (huge_page_size as i32)
		}
		else
		{
			map_flags
		}
	}

	/// Computes the `madvise()` flags: always `MADV_DONTDUMP`, plus `MADV_HUGEPAGE` when huge pages are wanted.
	#[cfg(any(target_os = "android", target_os = "linux"))]
	#[inline(always)]
	fn madvise_flags(huge_page_size: HugePageSize) -> i32
	{
		const BaseFlags: i32 = MADV_DONTDUMP;
		if huge_page_size != HugePageSize::None
		{
			BaseFlags | MADV_HUGEPAGE
		}
		else
		{
			BaseFlags
		}
	}
}