use std::mem::align_of;
use std::ptr::NonNull;
use std::{
alloc::{handle_alloc_error, Layout},
sync::atomic::AtomicIsize,
};
// Allocation alignment per target architecture. The values are intended to
// match each architecture's cache-line size so that separate allocations do
// not share a cache line — per-target values are assumptions, TODO confirm.
#[cfg(target_arch = "x86")]
pub const ALIGNMENT: usize = 1 << 6;
#[cfg(target_arch = "x86_64")]
pub const ALIGNMENT: usize = 1 << 7;
#[cfg(target_arch = "mips")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "mips64")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "powerpc")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "powerpc64")]
pub const ALIGNMENT: usize = 1 << 6;
// Fixed: rustc exposes "riscv32" / "riscv64" as target_arch values; the
// original plain "riscv" cfg could never match, leaving RISC-V targets
// without a definition.
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
pub const ALIGNMENT: usize = 1 << 6;
#[cfg(target_arch = "s390x")]
pub const ALIGNMENT: usize = 1 << 8;
#[cfg(target_arch = "sparc")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "sparc64")]
pub const ALIGNMENT: usize = 1 << 6;
// NOTE(review): "thumbv6"/"thumbv7" are target *names*, not target_arch
// values — thumb targets report target_arch = "arm", so these two cfgs are
// dead and the "arm" entry below is what actually applies. Kept for history.
#[cfg(target_arch = "thumbv6")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "thumbv7")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "wasm32")]
pub const ALIGNMENT: usize = FALLBACK_ALIGNMENT;
#[cfg(target_arch = "arm")]
pub const ALIGNMENT: usize = 1 << 5;
#[cfg(target_arch = "nvptx")]
pub const ALIGNMENT: usize = 1 << 7;
#[cfg(target_arch = "nvptx64")]
pub const ALIGNMENT: usize = 1 << 7;
#[cfg(target_arch = "aarch64")]
pub const ALIGNMENT: usize = 1 << 6;
// Any architecture not listed above previously failed to compile because
// ALIGNMENT was left undefined; fall back to a safe default instead.
#[cfg(not(any(
    target_arch = "x86",
    target_arch = "x86_64",
    target_arch = "mips",
    target_arch = "mips64",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    target_arch = "s390x",
    target_arch = "sparc",
    target_arch = "sparc64",
    target_arch = "thumbv6",
    target_arch = "thumbv7",
    target_arch = "wasm32",
    target_arch = "arm",
    target_arch = "nvptx",
    target_arch = "nvptx64",
    target_arch = "aarch64",
)))]
pub const ALIGNMENT: usize = FALLBACK_ALIGNMENT;
// Default alignment used when no architecture-specific value applies.
#[doc(hidden)]
const FALLBACK_ALIGNMENT: usize = 1 << 6;
// Sentinel handed out for zero-sized allocations: a dangling but well-aligned
// non-null pointer whose address is ALIGNMENT itself. It is never dereferenced
// and never passed to dealloc; free_aligned/reallocate/memcpy recognize it by
// address comparison.
// SAFETY: ALIGNMENT is a non-zero constant, so the cast address is non-null.
const BYPASS_PTR: NonNull<u8> = unsafe { NonNull::new_unchecked(ALIGNMENT as *mut u8) };
/// Net number of bytes currently allocated through this module.
///
/// Atomics are interior-mutable, so `static mut` was unnecessary: it forced
/// callers into `unsafe` and permitted aliasing `&mut` references. A plain
/// `static` keeps every existing atomic-op call site compiling, safely.
pub static ALLOCATIONS: AtomicIsize = AtomicIsize::new(0);
/// Allocates `size` bytes of zero-initialized memory aligned to [`ALIGNMENT`].
///
/// A zero-sized request returns the shared sentinel [`BYPASS_PTR`] without
/// touching the allocator (allocating a zero-sized layout is UB). Every call
/// must be paired with [`free_aligned`] using the same `size`. Aborts via
/// [`handle_alloc_error`] if the global allocator fails.
pub fn allocate_aligned(size: usize) -> NonNull<u8> {
    if size == 0 {
        return BYPASS_PTR;
    }
    // SAFETY: the counter is only ever touched through atomic operations.
    unsafe {
        ALLOCATIONS.fetch_add(size as isize, std::sync::atomic::Ordering::SeqCst);
    }
    // Use the checked constructor: `from_size_align_unchecked` on an
    // unvalidated caller-supplied size is UB when the size overflows after
    // rounding up to ALIGNMENT.
    let layout = Layout::from_size_align(size, ALIGNMENT)
        .expect("allocation size overflows Layout");
    // SAFETY: `layout` has non-zero size (checked above).
    let raw_ptr = unsafe { std::alloc::alloc_zeroed(layout) };
    NonNull::new(raw_ptr).unwrap_or_else(|| handle_alloc_error(layout))
}
/// Releases a block previously obtained from this module's allocator.
///
/// # Safety
/// `ptr` must have been returned by [`allocate_aligned`] or [`reallocate`]
/// with exactly `size` bytes, and must not be used after this call.
pub unsafe fn free_aligned(ptr: NonNull<u8>, size: usize) {
    // The sentinel marks a zero-sized "allocation": nothing to release.
    if ptr == BYPASS_PTR {
        return;
    }
    ALLOCATIONS.fetch_sub(size as isize, std::sync::atomic::Ordering::SeqCst);
    let layout = Layout::from_size_align_unchecked(size, ALIGNMENT);
    std::alloc::dealloc(ptr.as_ptr(), layout);
}
/// Grows or shrinks a block from [`allocate_aligned`], preserving the
/// zeroed-memory guarantee for any newly added bytes.
///
/// Zero-size transitions go through the [`BYPASS_PTR`] sentinel in both
/// directions: reallocating the sentinel is a fresh allocation, and
/// reallocating to size 0 frees the block and hands back the sentinel.
///
/// # Safety
/// `ptr` must have been returned by this module's allocator with exactly
/// `old_size` bytes, and must not be used after this call.
pub unsafe fn reallocate(
    ptr: NonNull<u8>,
    old_size: usize,
    new_size: usize,
) -> NonNull<u8> {
    // Sentinel carries no real allocation: just allocate fresh.
    if ptr == BYPASS_PTR {
        return allocate_aligned(new_size);
    }
    if new_size == 0 {
        free_aligned(ptr, old_size);
        return BYPASS_PTR;
    }
    // Account for the signed size delta up front (shrinking subtracts).
    ALLOCATIONS.fetch_add(
        new_size as isize - old_size as isize,
        std::sync::atomic::Ordering::SeqCst,
    );
    // realloc keeps the layout's ALIGNMENT for the returned block.
    let raw_ptr = std::alloc::realloc(
        ptr.as_ptr(),
        Layout::from_size_align_unchecked(old_size, ALIGNMENT),
        new_size,
    );
    // Abort via handle_alloc_error on failure, reporting the requested layout.
    let ptr = NonNull::new(raw_ptr).unwrap_or_else(|| {
        handle_alloc_error(Layout::from_size_align_unchecked(new_size, ALIGNMENT))
    });
    // realloc does not zero the grown tail; zero it so the result matches the
    // alloc_zeroed contract of allocate_aligned.
    if new_size > old_size {
        ptr.as_ptr()
            .add(old_size)
            .write_bytes(0, new_size - old_size);
    }
    ptr
}
/// Copies `count` bytes from `src` to `dst`; copying from the zero-size
/// sentinel is a no-op.
///
/// # Safety
/// Unless `src` is [`BYPASS_PTR`], both pointers must be valid for `count`
/// bytes and the two regions must not overlap.
pub unsafe fn memcpy(dst: NonNull<u8>, src: NonNull<u8>, count: usize) {
    if src == BYPASS_PTR {
        return;
    }
    std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), count);
}
/// Returns `true` when pointer `p` is aligned to `a` bytes.
///
/// `a` must be a power of two: the mask trick `addr & (a - 1)` only computes
/// `addr % a` for power-of-two divisors. The original silently returned a
/// wrong answer for other values; now that misuse trips a debug assertion.
pub fn is_aligned<T>(p: NonNull<T>, a: usize) -> bool {
    debug_assert!(a.is_power_of_two(), "alignment must be a power of two");
    // wrapping_sub keeps a == 0 from panicking before the debug_assert fires
    // in release builds; any power of two yields the correct mask.
    let mask = a.wrapping_sub(1);
    (p.as_ptr() as usize) & mask == 0
}
/// Returns `true` when `p` satisfies the natural alignment of `T`.
pub fn is_ptr_aligned<T>(p: NonNull<T>) -> bool {
    // A zero offset means the pointer already sits on an align_of::<T>() boundary.
    let offset_to_aligned = p.as_ptr().align_offset(align_of::<T>());
    offset_to_aligned == 0
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Allocations must honour the platform's ALIGNMENT. The original
    /// hard-coded `% 64`, which fails on targets whose ALIGNMENT is 32
    /// (arm, mips, powerpc, sparc).
    #[test]
    fn test_allocate() {
        for _ in 0..10 {
            let p = allocate_aligned(1024);
            assert_eq!(0, (p.as_ptr() as usize) % ALIGNMENT);
            unsafe { free_aligned(p, 1024) };
        }
    }

    #[test]
    fn test_is_aligned() {
        let ptr = allocate_aligned(10);
        // The allocation base satisfies every small power-of-two alignment.
        assert!(is_aligned::<u8>(ptr, 1));
        assert!(is_aligned::<u8>(ptr, 2));
        assert!(is_aligned::<u8>(ptr, 4));
        // One byte past the base is only 1-aligned.
        let off = unsafe { NonNull::new_unchecked(ptr.as_ptr().offset(1)) };
        assert!(is_aligned::<u8>(off, 1));
        assert!(!is_aligned::<u8>(off, 2));
        assert!(!is_aligned::<u8>(off, 4));
        // Free through the original, un-offset pointer.
        unsafe { free_aligned(ptr, 10) };
    }
}