#![allow(clippy::missing_safety_doc)]

use core::{
	alloc::Layout,
	ptr,
	ptr::NonNull,
	cell::UnsafeCell,
};

use crate::util::layout_for_n;

#[derive(Copy, Clone, Debug)]
pub struct AllocError;

/// See <https://doc.rust-lang.org/nightly/std/alloc/trait.Allocator.html>
/// for complete documentation. This and the associated default method
/// implementations were lifted (mostly) verbatim from there.
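///
/// # Example
///
/// A minimal usage sketch (illustrative only; it assumes the [`Bump`]
/// allocator defined later in this module):
///
/// ```ignore
/// use core::alloc::Layout;
///
/// let mut memory = [0u8; 64];
/// let bump = Bump::new(&mut memory);
///
/// // Allocate four bytes through the trait, then hand them back
/// // (deallocation is a no-op for `Bump`).
/// let layout = Layout::array::<u8>(4).unwrap();
/// let ptr = bump.allocate(layout).unwrap();
/// unsafe { bump.deallocate(ptr.cast(), layout) };
/// ```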
pub unsafe trait Allocator {
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);

	fn allocate_zeroed(
		&self,
		layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		let mut ptr = self.allocate(layout)?;

		unsafe {
			let slice = ptr.as_mut();
			let ptr = slice.as_mut_ptr();
			ptr.write_bytes(0, slice.len());
		};

		Ok(ptr)
	}

	unsafe fn grow(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		debug_assert!(
			new_layout.size() >= old_layout.size(),
			"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
		);

		let mut new_ptr = self.allocate(new_layout)?;

		let slice = new_ptr.as_mut();
		ptr::copy_nonoverlapping(ptr.as_ptr(), slice.as_mut_ptr(), old_layout.size());
		self.deallocate(ptr, old_layout);

		Ok(new_ptr)
	}


	unsafe fn grow_zeroed(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		debug_assert!(
			new_layout.size() >= old_layout.size(),
			"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
		);

		let mut new_ptr = self.allocate_zeroed(new_layout)?;

		let slice = new_ptr.as_mut();
		ptr::copy_nonoverlapping(ptr.as_ptr(), slice.as_mut_ptr(), old_layout.size());
		self.deallocate(ptr, old_layout);

		Ok(new_ptr)
	}

	unsafe fn shrink(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		debug_assert!(
			new_layout.size() <= old_layout.size(),
			"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
		);

		let mut new_ptr = self.allocate(new_layout)?;

		let slice = new_ptr.as_mut();
		ptr::copy_nonoverlapping(ptr.as_ptr(), slice.as_mut_ptr(), old_layout.size());
		self.deallocate(ptr, old_layout);

		Ok(new_ptr)
	}

	#[inline(always)]
	fn by_ref(&self) -> &Self where Self: Sized {
		self
	}
}

unsafe impl <A: Allocator> Allocator for &A {
	#[inline(always)]
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		(**self).allocate(layout)
	}

	#[inline(always)]
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		(**self).deallocate(ptr, layout)
	}

	#[inline(always)]
	unsafe fn grow(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow(ptr, old_layout, new_layout)
	}


	#[inline(always)]
	unsafe fn grow_zeroed(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		(**self).grow_zeroed(ptr, old_layout, new_layout)
	}

	#[inline(always)]
	unsafe fn shrink(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout
	) -> Result<NonNull<[u8]>, AllocError> {
		(**self).shrink(ptr, old_layout, new_layout)
	}
}

#[derive(Debug)]
struct _Bump<'a> {
	memory: &'a mut [u8],
	current_offset: usize,
}

/// A bump allocator for single-threaded use over a fixed, caller-provided
/// buffer. Individual deallocations are no-ops; the memory is only reusable
/// once the borrow of the backing buffer ends.
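///
/// # Example
///
/// A minimal sketch (illustrative): back the arena with a stack buffer and
/// allocate from it through the [`Allocator`] trait.
///
/// ```ignore
/// use core::alloc::Layout;
///
/// let mut memory = [0u8; 128];
/// let bump = Bump::new(&mut memory);
///
/// // Each allocation claims the next free chunk of `memory`.
/// let a = bump.allocate(Layout::new::<u64>()).unwrap();
/// let b = bump.allocate(Layout::new::<u64>()).unwrap();
/// assert_ne!(a.as_ptr(), b.as_ptr());
/// ```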
#[derive(Debug)]
pub struct Bump<'a> {
	bump: UnsafeCell<_Bump<'a>>,
}

impl <'a> Bump<'a> {
	pub fn new(memory: &'a mut [u8]) -> Self {
		Bump { bump: UnsafeCell::new(_Bump { memory, current_offset: 0 }) }
	}
}

unsafe impl <'a> Allocator for Bump<'a> {
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let bump = unsafe { &mut *self.bump.get() };

		// Pad the current offset so the returned address is aligned for
		// `layout` (`Layout` guarantees the alignment is a power of two).
		let align = layout.align();
		let addr = bump.memory.as_ptr() as usize + bump.current_offset;
		let off_adj = addr.wrapping_neg() & (align - 1);
		let start = bump.current_offset + off_adj;
		let end = start + layout.size();

		if end > bump.memory.len() {
			return Err(AllocError);
		}

		bump.current_offset = end;

		Ok(unsafe {
			NonNull::new_unchecked(&mut bump.memory[start..end] as *mut [u8])
		})
	}

	unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
	}
}

#[derive(Debug)]
struct _BumpGrow {
	mem: NonNull<u8>,
	cap: usize,
	off: usize,
}

/// A growable bump allocator that uses an inner [`Allocator`] to grow its
/// arena. On drop, the inner allocator is asked to deallocate the arena.
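///
/// # Example
///
/// A minimal sketch (illustrative): nest a `BumpGrow` inside a fixed [`Bump`]
/// so the arena can be regrown out of the outer buffer.
///
/// ```ignore
/// use core::alloc::Layout;
///
/// let mut memory = [0u8; 256];
/// let bump = Bump::new(&mut memory);
///
/// let grow = BumpGrow::with_capacity_in(16, &bump).unwrap();
/// // Allocating past the initial 16 bytes makes `grow` request a larger
/// // arena from `bump` and copy the old contents across.
/// let _chunk = grow.allocate(Layout::array::<u8>(32).unwrap()).unwrap();
/// ```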
#[derive(Debug)]
pub struct BumpGrow<A: Allocator> {
	grow: UnsafeCell<_BumpGrow>,
	alloc: A,
}

impl <A: Allocator> BumpGrow<A> {
	pub fn with_capacity_in(cap: usize, alloc: A) -> Result<Self, AllocError> {
		let mem = alloc.allocate(layout_for_n::<u8>(cap))?;
		let mem = unsafe { NonNull::new_unchecked(mem.as_ptr() as *mut u8) };

		Ok(Self {
			alloc,
			grow: _BumpGrow { mem, cap, off: 0 }.into(),
		})
	}
}

impl <A: Allocator> Drop for BumpGrow<A> {
	fn drop(&mut self) {
		unsafe {
			let grow = &mut *self.grow.get();
			self.alloc.deallocate(grow.mem, layout_for_n::<u8>(grow.cap));
		}
	}
}

unsafe impl <A: Allocator> Allocator for BumpGrow<A> {
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let grow = unsafe { &mut *self.grow.get() };
		let cap = grow.cap;
		let off = grow.off;

		// Pad the offset so it is aligned for `layout` (`Layout` guarantees
		// the alignment is a power of two). Note this only aligns the offset
		// within the arena, not the absolute address.
		let off_adj = off.wrapping_neg() & (layout.align() - 1);
		let start = off + off_adj;
		let end = start + layout.size();

		if end > cap {
			// Grow by at least the current capacity (to amortize repeated
			// growth) and by at least enough to fit this allocation.
			let more = core::cmp::max(cap, end - cap);
			let mem = unsafe {
				self.alloc.grow(
					grow.mem,
					layout_for_n::<u8>(cap),
					layout_for_n::<u8>(cap + more),
				)?
			};

			grow.mem = unsafe { NonNull::new_unchecked(mem.as_ptr() as *mut u8) };
			grow.cap += more;
		}

		grow.off = end;

		Ok(unsafe {
			let slice = ptr::slice_from_raw_parts_mut(
				grow.mem.as_ptr().add(start),
				end - start,
			);

			NonNull::new_unchecked(slice)
		})
	}

	unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
	}
}

#[cfg(test)]
mod test {
	use super::{ Bump, BumpGrow };
	use crate::vec::Vec;

	#[test]
	fn test_bump() {
		let mut memory: [u8; 512] = [0; 512];
		let bump = Bump::new(&mut memory);

		{
			let mut vec: Vec<u8, _> = Vec::with_capacity_in(1, &bump).unwrap();
			vec.try_push(1).unwrap();
			vec.try_push(2).unwrap();
			vec.try_push(3).unwrap();
			assert_eq!(vec.as_slice(), &[1, 2, 3]);
		}

		// Each growth of the Vec allocated a fresh chunk from the arena and
		// copied the old contents, leaving stale copies behind:
		// [1], then [1, 2], then [1, 2, 3].
		assert_eq!(&memory[0..6], &[1, 1, 2, 1, 2, 3]);
	}

	#[test]
	fn test_bump_oom() {
		let mut memory: [u8; 1] = [0];
		let bump = Bump::new(&mut memory);

		assert!(Vec::<u8, _>::with_capacity_in(2, &bump).is_err());

		{
			let mut vec: Vec<u8, _> = Vec::with_capacity_in(1, &bump).unwrap();
			vec.try_push(1).unwrap();
			assert!(vec.try_push(2).is_err());
		}

		assert_eq!(&memory, &[1]);
	}

	#[test]
	fn test_bump_grow() {
		let mut memory: [u8; 512] = [0; 512];
		let bump = Bump::new(&mut memory);

		{
			let grow = BumpGrow::with_capacity_in(1, &bump).unwrap();
			let mut vec: Vec<u8, _> = Vec::with_capacity_in(1, &grow).unwrap();
			vec.try_push(1).unwrap();
			vec.try_push(2).unwrap();
			vec.try_push(3).unwrap();
			assert_eq!(vec.as_slice(), &[1, 2, 3]);
		}

		// The outer buffer records both the BumpGrow arena regrowths and the
		// Vec's own chunk copies as elements were pushed.
		assert_eq!(&memory[0..10], &[1, 1, 1, 2, 1, 1, 2, 1, 2, 3]);
	}

	#[test]
	fn test_bump_grow_oom() {
		let mut memory: [u8; 1] = [0];
		let bump = Bump::new(&mut memory);

		assert!(BumpGrow::with_capacity_in(4, &bump).is_err());
	}
}