use self::host::SlotId;
pub use self::region::Region;
use super::{
align_down, align_up, array_vec::ArrayVec, AllocationHandle, DeviceAlignment, DeviceLayout,
};
use crate::{image::ImageTiling, memory::is_aligned, DeviceSize, NonZeroDeviceSize};
use std::{
cell::{Cell, UnsafeCell},
cmp,
error::Error,
fmt::{self, Debug, Display},
};
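/// Suballocators are used to divide a large memory block (a [`Region`]) into
/// smaller suballocations, amortizing the cost of expensive device allocations.
///
/// # Examples
///
/// A minimal sketch of driving a suballocator directly (in practice, regions
/// are usually suballocated through a higher-level memory allocator):
///
/// ```ignore
/// let region = Region::new(0, 4096).unwrap();
/// let allocator = FreeListAllocator::new(region);
/// let layout = DeviceLayout::from_size_alignment(64, 8).unwrap();
/// let alloc = allocator
///     .allocate(layout, AllocationType::Linear, DeviceAlignment::MIN)
///     .unwrap();
/// // SAFETY: `alloc` was returned by this allocator and is deallocated once.
/// unsafe { allocator.deallocate(alloc) };
/// ```
///
/// # Safety
///
/// Implementations must return suballocations that are in bounds of the
/// region, that satisfy the requested layout, and that don't overlap any
/// other currently allocated suballocation.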
pub unsafe trait Suballocator {
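    /// Creates a new suballocator for the given [`Region`].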
fn new(region: Region) -> Self
where
Self: Sized;
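    /// Creates a new suballocation within the region.
    ///
    /// `buffer_image_granularity` is the device's buffer-image granularity,
    /// which the suballocator must respect between neighboring linear and
    /// non-linear suballocations.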
fn allocate(
&self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
) -> Result<Suballocation, SuballocatorError>;
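    /// Deallocates the given suballocation.
    ///
    /// # Safety
    ///
    /// `suballocation` must refer to a currently allocated suballocation of
    /// `self`.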
unsafe fn deallocate(&self, suballocation: Suballocation);
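    /// Returns the total amount of free space that is left in the region.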
fn free_size(&self) -> DeviceSize;
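    /// Tries to free some space, if applicable.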
fn cleanup(&mut self);
}
impl Debug for dyn Suballocator {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Suballocator").finish_non_exhaustive()
}
}
mod region {
use super::{DeviceLayout, DeviceSize};
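    /// A contiguous range of memory handed to a suballocator to manage.
    ///
    /// The constructor guarantees that `offset + size` doesn't exceed
    /// [`DeviceLayout::MAX_SIZE`], so offset arithmetic within the region
    /// can't overflow. For example, `Region::new(0, 4096)` describes the
    /// first 4 KiB of an allocation.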
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Region {
offset: DeviceSize,
size: DeviceSize,
}
impl Region {
#[inline]
pub const fn new(offset: DeviceSize, size: DeviceSize) -> Option<Self> {
if offset.saturating_add(size) <= DeviceLayout::MAX_SIZE {
Some(unsafe { Region::new_unchecked(offset, size) })
} else {
None
}
}
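        /// # Safety
        ///
        /// `offset + size` must not exceed [`DeviceLayout::MAX_SIZE`].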
#[inline]
pub const unsafe fn new_unchecked(offset: DeviceSize, size: DeviceSize) -> Self {
Region { offset, size }
}
#[inline]
pub const fn offset(&self) -> DeviceSize {
self.offset
}
#[inline]
pub const fn size(&self) -> DeviceSize {
self.size
}
}
}
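/// Tells the suballocator what type of resource will be bound to the
/// allocation, so that it can take the buffer-image granularity into account.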
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum AllocationType {
Unknown = 0,
Linear = 1,
NonLinear = 2,
}
impl From<ImageTiling> for AllocationType {
#[inline]
fn from(tiling: ImageTiling) -> Self {
match tiling {
ImageTiling::Optimal => AllocationType::NonLinear,
ImageTiling::Linear => AllocationType::Linear,
ImageTiling::DrmFormatModifier => AllocationType::Unknown,
}
}
}
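/// An allocation made using a [`Suballocator`].
///
/// The `handle` is an opaque value the suballocator can use to identify the
/// allocation on deallocation; its meaning is implementation-defined.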
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Suballocation {
pub offset: DeviceSize,
pub size: DeviceSize,
pub allocation_type: AllocationType,
pub handle: AllocationHandle,
}
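/// Error that can be returned when creating a [`Suballocation`] using a
/// [`Suballocator`].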
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum SuballocatorError {
OutOfRegionMemory,
FragmentedRegion,
}
impl Error for SuballocatorError {}
impl Display for SuballocatorError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let msg = match self {
Self::OutOfRegionMemory => "out of region memory",
Self::FragmentedRegion => "the region is too fragmented",
};
f.write_str(msg)
}
}
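/// A [`Suballocator`] that uses a free-list: free blocks are kept sorted by
/// size, so a best-fit block can be found with a binary search. Allocation
/// and deallocation are logarithmic in the number of free blocks, and
/// neighboring free blocks are coalesced on deallocation, but the region can
/// still suffer external fragmentation.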
#[derive(Debug)]
pub struct FreeListAllocator {
region_offset: DeviceSize,
free_size: Cell<DeviceSize>,
state: UnsafeCell<FreeListAllocatorState>,
}
unsafe impl Suballocator for FreeListAllocator {
fn new(region: Region) -> Self {
let free_size = Cell::new(region.size());
let mut nodes = host::PoolAllocator::new(256);
let mut free_list = Vec::with_capacity(32);
let root_id = nodes.allocate(SuballocationListNode {
prev: None,
next: None,
offset: region.offset(),
size: region.size(),
ty: SuballocationType::Free,
});
free_list.push(root_id);
let state = UnsafeCell::new(FreeListAllocatorState { nodes, free_list });
FreeListAllocator {
region_offset: region.offset(),
free_size,
state,
}
}
#[inline]
fn allocate(
&self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
) -> Result<Suballocation, SuballocatorError> {
fn has_granularity_conflict(prev_ty: SuballocationType, ty: AllocationType) -> bool {
if prev_ty == SuballocationType::Free {
false
} else if prev_ty == SuballocationType::Unknown {
true
} else {
prev_ty != ty.into()
}
}
let size = layout.size();
let alignment = layout.alignment();
let state = unsafe { &mut *self.state.get() };
unsafe {
match state.free_list.last() {
Some(&last) if state.nodes.get(last).size >= size => {
let index = match state
.free_list
.binary_search_by_key(&size, |&id| state.nodes.get(id).size)
{
Ok(index) => index,
Err(index) => index,
};
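                    // The free-list is sorted by size, so every entry from
                    // this index on is large enough; take the first one that
                    // also satisfies alignment and granularity.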
for (index, &id) in state.free_list.iter().enumerate().skip(index) {
let suballoc = state.nodes.get(id);
let mut offset = align_up(suballoc.offset, alignment);
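                        // If the buffer-image granularity is in play and the
                        // previous suballocation holds a conflicting
                        // allocation type on the same granularity page, bump
                        // the offset to the next page.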
if buffer_image_granularity != DeviceAlignment::MIN {
debug_assert!(is_aligned(self.region_offset, buffer_image_granularity));
if let Some(prev_id) = suballoc.prev {
let prev = state.nodes.get(prev_id);
if are_blocks_on_same_page(
prev.offset,
prev.size,
offset,
buffer_image_granularity,
) && has_granularity_conflict(prev.ty, allocation_type)
{
offset = align_up(offset, buffer_image_granularity);
}
}
}
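                        // The candidate works out if the aligned allocation
                        // still ends within the block.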
if offset + size <= suballoc.offset + suballoc.size {
state.free_list.remove(index);
state.split(id, offset, size);
state.nodes.get_mut(id).ty = allocation_type.into();
self.free_size.set(self.free_size.get() - size);
return Ok(Suballocation {
offset,
size,
allocation_type,
handle: AllocationHandle::from_index(id.get()),
});
}
}
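                    // Every candidate was large enough, but alignment or
                    // granularity padding made each one fall short.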
Err(SuballocatorError::OutOfRegionMemory)
}
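                // There would be enough space if the region weren't so
                // fragmented.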
Some(_) if self.free_size() >= size => Err(SuballocatorError::FragmentedRegion),
Some(_) => Err(SuballocatorError::OutOfRegionMemory),
None => Err(SuballocatorError::OutOfRegionMemory),
}
}
}
#[inline]
unsafe fn deallocate(&self, suballocation: Suballocation) {
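        // The handle was created by `allocate`, so it encodes a live `SlotId`.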
        let node_id = unsafe { SlotId::new(suballocation.handle.as_index()) };
let state = unsafe { &mut *self.state.get() };
let node = state.nodes.get_mut(node_id);
debug_assert!(node.ty != SuballocationType::Free);
self.free_size.set(self.free_size.get() + node.size);
node.ty = SuballocationType::Free;
state.coalesce(node_id);
state.free(node_id);
}
#[inline]
fn free_size(&self) -> DeviceSize {
self.free_size.get()
}
#[inline]
fn cleanup(&mut self) {}
}
#[derive(Debug)]
struct FreeListAllocatorState {
nodes: host::PoolAllocator<SuballocationListNode>,
free_list: Vec<SlotId>,
}
#[derive(Clone, Copy, Debug)]
struct SuballocationListNode {
prev: Option<SlotId>,
next: Option<SlotId>,
offset: DeviceSize,
size: DeviceSize,
ty: SuballocationType,
}
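/// Tells us if a suballocation is free, and if not, what type of resource is
/// bound to it.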
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SuballocationType {
Unknown,
Linear,
NonLinear,
Free,
}
impl From<AllocationType> for SuballocationType {
fn from(ty: AllocationType) -> Self {
match ty {
AllocationType::Unknown => SuballocationType::Unknown,
AllocationType::Linear => SuballocationType::Linear,
AllocationType::NonLinear => SuballocationType::NonLinear,
}
}
}
impl FreeListAllocatorState {
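    /// Removes the target suballocation from the free-list.
    ///
    /// Multiple free blocks can share a size, so after the binary search we
    /// scan outward through the run of equal-sized entries in both directions
    /// to find the exact node.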
unsafe fn allocate(&mut self, node_id: SlotId) {
debug_assert!(self.free_list.contains(&node_id));
let node = self.nodes.get(node_id);
match self
.free_list
.binary_search_by_key(&node.size, |&id| self.nodes.get(id).size)
{
Ok(index) => {
if self.free_list[index] == node_id {
self.free_list.remove(index);
return;
}
{
let mut index = index;
loop {
index = index.wrapping_sub(1);
if let Some(&id) = self.free_list.get(index) {
if id == node_id {
self.free_list.remove(index);
return;
}
if self.nodes.get(id).size != node.size {
break;
}
} else {
break;
}
}
}
{
let mut index = index;
loop {
index += 1;
if let Some(&id) = self.free_list.get(index) {
if id == node_id {
self.free_list.remove(index);
return;
}
if self.nodes.get(id).size != node.size {
break;
}
} else {
break;
}
}
}
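                // The node is guaranteed to be in the free-list, so it must
                // have been found in the equal-sized run above.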
unreachable!();
}
Err(_) => unreachable!(),
}
}
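    /// Fits the allocation `[offset, offset + size)` inside the free node,
    /// splitting off free padding nodes in front and back as needed.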
unsafe fn split(&mut self, node_id: SlotId, offset: DeviceSize, size: DeviceSize) {
let node = self.nodes.get(node_id);
debug_assert!(node.ty == SuballocationType::Free);
debug_assert!(offset >= node.offset);
debug_assert!(offset + size <= node.offset + node.size);
let padding_front = offset - node.offset;
let padding_back = node.offset + node.size - offset - size;
if padding_front > 0 {
let padding = SuballocationListNode {
prev: node.prev,
next: Some(node_id),
offset: node.offset,
size: padding_front,
ty: SuballocationType::Free,
};
let padding_id = self.nodes.allocate(padding);
if let Some(prev_id) = padding.prev {
self.nodes.get_mut(prev_id).next = Some(padding_id);
}
let node = self.nodes.get_mut(node_id);
node.prev = Some(padding_id);
node.offset = offset;
node.size -= padding.size;
self.free(padding_id);
}
if padding_back > 0 {
let padding = SuballocationListNode {
prev: Some(node_id),
next: node.next,
offset: offset + size,
size: padding_back,
ty: SuballocationType::Free,
};
let padding_id = self.nodes.allocate(padding);
if let Some(next_id) = padding.next {
self.nodes.get_mut(next_id).prev = Some(padding_id);
}
let node = self.nodes.get_mut(node_id);
node.next = Some(padding_id);
node.size -= padding.size;
self.free(padding_id);
}
}
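    /// Inserts the node into the free-list, keeping the list sorted by size.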
unsafe fn free(&mut self, node_id: SlotId) {
debug_assert!(!self.free_list.contains(&node_id));
let node = self.nodes.get(node_id);
let (Ok(index) | Err(index)) = self
.free_list
.binary_search_by_key(&node.size, |&id| self.nodes.get(id).size);
self.free_list.insert(index, node_id);
}
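    /// Coalesces the node with its immediate neighbors if they are free.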
unsafe fn coalesce(&mut self, node_id: SlotId) {
let node = self.nodes.get(node_id);
debug_assert!(node.ty == SuballocationType::Free);
if let Some(prev_id) = node.prev {
let prev = self.nodes.get(prev_id);
if prev.ty == SuballocationType::Free {
self.allocate(prev_id);
let node = self.nodes.get_mut(node_id);
node.prev = prev.prev;
node.offset = prev.offset;
node.size += prev.size;
if let Some(prev_id) = node.prev {
self.nodes.get_mut(prev_id).next = Some(node_id);
}
self.nodes.free(prev_id);
}
}
if let Some(next_id) = node.next {
let next = self.nodes.get(next_id);
if next.ty == SuballocationType::Free {
self.allocate(next_id);
let node = self.nodes.get_mut(node_id);
node.next = next.next;
node.size += next.size;
if let Some(next_id) = node.next {
self.nodes.get_mut(next_id).prev = Some(node_id);
}
self.nodes.free(next_id);
}
}
}
}
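/// A [`Suballocator`] that uses the buddy system: block sizes are powers of
/// two no smaller than `MIN_NODE_SIZE`, one free-list is kept per order, and
/// blocks are split in half (into "buddies") until the requested order is
/// reached. Allocation and deallocation are logarithmic in the region size,
/// at the cost of internal fragmentation of up to nearly 50% from rounding
/// sizes up to a power of two.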
#[derive(Debug)]
pub struct BuddyAllocator {
region_offset: DeviceSize,
free_size: Cell<DeviceSize>,
state: UnsafeCell<BuddyAllocatorState>,
}
impl BuddyAllocator {
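    /// Size of the smallest block (order 0).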
const MIN_NODE_SIZE: DeviceSize = 16;
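    /// Maximum number of orders the free-list can hold.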
const MAX_ORDERS: usize = 32;
}
unsafe impl Suballocator for BuddyAllocator {
fn new(region: Region) -> Self {
const EMPTY_FREE_LIST: Vec<DeviceSize> = Vec::new();
assert!(region.size().is_power_of_two());
assert!(region.size() >= BuddyAllocator::MIN_NODE_SIZE);
let max_order = (region.size() / BuddyAllocator::MIN_NODE_SIZE).trailing_zeros() as usize;
assert!(max_order < BuddyAllocator::MAX_ORDERS);
let free_size = Cell::new(region.size());
let mut free_list =
ArrayVec::new(max_order + 1, [EMPTY_FREE_LIST; BuddyAllocator::MAX_ORDERS]);
free_list[max_order].push(region.offset());
let state = UnsafeCell::new(BuddyAllocatorState { free_list });
BuddyAllocator {
region_offset: region.offset(),
free_size,
state,
}
}
#[inline]
fn allocate(
&self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
) -> Result<Suballocation, SuballocatorError> {
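        /// Returns the largest power of two less than or equal to `val`, or 0
        /// if `val` is 0.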
fn prev_power_of_two(val: DeviceSize) -> DeviceSize {
const MAX_POWER_OF_TWO: DeviceSize = DeviceAlignment::MAX.as_devicesize();
if let Some(val) = NonZeroDeviceSize::new(val) {
MAX_POWER_OF_TWO >> val.leading_zeros()
} else {
0
}
}
let mut size = layout.size();
let mut alignment = layout.alignment();
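        // Pad and align non-linear (and unknown) allocations to whole
        // granularity pages, so they can never share a page with a linear
        // allocation.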
if buffer_image_granularity != DeviceAlignment::MIN {
debug_assert!(is_aligned(self.region_offset, buffer_image_granularity));
if allocation_type == AllocationType::Unknown
|| allocation_type == AllocationType::NonLinear
{
size = align_up(size, buffer_image_granularity);
alignment = cmp::max(alignment, buffer_image_granularity);
}
}
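        // The buddy system only deals in power-of-two block sizes.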
let size = cmp::max(size, BuddyAllocator::MIN_NODE_SIZE).next_power_of_two();
let min_order = (size / BuddyAllocator::MIN_NODE_SIZE).trailing_zeros() as usize;
let state = unsafe { &mut *self.state.get() };
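        // Find the smallest order with a free block whose offset satisfies
        // the alignment, then split it down to the requested order.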
for (order, free_list) in state.free_list.iter_mut().enumerate().skip(min_order) {
for (index, &offset) in free_list.iter().enumerate() {
if is_aligned(offset, alignment) {
free_list.remove(index);
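                    // Split the block, handing the right child of each split
                    // back to the free-list of the corresponding lower order.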
for (order, free_list) in state
.free_list
.iter_mut()
.enumerate()
.skip(min_order)
.take(order - min_order)
.rev()
{
let size = BuddyAllocator::MIN_NODE_SIZE << order;
let right_child = offset + size;
let (Ok(index) | Err(index)) = free_list.binary_search(&right_child);
free_list.insert(index, right_child);
}
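                    // `size` is the rounded-up power-of-two size, so internal
                    // fragmentation is accounted for here. The handle records
                    // the order so `deallocate` knows the block size.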
self.free_size.set(self.free_size.get() - size);
return Ok(Suballocation {
offset,
size: layout.size(),
allocation_type,
handle: AllocationHandle::from_index(min_order),
});
}
}
}
if prev_power_of_two(self.free_size()) >= layout.size() {
Err(SuballocatorError::FragmentedRegion)
} else {
Err(SuballocatorError::OutOfRegionMemory)
}
}
#[inline]
unsafe fn deallocate(&self, suballocation: Suballocation) {
let mut offset = suballocation.offset;
let order = suballocation.handle.as_index();
let min_order = order;
let state = unsafe { &mut *self.state.get() };
debug_assert!(!state.free_list[order].contains(&offset));
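        // Merge the block with its buddy for as long as the buddy is also
        // free, moving up one order per merge.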
for (order, free_list) in state.free_list.iter_mut().enumerate().skip(min_order) {
let size = BuddyAllocator::MIN_NODE_SIZE << order;
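            // Buddies' offsets differ only in the bit corresponding to this
            // order's block size.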
let buddy_offset = ((offset - self.region_offset) ^ size) + self.region_offset;
match free_list.binary_search(&buddy_offset) {
Ok(index) => {
free_list.remove(index);
offset = cmp::min(offset, buddy_offset);
}
Err(_) => {
let (Ok(index) | Err(index)) = free_list.binary_search(&offset);
free_list.insert(index, offset);
let size = BuddyAllocator::MIN_NODE_SIZE << min_order;
self.free_size.set(self.free_size.get() + size);
break;
}
}
}
}
#[inline]
fn free_size(&self) -> DeviceSize {
self.free_size.get()
}
#[inline]
fn cleanup(&mut self) {}
}
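/// The free-lists of a [`BuddyAllocator`], one list of block offsets per
/// order.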
#[derive(Debug)]
struct BuddyAllocatorState {
free_list: ArrayVec<Vec<DeviceSize>, { BuddyAllocator::MAX_ORDERS }>,
}
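/// A [`Suballocator`] that can suballocate in constant time by bumping an
/// offset: individual deallocations are no-ops, and all memory is reclaimed
/// at once with [`reset`]. Best suited to allocations that share a common
/// lifetime, such as per-frame data.
///
/// A minimal usage sketch:
///
/// ```ignore
/// let mut allocator = BumpAllocator::new(Region::new(0, 1024).unwrap());
/// let layout = DeviceLayout::from_size_alignment(256, 64).unwrap();
/// let _a = allocator.allocate(layout, AllocationType::Linear, DeviceAlignment::MIN).unwrap();
/// let _b = allocator.allocate(layout, AllocationType::Linear, DeviceAlignment::MIN).unwrap();
/// allocator.reset(); // reclaims both allocations at once
/// ```
///
/// [`reset`]: BumpAllocator::reset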
#[derive(Debug)]
pub struct BumpAllocator {
region: Region,
free_start: Cell<DeviceSize>,
prev_allocation_type: Cell<AllocationType>,
}
impl BumpAllocator {
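    /// Resets the free start back to the beginning of the region, effectively
    /// deallocating everything at once.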
#[inline]
pub fn reset(&mut self) {
*self.free_start.get_mut() = 0;
*self.prev_allocation_type.get_mut() = AllocationType::Unknown;
}
}
unsafe impl Suballocator for BumpAllocator {
fn new(region: Region) -> Self {
BumpAllocator {
region,
free_start: Cell::new(0),
prev_allocation_type: Cell::new(AllocationType::Unknown),
}
}
#[inline]
fn allocate(
&self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
) -> Result<Suballocation, SuballocatorError> {
fn has_granularity_conflict(prev_ty: AllocationType, ty: AllocationType) -> bool {
prev_ty == AllocationType::Unknown || prev_ty != ty
}
let size = layout.size();
let alignment = layout.alignment();
let prev_end = self.region.offset() + self.free_start.get();
let mut offset = align_up(prev_end, alignment);
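        // If the previous allocation's last byte shares a granularity page
        // with our start and the allocation types conflict, bump the offset
        // to the next page.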
if buffer_image_granularity != DeviceAlignment::MIN
&& prev_end > 0
&& are_blocks_on_same_page(0, prev_end, offset, buffer_image_granularity)
&& has_granularity_conflict(self.prev_allocation_type.get(), allocation_type)
{
offset = align_up(offset, buffer_image_granularity);
}
let relative_offset = offset - self.region.offset();
let free_start = relative_offset + size;
if free_start > self.region.size() {
return Err(SuballocatorError::OutOfRegionMemory);
}
self.free_start.set(free_start);
self.prev_allocation_type.set(allocation_type);
Ok(Suballocation {
offset,
size,
allocation_type,
handle: AllocationHandle::null(),
})
}
#[inline]
unsafe fn deallocate(&self, _suballocation: Suballocation) {
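        // Bump allocation keeps no per-allocation state; memory is reclaimed
        // in bulk by `reset`/`cleanup`.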
}
#[inline]
fn free_size(&self) -> DeviceSize {
self.region.size() - self.free_start.get()
}
#[inline]
fn cleanup(&mut self) {
self.reset();
}
}
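/// Checks whether the last byte of block `a` and the first byte of block `b`,
/// which must follow it, land on the same `page_size` page.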
fn are_blocks_on_same_page(
a_offset: DeviceSize,
a_size: DeviceSize,
b_offset: DeviceSize,
page_size: DeviceAlignment,
) -> bool {
debug_assert!(a_offset + a_size > 0);
debug_assert!(a_offset + a_size <= b_offset);
let a_end = a_offset + a_size - 1;
let a_end_page = align_down(a_end, page_size);
let b_start_page = align_down(b_offset, page_size);
a_end_page == b_start_page
}
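/// Allocators for memory on the host, used to speed up the allocators for the
/// device.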
mod host {
use std::num::NonZeroUsize;
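    /// A simple pool allocator: freed slots are recycled through a free-list,
    /// and a [`SlotId`] is the 1-based index of a slot in the pool.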
#[derive(Debug)]
pub(super) struct PoolAllocator<T> {
pool: Vec<T>,
free_list: Vec<SlotId>,
}
impl<T> PoolAllocator<T> {
pub fn new(capacity: usize) -> Self {
debug_assert!(capacity > 0);
PoolAllocator {
pool: Vec::with_capacity(capacity),
free_list: Vec::new(),
}
}
pub fn allocate(&mut self, val: T) -> SlotId {
if let Some(id) = self.free_list.pop() {
*unsafe { self.get_mut(id) } = val;
id
} else {
self.pool.push(val);
SlotId(unsafe { NonZeroUsize::new_unchecked(self.pool.len()) })
}
}
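        /// # Safety
        ///
        /// `id` must refer to an occupied slot of `self`.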
pub unsafe fn free(&mut self, id: SlotId) {
debug_assert!(!self.free_list.contains(&id));
self.free_list.push(id);
}
pub unsafe fn get_mut(&mut self, id: SlotId) -> &mut T {
debug_assert!(!self.free_list.contains(&id));
debug_assert!(id.0.get() <= self.pool.len());
self.pool.get_unchecked_mut(id.0.get() - 1)
}
}
impl<T: Copy> PoolAllocator<T> {
pub unsafe fn get(&self, id: SlotId) -> T {
debug_assert!(!self.free_list.contains(&id));
debug_assert!(id.0.get() <= self.pool.len());
*self.pool.get_unchecked(id.0.get() - 1)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(super) struct SlotId(NonZeroUsize);
impl SlotId {
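        /// # Safety
        ///
        /// `val` must be the value of a live [`SlotId`] previously returned
        /// by [`PoolAllocator::allocate`].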
pub unsafe fn new(val: usize) -> Self {
SlotId(NonZeroUsize::new(val).unwrap())
}
pub fn get(self) -> usize {
self.0.get()
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam_queue::ArrayQueue;
use parking_lot::Mutex;
use std::thread;
const fn unwrap<T: Copy>(opt: Option<T>) -> T {
match opt {
Some(x) => x,
None => panic!(),
}
}
const DUMMY_LAYOUT: DeviceLayout = unwrap(DeviceLayout::from_size_alignment(1, 1));
#[test]
fn free_list_allocator_capacity() {
const THREADS: DeviceSize = 12;
const ALLOCATIONS_PER_THREAD: DeviceSize = 100;
const ALLOCATION_STEP: DeviceSize = 117;
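        // Each thread `i` makes `ALLOCATIONS_PER_THREAD` allocations of size
        // `i * ALLOCATION_STEP`, so the region must hold `1 + 2 + ... + THREADS`
        // steps, `ALLOCATIONS_PER_THREAD` times over.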
const REGION_SIZE: DeviceSize =
(ALLOCATION_STEP * (THREADS + 1) * THREADS / 2) * ALLOCATIONS_PER_THREAD;
let allocator = Mutex::new(FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap()));
let allocs = ArrayQueue::new((ALLOCATIONS_PER_THREAD * THREADS) as usize);
thread::scope(|scope| {
for i in 1..=THREADS {
let (allocator, allocs) = (&allocator, &allocs);
scope.spawn(move || {
let layout = DeviceLayout::from_size_alignment(i * ALLOCATION_STEP, 1).unwrap();
for _ in 0..ALLOCATIONS_PER_THREAD {
allocs
.push(
allocator
.lock()
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap(),
)
.unwrap();
}
});
}
});
let allocator = allocator.into_inner();
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == 0);
for alloc in allocs {
unsafe { allocator.deallocate(alloc) };
}
assert!(allocator.free_size() == REGION_SIZE);
let alloc = allocator
.allocate(
DeviceLayout::from_size_alignment(REGION_SIZE, 1).unwrap(),
AllocationType::Unknown,
DeviceAlignment::MIN,
)
.unwrap();
unsafe { allocator.deallocate(alloc) };
}
#[test]
fn free_list_allocator_respects_alignment() {
const REGION_SIZE: DeviceSize = 10 * 256;
const LAYOUT: DeviceLayout = unwrap(DeviceLayout::from_size_alignment(1, 256));
let allocator = FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocs = Vec::with_capacity(10);
for _ in 0..10 {
allocs.push(
allocator
.allocate(LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap(),
);
}
assert!(allocator
.allocate(LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == REGION_SIZE - 10);
for alloc in allocs.drain(..) {
unsafe { allocator.deallocate(alloc) };
}
}
#[test]
fn free_list_allocator_respects_granularity() {
const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(16));
const REGION_SIZE: DeviceSize = 2 * GRANULARITY.as_devicesize();
let allocator = FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut linear_allocs = Vec::with_capacity(REGION_SIZE as usize / 2);
let mut nonlinear_allocs = Vec::with_capacity(REGION_SIZE as usize / 2);
for i in 0..REGION_SIZE {
if i % 2 == 0 {
linear_allocs.push(
allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.unwrap(),
);
} else {
nonlinear_allocs.push(
allocator
.allocate(DUMMY_LAYOUT, AllocationType::NonLinear, GRANULARITY)
.unwrap(),
);
}
}
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.is_err());
assert!(allocator.free_size() == 0);
for alloc in linear_allocs.drain(..) {
unsafe { allocator.deallocate(alloc) };
}
let alloc = allocator
.allocate(
DeviceLayout::from_size_alignment(GRANULARITY.as_devicesize(), 1).unwrap(),
AllocationType::Unknown,
GRANULARITY,
)
.unwrap();
unsafe { allocator.deallocate(alloc) };
let alloc = allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY)
.unwrap();
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY)
.is_err());
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.is_err());
unsafe { allocator.deallocate(alloc) };
for alloc in nonlinear_allocs.drain(..) {
unsafe { allocator.deallocate(alloc) };
}
}
#[test]
fn buddy_allocator_capacity() {
const MAX_ORDER: usize = 10;
const REGION_SIZE: DeviceSize = BuddyAllocator::MIN_NODE_SIZE << MAX_ORDER;
let allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocs = Vec::with_capacity(1 << MAX_ORDER);
for order in 0..=MAX_ORDER {
let layout =
DeviceLayout::from_size_alignment(BuddyAllocator::MIN_NODE_SIZE << order, 1)
.unwrap();
for _ in 0..1 << (MAX_ORDER - order) {
allocs.push(
allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap(),
);
}
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == 0);
for alloc in allocs.drain(..) {
unsafe { allocator.deallocate(alloc) };
}
}
let mut orders = (0..MAX_ORDER).collect::<Vec<_>>();
for mid in 0..MAX_ORDER {
orders.rotate_left(mid);
for &order in &orders {
let layout =
DeviceLayout::from_size_alignment(BuddyAllocator::MIN_NODE_SIZE << order, 1)
.unwrap();
allocs.push(
allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap(),
);
}
let alloc = allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap();
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == 0);
unsafe { allocator.deallocate(alloc) };
for alloc in allocs.drain(..) {
unsafe { allocator.deallocate(alloc) };
}
}
}
#[test]
fn buddy_allocator_respects_alignment() {
const REGION_SIZE: DeviceSize = 4096;
let allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
{
let layout = DeviceLayout::from_size_alignment(1, 4096).unwrap();
let alloc = allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap();
assert!(allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == REGION_SIZE - BuddyAllocator::MIN_NODE_SIZE);
unsafe { allocator.deallocate(alloc) };
}
{
let layout_a = DeviceLayout::from_size_alignment(1, 256).unwrap();
let allocations_a = REGION_SIZE / layout_a.alignment().as_devicesize();
let layout_b = DeviceLayout::from_size_alignment(1, 16).unwrap();
let allocations_b = REGION_SIZE / layout_b.alignment().as_devicesize() - allocations_a;
let mut allocs =
Vec::with_capacity((REGION_SIZE / BuddyAllocator::MIN_NODE_SIZE) as usize);
for _ in 0..allocations_a {
allocs.push(
allocator
.allocate(layout_a, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap(),
);
}
assert!(allocator
.allocate(layout_a, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(
allocator.free_size()
== REGION_SIZE - allocations_a * BuddyAllocator::MIN_NODE_SIZE
);
for _ in 0..allocations_b {
allocs.push(
allocator
.allocate(layout_b, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap(),
);
}
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == 0);
for alloc in allocs {
unsafe { allocator.deallocate(alloc) };
}
}
}
#[test]
fn buddy_allocator_respects_granularity() {
const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(256));
const REGION_SIZE: DeviceSize = 2 * GRANULARITY.as_devicesize();
let allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
{
const ALLOCATIONS: DeviceSize = REGION_SIZE / BuddyAllocator::MIN_NODE_SIZE;
let mut allocs = Vec::with_capacity(ALLOCATIONS as usize);
for _ in 0..ALLOCATIONS {
allocs.push(
allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.unwrap(),
);
}
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.is_err());
assert!(allocator.free_size() == 0);
for alloc in allocs {
unsafe { allocator.deallocate(alloc) };
}
}
{
let alloc1 = allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY)
.unwrap();
let alloc2 = allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY)
.unwrap();
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.is_err());
assert!(allocator.free_size() == 0);
unsafe { allocator.deallocate(alloc1) };
unsafe { allocator.deallocate(alloc2) };
}
}
#[test]
fn bump_allocator_respects_alignment() {
const ALIGNMENT: DeviceSize = 16;
const REGION_SIZE: DeviceSize = 10 * ALIGNMENT;
let layout = DeviceLayout::from_size_alignment(1, ALIGNMENT).unwrap();
let mut allocator = BumpAllocator::new(Region::new(0, REGION_SIZE).unwrap());
for _ in 0..10 {
allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap();
}
assert!(allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
for _ in 0..ALIGNMENT - 1 {
allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
.unwrap();
}
assert!(allocator
.allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
.is_err());
assert!(allocator.free_size() == 0);
allocator.reset();
assert!(allocator.free_size() == REGION_SIZE);
}
#[test]
fn bump_allocator_respects_granularity() {
const ALLOCATIONS: DeviceSize = 10;
const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(1024));
const REGION_SIZE: DeviceSize = ALLOCATIONS * GRANULARITY.as_devicesize();
let mut allocator = BumpAllocator::new(Region::new(0, REGION_SIZE).unwrap());
for i in 0..ALLOCATIONS {
for _ in 0..GRANULARITY.as_devicesize() {
allocator
.allocate(
DUMMY_LAYOUT,
if i % 2 == 0 {
AllocationType::NonLinear
} else {
AllocationType::Linear
},
GRANULARITY,
)
.unwrap();
}
}
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.is_err());
assert!(allocator.free_size() == 0);
allocator.reset();
for i in 0..ALLOCATIONS {
allocator
.allocate(
DUMMY_LAYOUT,
if i % 2 == 0 {
AllocationType::Linear
} else {
AllocationType::NonLinear
},
GRANULARITY,
)
.unwrap();
}
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
.is_err());
assert!(allocator.free_size() == GRANULARITY.as_devicesize() - 1);
allocator.reset();
assert!(allocator.free_size() == REGION_SIZE);
}
}