use super::{
sys::BufferCreateInfo, AllocateBufferError, Buffer, BufferContents, BufferMemory, BufferUsage,
Subbuffer,
};
use crate::{
device::{Device, DeviceOwned, DeviceOwnedDebugWrapper},
memory::{
allocator::{
align_up, AllocationCreateInfo, DeviceLayout, MemoryAllocator, MemoryAllocatorError,
MemoryTypeFilter, StandardMemoryAllocator,
},
DeviceAlignment,
},
DeviceSize, NonZeroDeviceSize, Validated,
};
use crossbeam_queue::ArrayQueue;
use std::{
cell::UnsafeCell,
cmp,
hash::{Hash, Hasher},
mem::ManuallyDrop,
sync::Arc,
};
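// The maximum number of retired arena buffers that the reserve queue can hold for recycling;
// any excess is freed when pushed back.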
const MAX_ARENAS: usize = 32;
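/// Efficiently suballocates buffers into smaller subbuffers.
///
/// The allocator carves [`Subbuffer`]s out of large buffers called *arenas* using a simple linear
/// (bump) strategy: each allocation advances `free_start`, and once an arena is full the next one
/// is taken from a reserve queue, or created if the queue is empty. When the last subbuffer
/// referencing an arena is dropped, the arena's buffer is pushed back onto the queue for reuse,
/// so a steady-state workload stops allocating new memory altogether. This makes the allocator
/// well suited to data that is regenerated every frame, such as uniforms or staging data.
///
/// A minimal usage sketch; it assumes a `memory_allocator` created for the device elsewhere, and
/// picks a host-writable memory type so the subbuffers can be written from the CPU:
///
/// ```
/// # use std::sync::Arc;
/// # use vulkano::memory::allocator::StandardMemoryAllocator;
/// use vulkano::{
///     buffer::{
///         allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo},
///         BufferUsage,
///     },
///     memory::allocator::MemoryTypeFilter,
/// };
///
/// # let memory_allocator: Arc<StandardMemoryAllocator> = return;
/// let buffer_allocator = SubbufferAllocator::new(
///     memory_allocator.clone(),
///     SubbufferAllocatorCreateInfo {
///         buffer_usage: BufferUsage::UNIFORM_BUFFER,
///         memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
///             | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
///         ..Default::default()
///     },
/// );
///
/// // Once per frame: allocate a fresh subbuffer and write this frame's data into it.
/// let subbuffer = buffer_allocator.allocate_sized::<u32>().unwrap();
/// *subbuffer.write().unwrap() = 42;
/// ```
///
/// Note that this type is not `Sync`; to share it between threads, wrap it in a `Mutex`.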
#[derive(Debug)]
pub struct SubbufferAllocator<A = StandardMemoryAllocator> {
state: UnsafeCell<SubbufferAllocatorState<A>>,
}
impl<A> SubbufferAllocator<A>
where
A: MemoryAllocator,
{
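    /// Creates a new `SubbufferAllocator`.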
pub fn new(memory_allocator: Arc<A>, create_info: SubbufferAllocatorCreateInfo) -> Self {
let SubbufferAllocatorCreateInfo {
arena_size,
buffer_usage,
memory_type_filter,
_ne: _,
} = create_info;
let properties = memory_allocator.device().physical_device().properties();
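        // All suballocations share the arena's buffer, so each one must satisfy the strictest
        // offset alignment that any of the requested descriptor usages requires.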
let buffer_alignment = [
buffer_usage
.intersects(BufferUsage::UNIFORM_TEXEL_BUFFER | BufferUsage::STORAGE_TEXEL_BUFFER)
.then_some(properties.min_texel_buffer_offset_alignment),
buffer_usage
.contains(BufferUsage::UNIFORM_BUFFER)
.then_some(properties.min_uniform_buffer_offset_alignment),
buffer_usage
.contains(BufferUsage::STORAGE_BUFFER)
.then_some(properties.min_storage_buffer_offset_alignment),
]
.into_iter()
.flatten()
.max()
.unwrap_or(DeviceAlignment::MIN);
SubbufferAllocator {
state: UnsafeCell::new(SubbufferAllocatorState {
memory_allocator,
buffer_usage,
memory_type_filter,
buffer_alignment,
arena_size,
arena: None,
free_start: 0,
reserve: None,
}),
}
}
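    /// Returns the current size of the arenas.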
pub fn arena_size(&self) -> DeviceSize {
unsafe { &*self.state.get() }.arena_size
}
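    /// Sets the size of the arenas to `size`.
    ///
    /// The current arena and the reserve queue are discarded, so subsequent allocations are
    /// served from newly created arenas of the new size.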
pub fn set_arena_size(&self, size: DeviceSize) {
let state = unsafe { &mut *self.state.get() };
state.arena_size = size;
state.arena = None;
state.reserve = None;
}
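    /// Ensures that the size of the current arena is at least `size`.
    ///
    /// If it is not, the arena size is raised to `size`, the reserve queue is discarded, and a
    /// new arena is created up front.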
pub fn reserve(&self, size: DeviceSize) -> Result<(), MemoryAllocatorError> {
if size > self.arena_size() {
let state = unsafe { &mut *self.state.get() };
state.arena_size = size;
state.reserve = None;
            state.arena = Some(state.next_arena()?);
            state.free_start = 0;
}
Ok(())
}
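    /// Allocates a subbuffer for sized data of type `T`.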
pub fn allocate_sized<T>(&self) -> Result<Subbuffer<T>, MemoryAllocatorError>
where
T: BufferContents,
{
let layout = T::LAYOUT.unwrap_sized();
unsafe { &mut *self.state.get() }
.allocate(layout)
.map(|subbuffer| unsafe { subbuffer.reinterpret_unchecked() })
}
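    /// Allocates a subbuffer for a slice of `len` elements of type `T`.
    ///
    /// Panics if `len` is 0.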
pub fn allocate_slice<T>(&self, len: DeviceSize) -> Result<Subbuffer<[T]>, MemoryAllocatorError>
where
T: BufferContents,
{
self.allocate_unsized(len)
}
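    /// Allocates a subbuffer for unsized data whose slice tail has `len` elements.
    ///
    /// Panics if `len` is 0.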
pub fn allocate_unsized<T>(&self, len: DeviceSize) -> Result<Subbuffer<T>, MemoryAllocatorError>
where
T: BufferContents + ?Sized,
{
let len = NonZeroDeviceSize::new(len).expect("empty slices are not valid buffer contents");
let layout = T::LAYOUT.layout_for_len(len).unwrap();
unsafe { &mut *self.state.get() }
.allocate(layout)
.map(|subbuffer| unsafe { subbuffer.reinterpret_unchecked() })
}
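    /// Allocates a subbuffer with the given `layout`.
    ///
    /// Panics if `layout.alignment()` exceeds 64.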
pub fn allocate(&self, layout: DeviceLayout) -> Result<Subbuffer<[u8]>, MemoryAllocatorError> {
assert!(layout.alignment().as_devicesize() <= 64);
unsafe { &mut *self.state.get() }.allocate(layout)
}
}
unsafe impl<A> DeviceOwned for SubbufferAllocator<A>
where
A: MemoryAllocator,
{
fn device(&self) -> &Arc<Device> {
unsafe { &*self.state.get() }.memory_allocator.device()
}
}
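// The allocator's mutable state. `SubbufferAllocator` keeps it in an `UnsafeCell`, which is sound
// because the `UnsafeCell` makes the allocator `!Sync`, so the state is only ever accessed from
// one thread at a time.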
#[derive(Debug)]
struct SubbufferAllocatorState<A> {
memory_allocator: Arc<A>,
buffer_usage: BufferUsage,
memory_type_filter: MemoryTypeFilter,
buffer_alignment: DeviceAlignment,
arena_size: DeviceSize,
arena: Option<Arc<Arena>>,
free_start: DeviceSize,
reserve: Option<Arc<ArrayQueue<Arc<Buffer>>>>,
}
impl<A> SubbufferAllocatorState<A>
where
A: MemoryAllocator,
{
fn allocate(&mut self, layout: DeviceLayout) -> Result<Subbuffer<[u8]>, MemoryAllocatorError> {
let size = layout.size();
let alignment = cmp::max(layout.alignment(), self.buffer_alignment);
loop {
if self.arena.is_none() {
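                // The requested allocation doesn't fit in the arenas; grow them.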
if self.arena_size < size {
self.arena_size = size * 2;
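                    // Drop the reference to the old reserve queue so that the outdated, smaller
                    // arenas are freed once no longer in use instead of being recycled.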
self.reserve = None;
}
self.arena = Some(self.next_arena()?);
self.free_start = 0;
}
let arena = self.arena.as_ref().unwrap();
let allocation = match arena.buffer.memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => unreachable!(),
};
let arena_offset = allocation.offset();
let atom_size = allocation.atom_size().unwrap_or(DeviceAlignment::MIN);
let alignment = cmp::max(alignment, atom_size);
let offset = align_up(arena_offset + self.free_start, alignment);
if offset + size <= arena_offset + self.arena_size {
let offset = offset - arena_offset;
self.free_start = offset + size;
return Ok(Subbuffer::from_arena(arena.clone(), offset, layout.size()));
}
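            // We've reached the end of the arena; grab the next one on the next iteration.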
self.arena = None;
}
}
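    /// Pops a recycled arena buffer off the reserve queue, or creates a new one if the queue is
    /// empty, and wraps it in an `Arena` tied to that queue so it can be recycled again on drop.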
fn next_arena(&mut self) -> Result<Arc<Arena>, MemoryAllocatorError> {
if self.reserve.is_none() {
self.reserve = Some(Arc::new(ArrayQueue::new(MAX_ARENAS)));
}
let reserve = self.reserve.as_ref().unwrap();
reserve
.pop()
.map(Ok)
.unwrap_or_else(|| self.create_arena())
.map(|buffer| {
Arc::new(Arena {
buffer: ManuallyDrop::new(DeviceOwnedDebugWrapper(buffer)),
reserve: reserve.clone(),
})
})
}
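    /// Creates a brand-new arena buffer with the configured size, usage and memory type filter.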
fn create_arena(&self) -> Result<Arc<Buffer>, MemoryAllocatorError> {
Buffer::new(
self.memory_allocator.clone(),
BufferCreateInfo {
usage: self.buffer_usage,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: self.memory_type_filter,
..Default::default()
},
DeviceLayout::from_size_alignment(self.arena_size, 1).unwrap(),
)
.map_err(|err| match err {
Validated::Error(AllocateBufferError::AllocateMemory(err)) => err,
_ => unreachable!("{err:?}"),
})
}
}
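/// An arena buffer paired with the reserve queue it is recycled into. The buffer is wrapped in
/// `ManuallyDrop` so that `Drop` can move it out and push it back onto the queue.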
#[derive(Debug)]
pub(super) struct Arena {
buffer: ManuallyDrop<DeviceOwnedDebugWrapper<Arc<Buffer>>>,
reserve: Arc<ArrayQueue<Arc<Buffer>>>,
}
impl Arena {
pub(super) fn buffer(&self) -> &Arc<Buffer> {
&self.buffer
}
}
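// Recycle the arena's buffer by handing it back to the reserve queue. If the queue is already
// full, the push fails and the buffer is freed instead.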
impl Drop for Arena {
fn drop(&mut self) {
let buffer = unsafe { ManuallyDrop::take(&mut self.buffer) }.0;
let _ = self.reserve.push(buffer);
}
}
impl PartialEq for Arena {
fn eq(&self, other: &Self) -> bool {
self.buffer == other.buffer
}
}
impl Eq for Arena {}
impl Hash for Arena {
fn hash<H: Hasher>(&self, state: &mut H) {
self.buffer.hash(state);
}
}
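/// Parameters to create a new [`SubbufferAllocator`].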
pub struct SubbufferAllocatorCreateInfo {
    /// The initial size of an arena, in bytes.
    ///
    /// The default value is `0`, in which case the arenas are sized automatically based on the
    /// first allocation.
    pub arena_size: DeviceSize,
    /// The buffer usage that all arenas are created with.
    ///
    /// The default value is `BufferUsage::empty()`, which must be overridden.
    pub buffer_usage: BufferUsage,
    /// The memory type filter that all arenas are allocated with.
    ///
    /// The default value is `MemoryTypeFilter::PREFER_DEVICE |
    /// MemoryTypeFilter::HOST_SEQUENTIAL_WRITE`.
    pub memory_type_filter: MemoryTypeFilter,
    pub _ne: crate::NonExhaustive,
}
impl Default for SubbufferAllocatorCreateInfo {
#[inline]
fn default() -> Self {
SubbufferAllocatorCreateInfo {
arena_size: 0,
buffer_usage: BufferUsage::empty(),
            memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
_ne: crate::NonExhaustive(()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reserve() {
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer_allocator = SubbufferAllocator::new(
memory_allocator,
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::TRANSFER_SRC,
..Default::default()
},
);
assert_eq!(buffer_allocator.arena_size(), 0);
buffer_allocator.reserve(83).unwrap();
assert_eq!(buffer_allocator.arena_size(), 83);
}
#[test]
fn capacity_increase() {
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer_allocator = SubbufferAllocator::new(
memory_allocator,
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::TRANSFER_SRC,
..Default::default()
},
);
assert_eq!(buffer_allocator.arena_size(), 0);
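        // A 4-byte allocation can't fit the zero-sized arenas, so the arena size is doubled
        // from the allocation size: 2 * 4 = 8.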
buffer_allocator.allocate_sized::<u32>().unwrap();
assert_eq!(buffer_allocator.arena_size(), 8);
}
}