use super::{
pool::{
CommandBufferAllocateInfo, CommandPool, CommandPoolAlloc, CommandPoolCreateInfo,
CommandPoolResetFlags,
},
CommandBufferLevel,
};
use crate::{
device::{Device, DeviceOwned},
instance::InstanceOwnedDebugWrapper,
Validated, VulkanError,
};
use crossbeam_queue::ArrayQueue;
use smallvec::{IntoIter, SmallVec};
use std::{
cell::{Cell, UnsafeCell},
error::Error,
fmt::Display,
marker::PhantomData,
mem::ManuallyDrop,
sync::Arc,
thread,
};
use thread_local::ThreadLocal;
/// Maximum number of reset-but-unused `PoolInner`s kept per thread and queue family in the
/// reserve; when the reserve is full, additionally dropped pools are destroyed instead of
/// recycled (see `Drop for Pool`).
const MAX_POOLS: usize = 32;
/// Types that manage the memory of command buffers.
///
/// # Safety
///
/// NOTE(review): this trait is `unsafe` to implement; implementors are presumably required to
/// hand out allocations that belong to the requested queue family and remain valid for the
/// lifetime of the returned objects — confirm against the crate-level documentation.
pub unsafe trait CommandBufferAllocator: DeviceOwned {
    /// The iterator type produced by [`allocate`](Self::allocate).
    type Iter: Iterator<Item = Self::Builder>;
    /// The type of an allocation while the command buffer is still being built.
    type Builder: CommandBufferBuilderAlloc<Alloc = Self::Alloc>;
    /// The type of a finished command buffer allocation.
    type Alloc: CommandBufferAlloc;
    /// Allocates `command_buffer_count` command buffers of the given `level` for the queue
    /// family with index `queue_family_index`.
    fn allocate(
        &self,
        queue_family_index: u32,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Result<Self::Iter, VulkanError>;
}
/// A command buffer allocation in its building stage.
///
/// # Safety
///
/// NOTE(review): unsafe trait — implementors presumably guarantee that `inner` and the
/// value returned by `into_alloc` refer to the same underlying command buffer; confirm.
pub unsafe trait CommandBufferBuilderAlloc: DeviceOwned {
    /// The finished allocation type this converts into.
    type Alloc: CommandBufferAlloc;
    /// Returns the underlying raw command pool allocation.
    fn inner(&self) -> &CommandPoolAlloc;
    /// Converts this builder allocation into a finished allocation.
    fn into_alloc(self) -> Self::Alloc;
    /// Returns the index of the queue family this command buffer was allocated for.
    fn queue_family_index(&self) -> u32;
}
/// A finished command buffer allocation.
///
/// # Safety
///
/// NOTE(review): unsafe trait — implementations must be safe to move across and drop on
/// other threads (`Send + Sync + 'static` bound); confirm the intended contract upstream.
pub unsafe trait CommandBufferAlloc: DeviceOwned + Send + Sync + 'static {
    /// Returns the underlying raw command pool allocation.
    fn inner(&self) -> &CommandPoolAlloc;
    /// Returns the index of the queue family this command buffer was allocated for.
    fn queue_family_index(&self) -> u32;
}
/// Standard implementation of [`CommandBufferAllocator`].
///
/// Keeps a separate set of command pools per thread (via `ThreadLocal`), with one lazily
/// initialized slot per queue family of the device, so allocation itself never requires
/// cross-thread synchronization.
#[derive(Debug)]
pub struct StandardCommandBufferAllocator {
    device: InstanceOwnedDebugWrapper<Arc<Device>>,
    // Per-thread storage: one `Entry` slot per queue family index on the physical device.
    // `UnsafeCell` because only the owning thread ever dereferences a slot (see `entry`).
    pools: ThreadLocal<SmallVec<[UnsafeCell<Option<Entry>>; 8]>>,
    // Preallocation counts used for every pool this allocator creates.
    create_info: StandardCommandBufferAllocatorCreateInfo,
}
impl StandardCommandBufferAllocator {
    /// Creates a new `StandardCommandBufferAllocator`.
    #[inline]
    pub fn new(device: Arc<Device>, create_info: StandardCommandBufferAllocatorCreateInfo) -> Self {
        StandardCommandBufferAllocator {
            device: InstanceOwnedDebugWrapper(device),
            pools: ThreadLocal::new(),
            create_info,
        }
    }

    /// Tries to reset the calling thread's command pool for `queue_family_index`, making
    /// all of its preallocated command buffers available again.
    ///
    /// Returns `Ok(())` without doing anything if this thread has never allocated from
    /// that queue family. Fails with [`ResetCommandPoolError::InUse`] if command buffers
    /// allocated from the pool are still alive.
    #[inline]
    pub fn try_reset_pool(
        &self,
        queue_family_index: u32,
        flags: CommandPoolResetFlags,
    ) -> Result<(), Validated<ResetCommandPoolError>> {
        // SAFETY: the pointer comes from a thread-local `UnsafeCell`, so only the current
        // thread can reach it, and the `&mut` lives only for this statement.
        if let Some(entry) = unsafe { &mut *self.entry(queue_family_index) }.as_mut() {
            entry.try_reset_pool(flags)
        } else {
            Ok(())
        }
    }

    /// Drops the calling thread's entry for `queue_family_index`, releasing its pool.
    /// The underlying Vulkan pool is recycled or destroyed only once every outstanding
    /// allocation holding an `Arc` to it has been dropped.
    #[inline]
    pub fn clear(&self, queue_family_index: u32) {
        // SAFETY: same thread-local argument as in `try_reset_pool`.
        unsafe { *self.entry(queue_family_index) = None };
    }

    /// Returns a raw pointer to the calling thread's entry slot for `queue_family_index`,
    /// lazily creating one empty slot per queue family on this thread's first call.
    ///
    /// Panics (via indexing) if `queue_family_index` is out of range for the device.
    fn entry(&self, queue_family_index: u32) -> *mut Option<Entry> {
        let pools = self.pools.get_or(|| {
            self.device
                .physical_device()
                .queue_family_properties()
                .iter()
                .map(|_| UnsafeCell::new(None))
                .collect()
        });
        pools[queue_family_index as usize].get()
    }
}
unsafe impl CommandBufferAllocator for StandardCommandBufferAllocator {
    type Iter = IntoIter<[StandardCommandBufferBuilderAlloc; 1]>;
    type Builder = StandardCommandBufferBuilderAlloc;
    type Alloc = StandardCommandBufferAlloc;

    /// Allocates command buffers from the calling thread's pool for `queue_family_index`,
    /// creating the pool (and its reserve) on first use.
    ///
    /// # Panics
    ///
    /// - Panics if `queue_family_index` is not among the device's active queue families.
    /// - Panics (inside `Pool::allocate`) if `command_buffer_count` exceeds the configured
    ///   capacity for `level`, or if that level was configured with a count of zero.
    #[inline]
    fn allocate(
        &self,
        queue_family_index: u32,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Result<Self::Iter, VulkanError> {
        assert!(self
            .device
            .active_queue_family_indices()
            .contains(&queue_family_index));
        // SAFETY: the pointer comes from a thread-local `UnsafeCell`; only the current
        // thread can reach it, and the `&mut` is not held across anything that calls
        // `self.entry()` again.
        let entry = unsafe { &mut *self.entry(queue_family_index) };
        if entry.is_none() {
            // First allocation on this thread for this queue family: create the pool and
            // the reserve that exhausted pools will later be recycled through.
            let reserve = Arc::new(ArrayQueue::new(MAX_POOLS));
            *entry = Some(Entry {
                pool: Pool::new(
                    self.device.clone(),
                    queue_family_index,
                    reserve.clone(),
                    &self.create_info,
                )?,
                reserve,
            });
        }
        let entry = entry.as_mut().unwrap();

        // Fast path: the current pool still has preallocated buffers left.
        if let Some(allocs) = entry.pool.allocate(level, command_buffer_count) {
            return Ok(allocs);
        }

        // The current pool is exhausted. Try to reset it in place; if that fails (e.g.
        // allocations from it are still alive), swap in a recycled pool from the reserve
        // or create a brand-new one. The old pool pushes itself into the reserve once its
        // last outstanding allocation is dropped (see `Drop for Pool`).
        if entry
            .try_reset_pool(CommandPoolResetFlags::empty())
            .is_err()
        {
            entry.pool = if let Some(inner) = entry.reserve.pop() {
                Arc::new(Pool {
                    inner: ManuallyDrop::new(inner),
                    reserve: entry.reserve.clone(),
                })
            } else {
                Pool::new(
                    self.device.clone(),
                    queue_family_index,
                    entry.reserve.clone(),
                    &self.create_info,
                )?
            };
        }

        // The pool was either reset or replaced by one with zero outstanding allocations,
        // so this can only fail by panicking (request exceeding capacity), never `None`.
        Ok(entry.pool.allocate(level, command_buffer_count).unwrap())
    }
}
/// Forwarding implementation so an `Arc`-wrapped allocator can be used directly wherever a
/// [`CommandBufferAllocator`] is expected.
unsafe impl<T: CommandBufferAllocator> CommandBufferAllocator for Arc<T> {
    type Iter = T::Iter;
    type Builder = T::Builder;
    type Alloc = T::Alloc;

    /// Delegates to the wrapped allocator.
    #[inline]
    fn allocate(
        &self,
        queue_family_index: u32,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Result<Self::Iter, VulkanError> {
        self.as_ref()
            .allocate(queue_family_index, level, command_buffer_count)
    }
}
unsafe impl DeviceOwned for StandardCommandBufferAllocator {
    /// Returns the device this allocator was created for.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        // Deref-coerces through the `InstanceOwnedDebugWrapper` to `&Arc<Device>`.
        &self.device
    }
}
/// Per-thread, per-queue-family allocator state.
#[derive(Debug)]
struct Entry {
    // The pool this thread currently allocates from.
    pool: Arc<Pool>,
    // Reserve of reset `PoolInner`s, shared with every `Pool` created for this entry;
    // dropped pools push themselves here for reuse (see `Drop for Pool`).
    reserve: Arc<ArrayQueue<PoolInner>>,
}
// SAFETY: `Entry` is not automatically `Send` because `Pool` contains `Cell`s.
// NOTE(review): soundness presumably relies on each `Entry` living in a `ThreadLocal` slot
// that only its owning thread dereferences, so the `Cell`s are never accessed concurrently —
// confirm that nothing iterates the `ThreadLocal` while other threads are allocating.
unsafe impl Send for Entry {}
impl Entry {
    /// Resets this entry's command pool, failing with [`ResetCommandPoolError::InUse`] if
    /// any command buffers allocated from it are still alive.
    fn try_reset_pool(
        &mut self,
        flags: CommandPoolResetFlags,
    ) -> Result<(), Validated<ResetCommandPoolError>> {
        // `Arc::get_mut` succeeds only when no other reference to the pool exists; each
        // outstanding `StandardCommandBufferAlloc` holds a clone of this `Arc`, so success
        // proves the pool is not in use by any live allocation.
        if let Some(pool) = Arc::get_mut(&mut self.pool) {
            // SAFETY: exclusive access was just established above. NOTE(review): confirm
            // that no command buffer from this pool can still be pending execution on the
            // device at this point.
            unsafe {
                pool.inner.inner.reset(flags).map_err(|err| match err {
                    Validated::Error(err) => {
                        Validated::Error(ResetCommandPoolError::VulkanError(err))
                    }
                    Validated::ValidationError(err) => err.into(),
                })?
            };
            // A successful reset makes every preallocated buffer available again, so the
            // hand-out counters start over from zero.
            *pool.inner.primary_allocations.get_mut() = 0;
            *pool.inner.secondary_allocations.get_mut() = 0;
            Ok(())
        } else {
            Err(ResetCommandPoolError::InUse.into())
        }
    }
}
/// A command pool together with the reserve it recycles itself into when dropped.
#[derive(Debug)]
struct Pool {
    // Wrapped in `ManuallyDrop` so `Drop for Pool` can move it out and either push it into
    // `reserve` or let it be destroyed.
    inner: ManuallyDrop<PoolInner>,
    // Where `inner` is pushed on drop (shared with the owning `Entry`).
    reserve: Arc<ArrayQueue<PoolInner>>,
}
/// The actual command pool plus its preallocated command buffers and bookkeeping.
#[derive(Debug)]
struct PoolInner {
    // The Vulkan command pool the buffers below were allocated from.
    inner: CommandPool,
    // Preallocated primary command buffers; `None` if configured with a count of zero.
    primary_pool: Option<ArrayQueue<CommandPoolAlloc>>,
    // Preallocated secondary command buffers; `None` if configured with a count of zero.
    secondary_pool: Option<ArrayQueue<CommandPoolAlloc>>,
    // Number of primary command buffers handed out since the last reset (never decremented
    // on drop of an allocation — only a reset clears it).
    primary_allocations: Cell<usize>,
    // Same as above, for secondary command buffers.
    secondary_allocations: Cell<usize>,
}
impl Pool {
fn new(
device: Arc<Device>,
queue_family_index: u32,
reserve: Arc<ArrayQueue<PoolInner>>,
create_info: &StandardCommandBufferAllocatorCreateInfo,
) -> Result<Arc<Self>, VulkanError> {
let inner = CommandPool::new(
device,
CommandPoolCreateInfo {
queue_family_index,
..Default::default()
},
)
.map_err(Validated::unwrap)?;
let primary_pool = if create_info.primary_buffer_count > 0 {
let pool = ArrayQueue::new(create_info.primary_buffer_count);
for alloc in inner.allocate_command_buffers(CommandBufferAllocateInfo {
level: CommandBufferLevel::Primary,
command_buffer_count: create_info.primary_buffer_count as u32,
..Default::default()
})? {
let _ = pool.push(alloc);
}
Some(pool)
} else {
None
};
let secondary_pool = if create_info.secondary_buffer_count > 0 {
let pool = ArrayQueue::new(create_info.secondary_buffer_count);
for alloc in inner.allocate_command_buffers(CommandBufferAllocateInfo {
level: CommandBufferLevel::Secondary,
command_buffer_count: create_info.secondary_buffer_count as u32,
..Default::default()
})? {
let _ = pool.push(alloc);
}
Some(pool)
} else {
None
};
Ok(Arc::new(Pool {
inner: ManuallyDrop::new(PoolInner {
inner,
primary_pool,
secondary_pool,
primary_allocations: Cell::new(0),
secondary_allocations: Cell::new(0),
}),
reserve,
}))
}
fn allocate(
self: &Arc<Self>,
level: CommandBufferLevel,
command_buffer_count: u32,
) -> Option<IntoIter<[StandardCommandBufferBuilderAlloc; 1]>> {
let command_buffer_count = command_buffer_count as usize;
match level {
CommandBufferLevel::Primary => {
if let Some(pool) = &self.inner.primary_pool {
let count = self.inner.primary_allocations.get();
if count + command_buffer_count <= pool.capacity() {
let mut output = SmallVec::<[_; 1]>::with_capacity(command_buffer_count);
for _ in 0..command_buffer_count {
output.push(StandardCommandBufferBuilderAlloc {
inner: StandardCommandBufferAlloc {
inner: ManuallyDrop::new(pool.pop().unwrap()),
pool: self.clone(),
},
_marker: PhantomData,
});
}
self.inner
.primary_allocations
.set(count + command_buffer_count);
Some(output.into_iter())
} else if command_buffer_count > pool.capacity() {
panic!(
"command buffer count ({}) exceeds the capacity of the primary command \
buffer pool ({})",
command_buffer_count, pool.capacity(),
);
} else {
None
}
} else {
panic!(
"attempted to allocate a primary command buffer when the primary command \
buffer pool was configured to be empty",
);
}
}
CommandBufferLevel::Secondary => {
if let Some(pool) = &self.inner.secondary_pool {
let count = self.inner.secondary_allocations.get();
if count + command_buffer_count <= pool.capacity() {
let mut output = SmallVec::<[_; 1]>::with_capacity(command_buffer_count);
for _ in 0..command_buffer_count {
output.push(StandardCommandBufferBuilderAlloc {
inner: StandardCommandBufferAlloc {
inner: ManuallyDrop::new(pool.pop().unwrap()),
pool: self.clone(),
},
_marker: PhantomData,
});
}
self.inner
.secondary_allocations
.set(count + command_buffer_count);
Some(output.into_iter())
} else if command_buffer_count > pool.capacity() {
panic!(
"command buffer count ({}) exceeds the capacity of the secondary \
command buffer pool ({})",
command_buffer_count,
pool.capacity(),
);
} else {
None
}
} else {
panic!(
"attempted to allocate a secondary command buffer when the secondary \
command buffer pool was configured to be empty",
);
}
}
}
}
}
impl Drop for Pool {
    fn drop(&mut self) {
        // SAFETY: `self.inner` is only ever taken here, and `self` is not used afterwards.
        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
        if thread::panicking() {
            // While unwinding, skip the reset and recycling: the `unwrap` below could
            // panic again and abort the process. `inner` is simply dropped here, which
            // destroys the command pool instead of reusing it.
            return;
        }
        // NOTE(review): resetting here assumes no command buffer from this pool is still
        // executing; `Pool` is only dropped once every `Arc` reference (including those
        // held by outstanding allocations) is gone — confirm that covers device-side use.
        unsafe { inner.inner.reset(CommandPoolResetFlags::empty()) }.unwrap();
        inner.primary_allocations.set(0);
        inner.secondary_allocations.set(0);
        // Offer the reset pool to the reserve for reuse; if the reserve is already full,
        // the push fails and `inner` is dropped (destroying the pool) instead.
        let _ = self.reserve.push(inner);
    }
}
/// Parameters to create a new [`StandardCommandBufferAllocator`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StandardCommandBufferAllocatorCreateInfo {
    // Number of primary command buffers preallocated per pool; 0 disables primary
    // allocations entirely (allocating one then panics).
    pub primary_buffer_count: usize,
    // Number of secondary command buffers preallocated per pool; 0 disables them.
    pub secondary_buffer_count: usize,
    pub _ne: crate::NonExhaustive,
}
impl Default for StandardCommandBufferAllocatorCreateInfo {
    /// Returns a configuration with 32 preallocated primary command buffers per pool and
    /// no secondary command buffers.
    #[inline]
    fn default() -> Self {
        Self {
            primary_buffer_count: 32,
            secondary_buffer_count: 0,
            _ne: crate::NonExhaustive(()),
        }
    }
}
/// Command buffer allocated from a [`StandardCommandBufferAllocator`] that is currently
/// being built.
pub struct StandardCommandBufferBuilderAlloc {
    // The only purpose of this is to prevent public construction of the type.
    inner: StandardCommandBufferAlloc,
    // `PhantomData<*const ()>` makes this type `!Send` and `!Sync` (raw pointers suppress
    // the auto impls), unlike the finished `StandardCommandBufferAlloc`.
    _marker: PhantomData<*const ()>,
}
unsafe impl CommandBufferBuilderAlloc for StandardCommandBufferBuilderAlloc {
type Alloc = StandardCommandBufferAlloc;
#[inline]
fn inner(&self) -> &CommandPoolAlloc {
self.inner.inner()
}
#[inline]
fn into_alloc(self) -> Self::Alloc {
self.inner
}
#[inline]
fn queue_family_index(&self) -> u32 {
self.inner.queue_family_index()
}
}
unsafe impl DeviceOwned for StandardCommandBufferBuilderAlloc {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
/// Command buffer allocated from a [`StandardCommandBufferAllocator`].
pub struct StandardCommandBufferAlloc {
    // `ManuallyDrop` so the `Drop` impl can move the buffer back into the pool's queue.
    inner: ManuallyDrop<CommandPoolAlloc>,
    // Keeps the pool alive; this clone is also what makes `Entry::try_reset_pool` fail
    // with `InUse` (via `Arc::get_mut`) while the allocation is outstanding.
    pool: Arc<Pool>,
}
// SAFETY: NOTE(review): `Pool` contains `Cell`s, which make `Arc<Pool>` neither `Send` nor
// `Sync` automatically. These impls presumably rely on the `Cell`s only being mutated by the
// pool's owning thread while this type merely holds the `Arc` and pushes into the (thread-
// safe) `ArrayQueue` on drop — confirm that no cross-thread `Cell` access is possible.
unsafe impl Send for StandardCommandBufferAlloc {}
unsafe impl Sync for StandardCommandBufferAlloc {}
unsafe impl CommandBufferAlloc for StandardCommandBufferAlloc {
    /// Returns the underlying raw command pool allocation.
    #[inline]
    fn inner(&self) -> &CommandPoolAlloc {
        // Deref-coerces through the `ManuallyDrop`.
        &self.inner
    }

    /// Returns the queue family index of the command pool this was allocated from.
    #[inline]
    fn queue_family_index(&self) -> u32 {
        self.pool.inner.inner.queue_family_index()
    }
}
unsafe impl DeviceOwned for StandardCommandBufferAlloc {
    /// Returns the device the owning command pool was created on.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.pool.inner.inner.device()
    }
}
impl Drop for StandardCommandBufferAlloc {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: `self.inner` is only ever taken here, and `self` is being dropped.
        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
        // Route the buffer back to the queue matching its level.
        let pool = match inner.level() {
            CommandBufferLevel::Primary => &self.pool.inner.primary_pool,
            CommandBufferLevel::Secondary => &self.pool.inner.secondary_pool,
        };
        // The queue for this level must exist, since this buffer was popped from it at
        // allocation time. If the push were ever to fail (queue full), the buffer is
        // simply dropped and freed instead of recycled.
        let _ = pool.as_ref().unwrap().push(inner);
    }
}
/// Error that can happen when resetting a command pool.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResetCommandPoolError {
    /// A runtime error occurred while resetting the underlying Vulkan pool.
    VulkanError(VulkanError),
    /// Command buffers allocated from the pool are still alive.
    InUse,
}
impl Error for ResetCommandPoolError {}

impl Display for ResetCommandPoolError {
    /// Writes a short, user-facing description of the error.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let description = match self {
            Self::VulkanError(_) => "a runtime error occurred",
            Self::InUse => "the command pool is still in use",
        };
        f.write_str(description)
    }
}
impl From<VulkanError> for ResetCommandPoolError {
fn from(err: VulkanError) -> Self {
Self::VulkanError(err)
}
}
impl From<ResetCommandPoolError> for Validated<ResetCommandPoolError> {
fn from(err: ResetCommandPoolError) -> Self {
Self::Error(err)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::VulkanObject;
    use std::thread;

    /// Each thread should receive its own `vkCommandPool`: the allocator stores entries in
    /// a `ThreadLocal`, so allocations on different threads must come from different pools.
    #[test]
    fn threads_use_different_pools() {
        let (device, queue) = gfx_dev_and_queue!();
        let allocator = StandardCommandBufferAllocator::new(device, Default::default());
        // Allocate on the main thread and remember the raw pool handle.
        let pool1 = allocator
            .allocate(queue.queue_family_index(), CommandBufferLevel::Primary, 1)
            .unwrap()
            .next()
            .unwrap()
            .into_alloc()
            .pool
            .inner
            .inner
            .handle();
        // Allocate on a second thread; the thread-local storage must yield a new pool.
        thread::spawn(move || {
            let pool2 = allocator
                .allocate(queue.queue_family_index(), CommandBufferLevel::Primary, 1)
                .unwrap()
                .next()
                .unwrap()
                .into_alloc()
                .pool
                .inner
                .inner
                .handle();
            assert_ne!(pool1, pool2);
        })
        .join()
        .unwrap();
    }
}