pub use self::builder::*;
pub(in crate::command_buffer) use self::builder::{
BeginRenderPassState, BeginRenderingState, QueryState, RenderPassState,
RenderPassStateAttachments, RenderPassStateType, SetOrPush,
};
use super::{
allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
sys::{UnsafeCommandBuffer, UnsafeCommandBufferBuilder},
CommandBufferInheritanceInfo, CommandBufferResourcesUsage, CommandBufferState,
CommandBufferUsage, PrimaryCommandBufferAbstract, ResourceInCommand,
SecondaryCommandBufferAbstract, SecondaryCommandBufferResourcesUsage, SecondaryResourceUseRef,
};
use crate::{
buffer::Subbuffer,
device::{Device, DeviceOwned},
image::{Image, ImageLayout, ImageSubresourceRange},
sync::PipelineStageAccessFlags,
DeviceSize, ValidationError, VulkanObject,
};
use parking_lot::{Mutex, MutexGuard};
use std::{
fmt::{Debug, Error as FmtError, Formatter},
ops::Range,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
mod builder;
/// A primary-level command buffer produced by an `AutoCommandBufferBuilder`
/// (see the tests below, where `AutoCommandBufferBuilder::primary(..).build()`
/// yields one of these).
pub struct PrimaryAutoCommandBuffer<A = StandardCommandBufferAllocator>
where
    A: CommandBufferAllocator,
{
    // The raw recorded command buffer; every trait impl below delegates to it.
    inner: UnsafeCommandBuffer<A>,
    // Never called in this module; presumably stored only so that the objects
    // captured by these closures stay alive as long as the command buffer
    // does — TODO confirm against the `builder` submodule.
    _keep_alive_objects:
        Vec<Box<dyn Fn(&mut UnsafeCommandBufferBuilder<A>) + Send + Sync + 'static>>,
    // Per-resource usage information gathered while recording.
    resources_usage: CommandBufferResourcesUsage,
    // Submission-tracking state; exposed through `state()` below.
    state: Mutex<CommandBufferState>,
}
unsafe impl<A> VulkanObject for PrimaryAutoCommandBuffer<A>
where
    A: CommandBufferAllocator,
{
    type Handle = ash::vk::CommandBuffer;

    /// Returns the raw Vulkan handle of the inner command buffer.
    fn handle(&self) -> Self::Handle {
        self.inner.handle()
    }
}
unsafe impl<A> DeviceOwned for PrimaryAutoCommandBuffer<A>
where
    A: CommandBufferAllocator,
{
    /// Returns the device of the inner command buffer.
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}
unsafe impl<A> PrimaryCommandBufferAbstract for PrimaryAutoCommandBuffer<A>
where
    A: CommandBufferAllocator,
{
    /// The usage flags the command buffer was recorded with.
    fn usage(&self) -> CommandBufferUsage {
        self.inner.usage()
    }

    /// Locks and returns the mutable submission-tracking state.
    fn state(&self) -> MutexGuard<'_, CommandBufferState> {
        self.state.lock()
    }

    /// Resource-usage information collected during recording.
    fn resources_usage(&self) -> &CommandBufferResourcesUsage {
        &self.resources_usage
    }
}
/// A secondary-level command buffer produced by an `AutoCommandBufferBuilder`
/// (see the tests below, where `AutoCommandBufferBuilder::secondary(..).build()`
/// yields one of these).
pub struct SecondaryAutoCommandBuffer<A = StandardCommandBufferAllocator>
where
    A: CommandBufferAllocator,
{
    // The raw recorded command buffer; trait impls below delegate to it.
    inner: UnsafeCommandBuffer<A>,
    // Never called in this module; presumably stored only so that the objects
    // captured by these closures stay alive as long as the command buffer
    // does — TODO confirm against the `builder` submodule.
    _keep_alive_objects:
        Vec<Box<dyn Fn(&mut UnsafeCommandBufferBuilder<A>) + Send + Sync + 'static>>,
    // Per-resource usage information gathered while recording.
    resources_usage: SecondaryCommandBufferResourcesUsage,
    // How this buffer may be (re)submitted; enforced by `lock_record`/`unlock`.
    submit_state: SubmitState,
}
unsafe impl<A> VulkanObject for SecondaryAutoCommandBuffer<A>
where
    A: CommandBufferAllocator,
{
    type Handle = ash::vk::CommandBuffer;

    /// Returns the raw Vulkan handle of the inner command buffer.
    fn handle(&self) -> Self::Handle {
        self.inner.handle()
    }
}
unsafe impl<A> DeviceOwned for SecondaryAutoCommandBuffer<A>
where
    A: CommandBufferAllocator,
{
    /// Returns the device of the inner command buffer.
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}
unsafe impl<A> SecondaryCommandBufferAbstract for SecondaryAutoCommandBuffer<A>
where
    A: CommandBufferAllocator,
{
    /// The usage flags the command buffer was recorded with.
    fn usage(&self) -> CommandBufferUsage {
        self.inner.usage()
    }

    /// Inheritance info of the inner command buffer; secondary command
    /// buffers always carry one, so the `unwrap` cannot fail here.
    fn inheritance_info(&self) -> &CommandBufferInheritanceInfo {
        self.inner.inheritance_info().as_ref().unwrap()
    }

    /// Atomically claims the command buffer for execution, enforcing the
    /// submission rules implied by its usage (see `SubmitState`).
    fn lock_record(&self) -> Result<(), Box<ValidationError>> {
        match &self.submit_state {
            // One-time buffers may be submitted exactly once, ever. `swap`
            // claims the slot atomically; `true` means it was already taken.
            SubmitState::OneTime { already_submitted } => {
                if already_submitted.swap(true, Ordering::SeqCst) {
                    return Err(Box::new(ValidationError {
                        problem: "the command buffer was created with the \
                            `CommandBufferUsage::OneTimeSubmit` usage, but \
                            it was already submitted before"
                            .into(),
                        ..Default::default()
                    }));
                }
            }
            // Exclusive-use buffers may be resubmitted, but never while a
            // previous submission is still executing.
            SubmitState::ExclusiveUse { in_use } => {
                if in_use.swap(true, Ordering::SeqCst) {
                    return Err(Box::new(ValidationError {
                        problem: "the command buffer was created with the \
                            `CommandBufferUsage::MultipleSubmit` usage, but \
                            it is currently being executed"
                            .into(),
                        ..Default::default()
                    }));
                }
            }
            // Concurrent buffers need no locking at all.
            SubmitState::Concurrent => {}
        }

        Ok(())
    }

    /// Releases the claim taken by `lock_record`.
    ///
    /// # Safety
    ///
    /// Must only be called after a matching successful `lock_record`.
    unsafe fn unlock(&self) {
        match &self.submit_state {
            SubmitState::OneTime { already_submitted } => {
                // The flag stays set: a one-time buffer is never reusable.
                debug_assert!(already_submitted.load(Ordering::SeqCst));
            }
            SubmitState::ExclusiveUse { in_use } => {
                // Clear the in-use flag unconditionally (the swap must stay
                // outside `debug_assert!` so it also runs in release builds);
                // it must have been set by `lock_record`.
                let was_in_use = in_use.swap(false, Ordering::SeqCst);
                debug_assert!(was_in_use);
            }
            SubmitState::Concurrent => {}
        }
    }

    /// Resource-usage information collected during recording.
    fn resources_usage(&self) -> &SecondaryCommandBufferResourcesUsage {
        &self.resources_usage
    }
}
/// How a secondary command buffer may be submitted; which variant is used
/// mirrors the `CommandBufferUsage` named in `lock_record`'s error messages.
#[derive(Debug)]
enum SubmitState {
    // `SimultaneousUse`: no tracking — may be executed any number of times,
    // including concurrently.
    Concurrent,
    // `MultipleSubmit`: may be resubmitted, but not while already executing.
    ExclusiveUse {
        // True while the command buffer is locked for execution.
        in_use: AtomicBool,
    },
    // `OneTimeSubmit`: may be submitted at most once, ever.
    OneTime {
        // Set permanently on the first (and only allowed) submission.
        already_submitted: AtomicBool,
    },
}
/// Identifies a resource use within a recorded command, optionally pointing
/// at a use inside an executed secondary command buffer.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(in crate::command_buffer) struct ResourceUseRef2 {
    // Where within the command the resource is used.
    pub(in crate::command_buffer) resource_in_command: ResourceInCommand,
    // `Some` when the use originates from a secondary command buffer;
    // `None` otherwise (see the `From<ResourceInCommand>` impl below).
    pub(in crate::command_buffer) secondary_use_ref: Option<SecondaryResourceUseRef>,
}
impl From<ResourceInCommand> for ResourceUseRef2 {
#[inline]
fn from(resource_in_command: ResourceInCommand) -> Self {
Self {
resource_in_command,
secondary_use_ref: None,
}
}
}
/// A buffer or image resource used by a recorded command, together with the
/// access information (and, for images, layouts) attached to that use.
#[derive(Clone, Debug)]
pub(super) enum Resource {
    Buffer {
        buffer: Subbuffer<[u8]>,
        // Byte range of the buffer touched by the command.
        range: Range<DeviceSize>,
        // Pipeline stages and access types of the use.
        memory_access: PipelineStageAccessFlags,
    },
    Image {
        image: Arc<Image>,
        // Which mip levels / array layers / aspects are touched.
        subresource_range: ImageSubresourceRange,
        // Pipeline stages and access types of the use.
        memory_access: PipelineStageAccessFlags,
        // Layout the subresources are expected to be in at the start of the
        // command, and the layout they are left in afterwards.
        start_layout: ImageLayout,
        end_layout: ImageLayout,
    },
}
/// Bookkeeping kept for a single recorded command.
struct CommandInfo {
    // Human-readable command name; the `Debug` impl below prints only this.
    name: &'static str,
    // Every resource the command uses, keyed by its use reference.
    used_resources: Vec<(ResourceUseRef2, Resource)>,
    // Whether this command begins or ends a render pass (or neither).
    render_pass: RenderPassCommand,
}
/// Classifies a command's relationship to render pass boundaries.
#[derive(Debug)]
enum RenderPassCommand {
    // The command neither begins nor ends a render pass.
    None,
    // The command begins a render pass.
    Begin,
    // The command ends a render pass.
    End,
}
impl Debug for CommandInfo {
    /// Formats as the bare command name; the resource list is omitted.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        f.write_str(self.name)
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        buffer::{Buffer, BufferCreateInfo, BufferUsage},
        command_buffer::{
            allocator::{StandardCommandBufferAllocator, StandardCommandBufferAllocatorCreateInfo},
            AutoCommandBufferBuilder, BufferCopy, CommandBufferUsage, CopyBufferInfoTyped,
            PrimaryCommandBufferAbstract,
        },
        descriptor_set::{
            allocator::StandardDescriptorSetAllocator,
            layout::{
                DescriptorSetLayout, DescriptorSetLayoutBinding, DescriptorSetLayoutCreateInfo,
                DescriptorType,
            },
            PersistentDescriptorSet, WriteDescriptorSet,
        },
        device::{Device, DeviceCreateInfo, QueueCreateInfo},
        image::sampler::{Sampler, SamplerCreateInfo},
        memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
        pipeline::{layout::PipelineLayoutCreateInfo, PipelineBindPoint, PipelineLayout},
        shader::ShaderStages,
        sync::GpuFuture,
    };
    use std::sync::Arc;

    /// Smoke test: creating an (empty) primary builder must succeed.
    #[test]
    fn basic_creation() {
        let (device, queue) = gfx_dev_and_queue!();

        let allocator = StandardCommandBufferAllocator::new(device, Default::default());

        AutoCommandBufferBuilder::primary(
            &allocator,
            queue.queue_family_index(),
            CommandBufferUsage::MultipleSubmit,
        )
        .unwrap();
    }

    /// A buffer-to-buffer copy honors offsets and size: `source[0..2]`
    /// (values 1, 2) lands in `destination[1..3]`, leaving the remaining
    /// destination elements untouched.
    #[test]
    fn copy_buffer_dimensions() {
        let instance = instance!();

        // Skip the test when no physical device is available.
        let physical_device = match instance.enumerate_physical_devices().unwrap().next() {
            Some(p) => p,
            None => return,
        };

        let (device, mut queues) = Device::new(
            physical_device,
            DeviceCreateInfo {
                queue_create_infos: vec![QueueCreateInfo {
                    queue_family_index: 0,
                    ..Default::default()
                }],
                ..Default::default()
            },
        )
        .unwrap();

        let queue = queues.next().unwrap();
        let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));

        // Source holds [1, 2].
        let source = Buffer::from_iter(
            memory_allocator.clone(),
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
            AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_HOST
                    | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                ..Default::default()
            },
            [1_u32, 2].iter().copied(),
        )
        .unwrap();

        // Destination starts as [0, 10, 20, 3, 4]; elements 1..3 will be
        // overwritten by the copy below.
        let destination = Buffer::from_iter(
            memory_allocator,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_DST,
                ..Default::default()
            },
            AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_HOST
                    | MemoryTypeFilter::HOST_RANDOM_ACCESS,
                ..Default::default()
            },
            [0_u32, 10, 20, 3, 4].iter().copied(),
        )
        .unwrap();

        let cb_allocator = StandardCommandBufferAllocator::new(device, Default::default());
        let mut cbb = AutoCommandBufferBuilder::primary(
            &cb_allocator,
            queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )
        .unwrap();

        // Copy two elements from offset 0 of the source to offset 1 of the
        // destination.
        cbb.copy_buffer(CopyBufferInfoTyped {
            regions: [BufferCopy {
                src_offset: 0,
                dst_offset: 1,
                size: 2,
                ..Default::default()
            }]
            .into(),
            ..CopyBufferInfoTyped::buffers(source, destination.clone())
        })
        .unwrap();

        let cb = cbb.build().unwrap();

        // Submit and wait for completion before reading back.
        let future = cb
            .execute(queue)
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap();
        future.wait(None).unwrap();

        let result = destination.read().unwrap();

        assert_eq!(*result, [0_u32, 1, 2, 3, 4]);
    }

    /// A `MultipleSubmit` (non-concurrent) secondary command buffer cannot be
    /// recorded twice into primaries that are alive at the same time — the
    /// conflict clears once the first primary is dropped.
    #[test]
    fn secondary_nonconcurrent_conflict() {
        let (device, queue) = gfx_dev_and_queue!();

        let cb_allocator = StandardCommandBufferAllocator::new(
            device,
            StandardCommandBufferAllocatorCreateInfo {
                secondary_buffer_count: 1,
                ..Default::default()
            },
        );

        let builder = AutoCommandBufferBuilder::secondary(
            &cb_allocator,
            queue.queue_family_index(),
            CommandBufferUsage::MultipleSubmit,
            Default::default(),
        )
        .unwrap();
        let secondary = builder.build().unwrap();

        {
            let mut builder = AutoCommandBufferBuilder::primary(
                &cb_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::SimultaneousUse,
            )
            .unwrap();

            // Recording the same non-concurrent secondary twice into the same
            // primary must fail on the second call.
            builder.execute_commands(secondary.clone()).unwrap();
            assert!(builder.execute_commands(secondary.clone()).is_err());
        }

        {
            let mut builder = AutoCommandBufferBuilder::primary(
                &cb_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::SimultaneousUse,
            )
            .unwrap();
            builder.execute_commands(secondary.clone()).unwrap();
            let cb1 = builder.build().unwrap();

            let mut builder = AutoCommandBufferBuilder::primary(
                &cb_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::SimultaneousUse,
            )
            .unwrap();

            // Fails while `cb1` (which also records the secondary) is alive...
            assert!(builder.execute_commands(secondary.clone()).is_err());

            // ...but succeeds once `cb1` is dropped.
            std::mem::drop(cb1);
            builder.execute_commands(secondary).unwrap();
        }
    }

    /// Copying a buffer onto itself with disjoint regions ([0, 2) -> [2, 4))
    /// is accepted and produces the expected result.
    ///
    /// NOTE(review): the copied regions here do NOT overlap, while the
    /// sibling test `buffer_self_copy_not_overlapping` uses regions that DO
    /// overlap — the two test names appear to be swapped. Worth renaming in a
    /// follow-up.
    #[test]
    fn buffer_self_copy_overlapping() {
        let (device, queue) = gfx_dev_and_queue!();

        let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
        let source = Buffer::from_iter(
            memory_allocator,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC | BufferUsage::TRANSFER_DST,
                ..Default::default()
            },
            AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_HOST
                    | MemoryTypeFilter::HOST_RANDOM_ACCESS,
                ..Default::default()
            },
            [0_u32, 1, 2, 3].iter().copied(),
        )
        .unwrap();

        let cb_allocator = StandardCommandBufferAllocator::new(device, Default::default());
        let mut builder = AutoCommandBufferBuilder::primary(
            &cb_allocator,
            queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )
        .unwrap();

        // Copy elements [0, 2) onto [2, 4) of the same buffer.
        builder
            .copy_buffer(CopyBufferInfoTyped {
                regions: [BufferCopy {
                    src_offset: 0,
                    dst_offset: 2,
                    size: 2,
                    ..Default::default()
                }]
                .into(),
                ..CopyBufferInfoTyped::buffers(source.clone(), source.clone())
            })
            .unwrap();

        let cb = builder.build().unwrap();

        let future = cb
            .execute(queue)
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap();
        future.wait(None).unwrap();

        let result = source.read().unwrap();

        // [0, 1] was copied over the tail, yielding [0, 1, 0, 1].
        assert_eq!(*result, [0_u32, 1, 0, 1]);
    }

    /// A self-copy whose source ([0, 2)) and destination ([1, 3)) regions
    /// overlap must be rejected at record time.
    ///
    /// NOTE(review): despite its name, this test uses OVERLAPPING regions —
    /// see the naming note on `buffer_self_copy_overlapping` above.
    #[test]
    fn buffer_self_copy_not_overlapping() {
        let (device, queue) = gfx_dev_and_queue!();

        let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
        let source = Buffer::from_iter(
            memory_allocator,
            BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC | BufferUsage::TRANSFER_DST,
                ..Default::default()
            },
            AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_HOST
                    | MemoryTypeFilter::HOST_RANDOM_ACCESS,
                ..Default::default()
            },
            [0_u32, 1, 2, 3].iter().copied(),
        )
        .unwrap();

        let cb_allocator = StandardCommandBufferAllocator::new(device, Default::default());
        let mut builder = AutoCommandBufferBuilder::primary(
            &cb_allocator,
            queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )
        .unwrap();

        // Source range [0, 2) overlaps destination range [1, 3) — error.
        assert!(builder
            .copy_buffer(CopyBufferInfoTyped {
                regions: [BufferCopy {
                    src_offset: 0,
                    dst_offset: 1,
                    size: 2,
                    ..Default::default()
                }]
                .into(),
                ..CopyBufferInfoTyped::buffers(source.clone(), source)
            })
            .is_err());
    }

    /// Records two `SimultaneousUse` secondaries that both fill the same
    /// buffer into one primary via the unchecked (unsafe) path, which skips
    /// the conflict validation that `execute_commands` would perform.
    #[test]
    fn secondary_conflicting_writes() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let cb_allocator = StandardCommandBufferAllocator::new(
                device.clone(),
                StandardCommandBufferAllocatorCreateInfo {
                    secondary_buffer_count: 1,
                    ..Default::default()
                },
            );

            // An empty primary, submitted and waited on below.
            let cbb = AutoCommandBufferBuilder::primary(
                &cb_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::OneTimeSubmit,
            )
            .unwrap();

            let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
            let buffer = Buffer::from_data(
                memory_allocator,
                BufferCreateInfo {
                    usage: BufferUsage::TRANSFER_DST,
                    ..Default::default()
                },
                AllocationCreateInfo {
                    memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                        | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                    ..Default::default()
                },
                0u32,
            )
            .unwrap();

            cbb.build()
                .unwrap()
                .execute(queue.clone())
                .unwrap()
                .then_signal_fence_and_flush()
                .unwrap()
                .wait(None)
                .unwrap();

            // Two secondaries, each writing 42 into the same buffer.
            let secondary = (0..2)
                .map(|_| {
                    let mut builder = AutoCommandBufferBuilder::secondary(
                        &cb_allocator,
                        queue.queue_family_index(),
                        CommandBufferUsage::SimultaneousUse,
                        Default::default(),
                    )
                    .unwrap();
                    builder
                        .fill_buffer(buffer.clone().into_slice(), 42)
                        .unwrap();
                    builder.build().unwrap()
                })
                .collect::<Vec<_>>();

            {
                let mut builder = AutoCommandBufferBuilder::primary(
                    &cb_allocator,
                    queue.queue_family_index(),
                    CommandBufferUsage::SimultaneousUse,
                )
                .unwrap();

                // One unchecked `execute_commands` call per secondary.
                secondary.iter().cloned().for_each(|secondary| {
                    builder.execute_commands_unchecked([secondary as _].into_iter().collect());
                });

                let _primary = builder.build().unwrap();
            }
            {
                let mut builder = AutoCommandBufferBuilder::primary(
                    &cb_allocator,
                    queue.queue_family_index(),
                    CommandBufferUsage::SimultaneousUse,
                )
                .unwrap();

                // Both secondaries recorded in a single unchecked call.
                builder.execute_commands_unchecked(
                    secondary
                        .into_iter()
                        .map(|secondary| secondary as _)
                        .collect(),
                );
            }
        }
    }

    /// Binding a vertex buffer at binding index 1 populates exactly that
    /// slot of the builder's tracked vertex-buffer state.
    #[test]
    fn vertex_buffer_binding() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let cb_allocator =
                StandardCommandBufferAllocator::new(device.clone(), Default::default());
            let mut sync = AutoCommandBufferBuilder::primary(
                &cb_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();

            let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
            let buf = Buffer::from_data(
                memory_allocator,
                BufferCreateInfo {
                    usage: BufferUsage::VERTEX_BUFFER,
                    ..Default::default()
                },
                AllocationCreateInfo {
                    memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                        | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                    ..Default::default()
                },
                0u32,
            )
            .unwrap();

            // Bind at index 1; indices 0 and 2 must remain unset.
            sync.bind_vertex_buffers_unchecked(1, buf);

            assert!(!sync.builder_state.vertex_buffers.contains_key(&0));
            assert!(sync.builder_state.vertex_buffers.contains_key(&1));
            assert!(!sync.builder_state.vertex_buffers.contains_key(&2));
        }
    }

    /// Exercises the builder's tracked descriptor-set state: binding fills
    /// only the targeted set slot, rebinding with the same (compatible)
    /// layout preserves other slots, and rebinding with a layout that
    /// differs at set 0 invalidates the previously bound set 0.
    #[test]
    fn descriptor_set_binding() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let cb_allocator =
                StandardCommandBufferAllocator::new(device.clone(), Default::default());
            let mut sync = AutoCommandBufferBuilder::primary(
                &cb_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();

            // A single-binding (sampler) set layout, used twice in the
            // pipeline layout below.
            let set_layout = DescriptorSetLayout::new(
                device.clone(),
                DescriptorSetLayoutCreateInfo {
                    bindings: [(
                        0,
                        DescriptorSetLayoutBinding {
                            stages: ShaderStages::all_graphics(),
                            ..DescriptorSetLayoutBinding::descriptor_type(DescriptorType::Sampler)
                        },
                    )]
                    .into(),
                    ..Default::default()
                },
            )
            .unwrap();
            let pipeline_layout = PipelineLayout::new(
                device.clone(),
                PipelineLayoutCreateInfo {
                    set_layouts: [set_layout.clone(), set_layout.clone()].into(),
                    ..Default::default()
                },
            )
            .unwrap();

            let ds_allocator =
                StandardDescriptorSetAllocator::new(device.clone(), Default::default());

            let set = PersistentDescriptorSet::new(
                &ds_allocator,
                set_layout.clone(),
                [WriteDescriptorSet::sampler(
                    0,
                    Sampler::new(device.clone(), SamplerCreateInfo::simple_repeat_linear())
                        .unwrap(),
                )],
                [],
            )
            .unwrap();

            // Bind at graphics set index 1 only.
            sync.bind_descriptor_sets_unchecked(
                PipelineBindPoint::Graphics,
                pipeline_layout.clone(),
                1,
                set.clone(),
            );

            // Compute bind point is untouched; graphics has only set 1.
            assert!(!sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Compute)
                .map_or(false, |state| state.descriptor_sets.contains_key(&0)));
            assert!(!sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&0)));
            assert!(sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&1)));
            assert!(!sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&2)));

            // Binding set 0 with the same pipeline layout keeps set 1 bound.
            sync.bind_descriptor_sets_unchecked(
                PipelineBindPoint::Graphics,
                pipeline_layout,
                0,
                set,
            );

            assert!(sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&0)));
            assert!(sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&1)));

            // A new pipeline layout whose set 0 is an (incompatible) empty
            // layout, with the sampler layout at set 1.
            let pipeline_layout = PipelineLayout::new(
                device.clone(),
                PipelineLayoutCreateInfo {
                    set_layouts: [
                        DescriptorSetLayout::new(device.clone(), Default::default()).unwrap(),
                        set_layout.clone(),
                    ]
                    .into(),
                    ..Default::default()
                },
            )
            .unwrap();

            let set = PersistentDescriptorSet::new(
                &ds_allocator,
                set_layout,
                [WriteDescriptorSet::sampler(
                    0,
                    Sampler::new(device, SamplerCreateInfo::simple_repeat_linear()).unwrap(),
                )],
                [],
            )
            .unwrap();

            // Binding set 1 with the differing layout invalidates set 0.
            sync.bind_descriptor_sets_unchecked(
                PipelineBindPoint::Graphics,
                pipeline_layout,
                1,
                set,
            );

            assert!(!sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&0)));
            assert!(sync
                .builder_state
                .descriptor_sets
                .get(&PipelineBindPoint::Graphics)
                .map_or(false, |state| state.descriptor_sets.contains_key(&1)));
        }
    }
}