use crate::core::Device;
use ash::vk;
use thiserror::Error;
/// Errors produced by [`CommandPool`] operations.
#[derive(Debug, Error)]
pub enum CommandPoolError {
    /// `vkCreateCommandPool` returned an error (see [`CommandPool::new`]).
    #[error("Command pool creation failed: {0}")]
    CreationFailed(vk::Result),
    /// `vkAllocateCommandBuffers` returned an error (see [`CommandPool::allocate`]).
    #[error("Command buffer allocation failed: {0}")]
    AllocationFailed(vk::Result),
    /// `vkResetCommandPool` returned an error (see [`CommandPool::reset`]).
    #[error("Command pool reset failed: {0}")]
    ResetFailed(vk::Result),
}
/// Errors produced by [`CommandBuffer`] recording operations.
#[derive(Debug, Error)]
pub enum CommandBufferError {
    /// `vkBeginCommandBuffer` returned an error.
    // NOTE(review): `CommandBuffer::reset` also maps its failure to this
    // variant because no reset-specific variant exists, so a failed reset
    // is reported as "Begin recording failed" — consider adding a
    // `ResetFailed(vk::Result)` variant.
    #[error("Begin recording failed: {0}")]
    BeginFailed(vk::Result),
    /// `vkEndCommandBuffer` returned an error.
    #[error("End recording failed: {0}")]
    EndFailed(vk::Result),
    /// Declared for state-tracking errors; not constructed anywhere in this file.
    #[error("Invalid command buffer state")]
    InvalidState,
}
/// Owning wrapper around a `vk::CommandPool`.
///
/// Destruction is explicit via [`CommandPool::destroy`]; the `Drop` impl
/// only prints a leak warning and does not free the pool.
pub struct CommandPool {
    // Raw Vulkan pool handle. Note: `destroy()` does not reset this to null.
    pool: vk::CommandPool,
    // Queue family index the pool was created against.
    family_index: u32,
}
impl CommandPool {
    /// Creates a command pool for `queue_family_index` with the given `flags`.
    ///
    /// # Errors
    /// Returns [`CommandPoolError::CreationFailed`] if `vkCreateCommandPool`
    /// fails.
    pub fn new(
        device: &Device,
        queue_family_index: u32,
        flags: vk::CommandPoolCreateFlags,
    ) -> Result<Self, CommandPoolError> {
        let create_info = vk::CommandPoolCreateInfo {
            queue_family_index,
            flags,
            ..Default::default()
        };
        // SAFETY: `create_info` is fully initialized and `device` is assumed
        // to wrap a live logical device.
        let pool = unsafe {
            device
                .handle()
                .create_command_pool(&create_info, None)
                .map_err(CommandPoolError::CreationFailed)?
        };
        Ok(Self {
            pool,
            family_index: queue_family_index,
        })
    }

    /// Raw Vulkan handle of this pool.
    #[inline]
    pub fn handle(&self) -> vk::CommandPool {
        self.pool
    }

    /// Queue family index this pool was created for.
    #[inline]
    pub fn family_index(&self) -> u32 {
        self.family_index
    }

    /// Allocates `count` command buffers of the given `level` from this pool.
    ///
    /// # Errors
    /// Returns [`CommandPoolError::AllocationFailed`] if
    /// `vkAllocateCommandBuffers` fails.
    pub fn allocate(
        &self,
        device: &Device,
        level: vk::CommandBufferLevel,
        count: u32,
    ) -> Result<Vec<CommandBuffer>, CommandPoolError> {
        let alloc_info = vk::CommandBufferAllocateInfo {
            command_pool: self.pool,
            level,
            command_buffer_count: count,
            ..Default::default()
        };
        // SAFETY: `alloc_info` references this pool, which is alive; `device`
        // is assumed to wrap a live logical device.
        let buffers = unsafe {
            device
                .handle()
                .allocate_command_buffers(&alloc_info)
                .map_err(CommandPoolError::AllocationFailed)?
        };
        Ok(buffers
            .into_iter()
            .map(|buffer| CommandBuffer { buffer, level })
            .collect())
    }

    /// Allocates a single primary command buffer.
    ///
    /// # Errors
    /// Propagates [`CommandPoolError::AllocationFailed`] from [`Self::allocate`].
    pub fn allocate_primary(&self, device: &Device) -> Result<CommandBuffer, CommandPoolError> {
        self.allocate_one(device, vk::CommandBufferLevel::PRIMARY)
    }

    /// Allocates a single secondary command buffer.
    ///
    /// # Errors
    /// Propagates [`CommandPoolError::AllocationFailed`] from [`Self::allocate`].
    pub fn allocate_secondary(&self, device: &Device) -> Result<CommandBuffer, CommandPoolError> {
        self.allocate_one(device, vk::CommandBufferLevel::SECONDARY)
    }

    /// Shared implementation for the single-buffer allocation helpers.
    fn allocate_one(
        &self,
        device: &Device,
        level: vk::CommandBufferLevel,
    ) -> Result<CommandBuffer, CommandPoolError> {
        let mut buffers = self.allocate(device, level, 1)?;
        // `allocate` was asked for exactly one buffer; `pop` takes it without
        // the front-shift of `remove(0)` and cannot panic here.
        Ok(buffers
            .pop()
            .expect("allocate(_, _, 1) returned exactly one command buffer"))
    }

    /// Resets the pool, returning all allocated buffers to the initial state.
    ///
    /// # Errors
    /// Returns [`CommandPoolError::ResetFailed`] if `vkResetCommandPool` fails.
    pub fn reset(
        &self,
        device: &Device,
        flags: vk::CommandPoolResetFlags,
    ) -> Result<(), CommandPoolError> {
        // SAFETY: the caller must ensure no command buffer from this pool is
        // pending execution, per the Vulkan spec for vkResetCommandPool.
        unsafe {
            device
                .handle()
                .reset_command_pool(self.pool, flags)
                .map_err(CommandPoolError::ResetFailed)
        }
    }

    /// Destroys the underlying Vulkan pool. Must be called before drop.
    ///
    /// NOTE(review): this takes `&self` and does not clear `self.pool`, so the
    /// leak warning in this type's `Drop` impl still fires even after a
    /// correct `destroy()`. A `&mut self` signature that nulls the handle
    /// would fix that, but would break existing `&self` callers.
    pub fn destroy(&self, device: &Device) {
        // SAFETY: the caller must ensure the pool is no longer in use and
        // must not use this pool (or its buffers) afterwards.
        unsafe {
            device.handle().destroy_command_pool(self.pool, None);
        }
    }
}
impl Drop for CommandPool {
    /// Leak detector: warns when a pool is dropped without explicit cleanup.
    fn drop(&mut self) {
        // NOTE(review): `destroy()` takes `&self` and never clears
        // `self.pool`, so for any successfully created pool this condition is
        // always true — the warning fires even after a correct `destroy()`.
        // Consider having `destroy` take `&mut self` and set the handle to
        // `vk::CommandPool::null()`.
        if self.pool != vk::CommandPool::null() {
            eprintln!(
                "WARNING: CommandPool dropped without calling .destroy() - potential memory leak"
            );
        }
    }
}
/// Wrapper around a `vk::CommandBuffer` handle plus the level it was
/// allocated with.
///
/// Has no `Drop` impl of its own; buffers are reclaimed through their pool
/// (see [`CommandPool::reset`] / [`CommandPool::destroy`]).
pub struct CommandBuffer {
    // Raw Vulkan command buffer handle.
    buffer: vk::CommandBuffer,
    // PRIMARY or SECONDARY, as passed at allocation time.
    level: vk::CommandBufferLevel,
}
impl CommandBuffer {
    /// Wraps an externally obtained raw handle (crate-internal escape hatch
    /// for buffers not allocated via [`CommandPool::allocate`]).
    pub(crate) fn from_handle(buffer: vk::CommandBuffer, level: vk::CommandBufferLevel) -> Self {
        Self { buffer, level }
    }
    /// Raw Vulkan handle of this command buffer.
    #[inline]
    pub fn handle(&self) -> vk::CommandBuffer {
        self.buffer
    }
    /// Level (primary/secondary) this buffer was allocated with.
    #[inline]
    pub fn level(&self) -> vk::CommandBufferLevel {
        self.level
    }
    /// Begins recording with the given usage `flags`.
    ///
    /// # Errors
    /// Returns [`CommandBufferError::BeginFailed`] if `vkBeginCommandBuffer`
    /// fails.
    pub fn begin(
        &self,
        device: &Device,
        flags: vk::CommandBufferUsageFlags,
    ) -> Result<(), CommandBufferError> {
        let begin_info = vk::CommandBufferBeginInfo {
            flags,
            ..Default::default()
        };
        // SAFETY: `begin_info` is fully initialized; caller must ensure the
        // buffer is not already recording or pending execution.
        unsafe {
            device
                .handle()
                .begin_command_buffer(self.buffer, &begin_info)
                .map_err(CommandBufferError::BeginFailed)
        }
    }
    /// Ends recording.
    ///
    /// # Errors
    /// Returns [`CommandBufferError::EndFailed`] if `vkEndCommandBuffer` fails.
    pub fn end(&self, device: &Device) -> Result<(), CommandBufferError> {
        // SAFETY: caller must ensure the buffer is in the recording state.
        unsafe {
            device
                .handle()
                .end_command_buffer(self.buffer)
                .map_err(CommandBufferError::EndFailed)
        }
    }
    /// Resets this individual command buffer.
    ///
    /// # Errors
    /// Returns [`CommandBufferError::BeginFailed`] if `vkResetCommandBuffer`
    /// fails.
    // NOTE(review): a reset failure is mapped to `BeginFailed` because
    // `CommandBufferError` has no reset variant; the rendered message
    // ("Begin recording failed") is misleading here. Fixing this requires a
    // new enum variant plus this mapping changed together.
    pub fn reset(
        &self,
        device: &Device,
        flags: vk::CommandBufferResetFlags,
    ) -> Result<(), CommandBufferError> {
        // SAFETY: caller must ensure the buffer is not pending execution and
        // that its pool was created with RESET_COMMAND_BUFFER.
        unsafe {
            device
                .handle()
                .reset_command_buffer(self.buffer, flags)
                .map_err(CommandBufferError::BeginFailed)
        }
    }
    /// Records `vkCmdBindPipeline`.
    #[inline]
    pub fn bind_pipeline(
        &self,
        device: &Device,
        bind_point: vk::PipelineBindPoint,
        pipeline: vk::Pipeline,
    ) {
        // SAFETY: caller must ensure the buffer is recording and `pipeline`
        // is a valid handle.
        unsafe {
            device
                .handle()
                .cmd_bind_pipeline(self.buffer, bind_point, pipeline);
        }
    }
    /// Records `vkCmdBindDescriptorSets`.
    #[inline]
    pub fn bind_descriptor_sets(
        &self,
        device: &Device,
        bind_point: vk::PipelineBindPoint,
        layout: vk::PipelineLayout,
        first_set: u32,
        descriptor_sets: &[vk::DescriptorSet],
        dynamic_offsets: &[u32],
    ) {
        // SAFETY: caller must ensure the buffer is recording and all handles
        // are valid and compatible with `layout`.
        unsafe {
            device.handle().cmd_bind_descriptor_sets(
                self.buffer,
                bind_point,
                layout,
                first_set,
                descriptor_sets,
                dynamic_offsets,
            );
        }
    }
    /// Records `vkCmdBindVertexBuffers`.
    #[inline]
    pub fn bind_vertex_buffers(
        &self,
        device: &Device,
        first_binding: u32,
        buffers: &[vk::Buffer],
        offsets: &[vk::DeviceSize],
    ) {
        // SAFETY: caller must ensure the buffer is recording; `buffers` and
        // `offsets` are expected to have equal lengths per the Vulkan spec.
        unsafe {
            device
                .handle()
                .cmd_bind_vertex_buffers(self.buffer, first_binding, buffers, offsets);
        }
    }
    /// Records `vkCmdBindIndexBuffer`.
    #[inline]
    pub fn bind_index_buffer(
        &self,
        device: &Device,
        buffer: vk::Buffer,
        offset: vk::DeviceSize,
        index_type: vk::IndexType,
    ) {
        // SAFETY: caller must ensure the buffer is recording and `buffer` is
        // a valid index buffer.
        unsafe {
            device
                .handle()
                .cmd_bind_index_buffer(self.buffer, buffer, offset, index_type);
        }
    }
    /// Records a non-indexed `vkCmdDraw`.
    #[inline]
    pub fn draw(
        &self,
        device: &Device,
        vertex_count: u32,
        instance_count: u32,
        first_vertex: u32,
        first_instance: u32,
    ) {
        // SAFETY: caller must ensure the buffer is recording inside an active
        // render pass / rendering scope with a graphics pipeline bound.
        unsafe {
            device.handle().cmd_draw(
                self.buffer,
                vertex_count,
                instance_count,
                first_vertex,
                first_instance,
            );
        }
    }
    /// Records an indexed `vkCmdDrawIndexed`.
    #[inline]
    pub fn draw_indexed(
        &self,
        device: &Device,
        index_count: u32,
        instance_count: u32,
        first_index: u32,
        vertex_offset: i32,
        first_instance: u32,
    ) {
        // SAFETY: caller must ensure the buffer is recording with an index
        // buffer and graphics pipeline bound.
        unsafe {
            device.handle().cmd_draw_indexed(
                self.buffer,
                index_count,
                instance_count,
                first_index,
                vertex_offset,
                first_instance,
            );
        }
    }
    /// Records `vkCmdDispatch` for compute work.
    #[inline]
    pub fn dispatch(
        &self,
        device: &Device,
        group_count_x: u32,
        group_count_y: u32,
        group_count_z: u32,
    ) {
        // SAFETY: caller must ensure the buffer is recording with a compute
        // pipeline bound.
        unsafe {
            device
                .handle()
                .cmd_dispatch(self.buffer, group_count_x, group_count_y, group_count_z);
        }
    }
    /// Records `vkCmdBeginRendering` (dynamic rendering).
    #[inline]
    pub fn begin_rendering(&self, device: &Device, rendering_info: &vk::RenderingInfo) {
        // SAFETY: caller must ensure `rendering_info` and all handles it
        // references are valid; requires the dynamic rendering feature.
        unsafe {
            device
                .handle()
                .cmd_begin_rendering(self.buffer, rendering_info);
        }
    }
    /// Records `vkCmdEndRendering`, closing the scope opened by
    /// [`Self::begin_rendering`].
    #[inline]
    pub fn end_rendering(&self, device: &Device) {
        // SAFETY: caller must ensure a matching begin_rendering was recorded.
        unsafe {
            device.handle().cmd_end_rendering(self.buffer);
        }
    }
    /// Records `vkCmdPipelineBarrier` with the given barrier sets.
    #[allow(clippy::too_many_arguments)]
    #[inline]
    pub fn pipeline_barrier(
        &self,
        device: &Device,
        src_stage_mask: vk::PipelineStageFlags,
        dst_stage_mask: vk::PipelineStageFlags,
        dependency_flags: vk::DependencyFlags,
        memory_barriers: &[vk::MemoryBarrier],
        buffer_barriers: &[vk::BufferMemoryBarrier],
        image_barriers: &[vk::ImageMemoryBarrier],
    ) {
        // SAFETY: caller must ensure the buffer is recording and each barrier
        // struct references valid resources.
        unsafe {
            device.handle().cmd_pipeline_barrier(
                self.buffer,
                src_stage_mask,
                dst_stage_mask,
                dependency_flags,
                memory_barriers,
                buffer_barriers,
                image_barriers,
            );
        }
    }
    /// Records `vkCmdCopyBuffer` for the given `regions`.
    #[inline]
    pub fn copy_buffer(
        &self,
        device: &Device,
        src_buffer: vk::Buffer,
        dst_buffer: vk::Buffer,
        regions: &[vk::BufferCopy],
    ) {
        // SAFETY: caller must ensure both buffers are valid and each region
        // lies within their bounds.
        unsafe {
            device
                .handle()
                .cmd_copy_buffer(self.buffer, src_buffer, dst_buffer, regions);
        }
    }
    /// Records `vkCmdCopyBufferToImage`; `dst_image` must already be in
    /// `dst_image_layout`.
    #[inline]
    pub fn copy_buffer_to_image(
        &self,
        device: &Device,
        src_buffer: vk::Buffer,
        dst_image: vk::Image,
        dst_image_layout: vk::ImageLayout,
        regions: &[vk::BufferImageCopy],
    ) {
        // SAFETY: caller must ensure the handles are valid and the image is
        // in the stated layout for every region.
        unsafe {
            device.handle().cmd_copy_buffer_to_image(
                self.buffer,
                src_buffer,
                dst_image,
                dst_image_layout,
                regions,
            );
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::{Device, DeviceCreateInfo, Instance, InstanceCreateInfo, QueueCreateInfo};

    /// Builds an instance/device pair plus a graphics-capable queue family
    /// index for the tests below. Panics if no physical device or no
    /// graphics queue family is available.
    fn create_test_device() -> (Instance, Device, u32) {
        let instance = Instance::new(InstanceCreateInfo {
            enable_validation: false,
            ..Default::default()
        })
        .unwrap();
        let physical_devices = instance.enumerate_physical_devices().unwrap();
        let physical_device = physical_devices[0];
        let graphics_family = unsafe {
            instance
                .get_physical_device_queue_family_properties(physical_device)
                .iter()
                .enumerate()
                .find(|(_, qf)| qf.queue_flags.contains(vk::QueueFlags::GRAPHICS))
                .map(|(i, _)| i as u32)
                .unwrap()
        };
        let device = Device::new(
            &instance,
            physical_device,
            DeviceCreateInfo {
                queue_create_infos: vec![QueueCreateInfo {
                    queue_family_index: graphics_family,
                    queue_count: 1,
                    queue_priorities: vec![1.0],
                }],
                ..Default::default()
            },
        )
        .unwrap();
        (instance, device, graphics_family)
    }

    /// Shared helper: every test needs the same resettable pool on the
    /// graphics family, so build it in one place instead of four copies.
    fn create_test_pool(device: &Device, graphics_family: u32) -> CommandPool {
        CommandPool::new(
            device,
            graphics_family,
            vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER,
        )
        .unwrap()
    }

    #[test]
    fn test_command_pool_creation() {
        let (_instance, device, graphics_family) = create_test_device();
        let pool = create_test_pool(&device, graphics_family);
        assert_ne!(pool.handle(), vk::CommandPool::null());
        assert_eq!(pool.family_index(), graphics_family);
        pool.destroy(&device);
    }

    #[test]
    fn test_command_buffer_allocation() {
        let (_instance, device, graphics_family) = create_test_device();
        let pool = create_test_pool(&device, graphics_family);
        let buffer = pool.allocate_primary(&device).unwrap();
        assert_ne!(buffer.handle(), vk::CommandBuffer::null());
        assert_eq!(buffer.level(), vk::CommandBufferLevel::PRIMARY);
        pool.destroy(&device);
    }

    #[test]
    fn test_command_buffer_recording() {
        let (_instance, device, graphics_family) = create_test_device();
        let pool = create_test_pool(&device, graphics_family);
        let buffer = pool.allocate_primary(&device).unwrap();
        assert!(buffer
            .begin(&device, vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT)
            .is_ok());
        assert!(buffer.end(&device).is_ok());
        pool.destroy(&device);
    }

    #[test]
    fn test_command_pool_reset() {
        let (_instance, device, graphics_family) = create_test_device();
        let pool = create_test_pool(&device, graphics_family);
        assert!(pool
            .reset(&device, vk::CommandPoolResetFlags::empty())
            .is_ok());
        pool.destroy(&device);
    }
}