use vulkane::safe::{
AccessFlags, AccessFlags2, AllocationCreateInfo, AllocationStrategy, AllocationUsage, Allocator,
ApiVersion, Buffer, BufferCopy, BufferCreateInfo, BufferImageCopy, BufferUsage, CommandPool,
ComputePipeline, DEBUG_UTILS_EXTENSION, DebugMessage, DebugMessageSeverity,
DefragmentationMove, DefragmentationPlan, DescriptorPool, DescriptorPoolSize,
DescriptorSetLayout, DescriptorSetLayoutBinding, DescriptorType, DeviceCreateInfo,
DeviceFeatures, DeviceMemory, Fence, Format, Image, Image2dCreateInfo, ImageBarrier,
ImageLayout, ImageUsage, ImageView, Instance, InstanceCreateInfo, KHRONOS_VALIDATION_LAYER,
MemoryPropertyFlags, PipelineCache, PipelineLayout, PipelineStage, PipelineStage2,
PipelineStatisticsFlags, PoolCreateInfo, PushConstantRange, QueryPool, QueueCreateInfo,
QueueFlags, Semaphore, SemaphoreKind, ShaderModule, ShaderStageFlags, SignalSemaphore,
SpecializationConstants, WaitSemaphore,
};
#[test]
fn test_safe_instance_creation_and_enumeration() {
    // Build a Vulkan 1.0 instance; skip the whole test when no loader/ICD exists.
    let create_info = InstanceCreateInfo {
        application_name: Some("vulkane test"),
        api_version: ApiVersion::V1_0,
        ..Default::default()
    };
    let instance = match Instance::new(create_info) {
        Ok(inst) => inst,
        Err(e) => {
            eprintln!("SKIP: Vulkan not available: {e}");
            return;
        }
    };
    let devices = instance.enumerate_physical_devices().unwrap();
    println!("Found {} physical device(s)", devices.len());
    // Every enumerated device must report a name, an API version >= 1.x,
    // and at least one queue family.
    for device in &devices {
        let properties = device.properties();
        assert!(!properties.device_name().is_empty());
        assert!(properties.api_version().major() >= 1);
        let families = device.queue_family_properties();
        assert!(
            !families.is_empty(),
            "every device has at least one queue family"
        );
    }
}
#[test]
fn test_xlib_xcb_surface_constructors_callable() {
    use vulkane::safe::Surface;
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(e) => {
            eprintln!("SKIP: cannot create Vulkan instance: {e}");
            return;
        }
    };
    // Null display/window handles: this test only checks that the wrapper
    // entry points are callable and fail in a defined way — it does not
    // attempt to create a usable surface.
    let fake_display: *mut std::ffi::c_void = std::ptr::null_mut();
    let fake_window: std::ffi::c_ulong = 0;
    match unsafe { Surface::from_xlib(&instance, fake_display, fake_window) } {
        // When the loader does not expose the Xlib extension, the wrapper must
        // report exactly which function was missing.
        Err(vulkane::safe::Error::MissingFunction(name)) => {
            assert_eq!(name, "vkCreateXlibSurfaceKHR");
        }
        // NOTE(review): some drivers appear to accept a null display without
        // validating it, so success is tolerated here.
        Ok(_) => {
        }
        Err(e) => panic!("expected MissingFunction or Ok, got {e:?}"),
    }
    // Same check for the XCB path.
    let fake_connection: *mut std::ffi::c_void = std::ptr::null_mut();
    let fake_xcb_window: u32 = 0;
    match unsafe { Surface::from_xcb(&instance, fake_connection, fake_xcb_window) } {
        Err(vulkane::safe::Error::MissingFunction(name)) => {
            assert_eq!(name, "vkCreateXcbSurfaceKHR");
        }
        Ok(_) => {}
        Err(e) => panic!("expected MissingFunction or Ok, got {e:?}"),
    }
}
#[test]
fn test_safe_device_creation_and_drop() {
    // Create a logical device on the first physical device with a transfer
    // queue, grab the queue, and make sure an idle device can wait_idle.
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(inst) => inst,
        Err(_) => {
            eprintln!("SKIP: Vulkan not available");
            return;
        }
    };
    let devices = instance.enumerate_physical_devices().unwrap();
    let Some(physical) = devices.first().cloned() else {
        eprintln!("SKIP: no physical devices");
        return;
    };
    let family = physical.find_queue_family(QueueFlags::TRANSFER).unwrap();
    let queue_infos = [QueueCreateInfo {
        queue_family_index: family,
        queue_priorities: vec![1.0],
    }];
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &queue_infos,
            ..Default::default()
        })
        .expect("device creation should succeed");
    let _queue = device.get_queue(family, 0);
    device
        .wait_idle()
        .expect("wait_idle on idle device should succeed");
}
#[test]
fn test_safe_buffer_with_host_visible_memory() {
    // Exercises buffer creation -> memory requirements -> allocation -> bind
    // -> map, then verifies that bytes written through one mapping are still
    // readable through a second mapping (memory is HOST_COHERENT, so no
    // explicit flush is performed).
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(_) => {
            eprintln!("SKIP: Vulkan not available");
            return;
        }
    };
    let physicals = instance.enumerate_physical_devices().unwrap();
    let Some(physical) = physicals.first().cloned() else {
        eprintln!("SKIP: no physical devices");
        return;
    };
    let queue_family = physical.find_queue_family(QueueFlags::TRANSFER).unwrap();
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: queue_family,
                queue_priorities: vec![1.0],
            }],
            ..Default::default()
        })
        .unwrap();
    let buffer = Buffer::new(
        &device,
        BufferCreateInfo {
            size: 256,
            usage: BufferUsage::TRANSFER_DST,
        },
    )
    .unwrap();
    assert_eq!(buffer.size(), 256);
    // Requirements may exceed the requested size; alignment is always a
    // power of two per the Vulkan memory model.
    let req = buffer.memory_requirements();
    assert!(req.size >= 256);
    assert!(req.alignment.is_power_of_two());
    let mem_type = physical
        .find_memory_type(
            req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .expect("host-visible memory should be available on any platform");
    let mut memory = DeviceMemory::allocate(&device, req.size, mem_type).unwrap();
    buffer.bind_memory(&memory, 0).unwrap();
    // First mapping: write a recognizable byte pattern, then drop (unmap).
    {
        let mut mapped = memory.map().unwrap();
        let slice = mapped.as_slice_mut();
        assert_eq!(slice.len() as u64, req.size);
        for (i, b) in slice.iter_mut().enumerate() {
            *b = (i & 0xFF) as u8;
        }
    }
    // Second mapping: the pattern must have persisted across the remap.
    {
        let mapped = memory.map().unwrap();
        let slice = mapped.as_slice();
        for (i, &b) in slice.iter().enumerate() {
            assert_eq!(b, (i & 0xFF) as u8, "byte {i} did not persist");
        }
    }
}
#[test]
fn test_safe_full_gpu_round_trip() {
    // Full command-buffer round trip: record a fill_buffer on the GPU,
    // submit with a fence, wait, then verify the 0xCAFEBABE pattern from a
    // host mapping.
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(_) => {
            eprintln!("SKIP: Vulkan not available");
            return;
        }
    };
    let physicals = instance.enumerate_physical_devices().unwrap();
    let Some(physical) = physicals.first().cloned() else {
        eprintln!("SKIP: no physical devices");
        return;
    };
    let queue_family = physical.find_queue_family(QueueFlags::TRANSFER).unwrap();
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: queue_family,
                queue_priorities: vec![1.0],
            }],
            ..Default::default()
        })
        .unwrap();
    let queue = device.get_queue(queue_family, 0);
    let buffer = Buffer::new(
        &device,
        BufferCreateInfo {
            size: 64,
            usage: BufferUsage::TRANSFER_DST,
        },
    )
    .unwrap();
    let req = buffer.memory_requirements();
    let mem_type = physical
        .find_memory_type(
            req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut memory = DeviceMemory::allocate(&device, req.size, mem_type).unwrap();
    buffer.bind_memory(&memory, 0).unwrap();
    // Zero the buffer first so the GPU fill is observable.
    {
        let mut m = memory.map().unwrap();
        m.as_slice_mut().fill(0);
    }
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    // Record: fill all 64 bytes with the 32-bit pattern 0xCAFEBABE.
    {
        let mut rec = cmd.begin().unwrap();
        rec.fill_buffer(&buffer, 0, 64, 0xCAFEBABE);
        rec.end().unwrap();
    }
    // Fence-gated submit; waiting on the fence orders the host read below
    // after the GPU work.
    let fence = Fence::new(&device).unwrap();
    queue.submit(&[&cmd], Some(&fence)).unwrap();
    fence.wait(u64::MAX).unwrap();
    // fill_buffer writes the u32 pattern in device (native) order, hence
    // to_ne_bytes for the expected 4-byte chunk.
    {
        let mapped = memory.map().unwrap();
        let slice = mapped.as_slice();
        let expected: [u8; 4] = 0xCAFEBABEu32.to_ne_bytes();
        for chunk in slice.chunks_exact(4) {
            assert_eq!(chunk, expected, "GPU did not write expected pattern");
        }
    }
}
#[test]
fn test_api_version_encoding() {
    // An encoded (variant 0) version must decode back to its components.
    let encoded = ApiVersion::new(0, 1, 3, 250);
    assert_eq!(
        (encoded.major(), encoded.minor(), encoded.patch()),
        (1, 3, 250)
    );
    // The V1_0 constant decodes to exactly 1.0.0.
    let base = ApiVersion::V1_0;
    assert_eq!((base.major(), base.minor(), base.patch()), (1, 0, 0));
}
#[test]
fn test_queue_flags_bitor_and_contains() {
    // OR-ing two flags yields a mask containing both — and nothing else.
    let mask = QueueFlags::GRAPHICS | QueueFlags::COMPUTE;
    assert!(mask.contains(QueueFlags::COMPUTE));
    assert!(mask.contains(QueueFlags::GRAPHICS));
    assert!(!mask.contains(QueueFlags::TRANSFER));
}
#[test]
fn test_memory_property_flags_bitor() {
    // A combined host-visible/coherent mask contains each OR-ed bit only.
    let mask = MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT;
    assert!(mask.contains(MemoryPropertyFlags::HOST_COHERENT));
    assert!(mask.contains(MemoryPropertyFlags::HOST_VISIBLE));
    assert!(!mask.contains(MemoryPropertyFlags::DEVICE_LOCAL));
}
#[test]
fn test_buffer_usage_bitor() {
    // BufferUsage combines with | and answers contains() per bit.
    let usage = BufferUsage::TRANSFER_DST | BufferUsage::STORAGE_BUFFER;
    assert!(usage.contains(BufferUsage::STORAGE_BUFFER));
    assert!(usage.contains(BufferUsage::TRANSFER_DST));
    assert!(!usage.contains(BufferUsage::TRANSFER_SRC));
}
#[test]
fn test_shader_module_from_spirv_bytes() {
    // Loads the pre-compiled square_buffer.spv and checks a ShaderModule can
    // be built from raw SPIR-V bytes. Skips (never panics) when the Vulkan
    // environment is unusable, matching the other tests in this file.
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(_) => {
            eprintln!("SKIP: Vulkan not available");
            return;
        }
    };
    let physicals = instance.enumerate_physical_devices().unwrap();
    let Some(physical) = physicals.first().cloned() else {
        eprintln!("SKIP: no physical devices");
        return;
    };
    // BUGFIX: was `.unwrap()`, which panicked the test (instead of skipping)
    // on devices without a compute queue — inconsistent with the graceful
    // `None => return` handling in test_compute_pipeline_full_dispatch.
    let Some(queue_family) = physical.find_queue_family(QueueFlags::COMPUTE) else {
        eprintln!("SKIP: no compute-capable queue family");
        return;
    };
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: queue_family,
                queue_priorities: vec![1.0],
            }],
            ..Default::default()
        })
        .unwrap();
    // The .spv fixture is produced by the compile_shader example.
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let spv = std::fs::read(format!("{manifest_dir}/examples/shaders/square_buffer.spv"))
        .expect("pre-compiled square_buffer.spv must exist (run compile_shader example)");
    let shader = ShaderModule::from_spirv_bytes(&device, &spv)
        .expect("ShaderModule::from_spirv_bytes should succeed for valid SPIR-V");
    assert!(shader.raw() != 0);
}
#[test]
fn test_compute_pipeline_full_dispatch() {
    // End-to-end compute path: seed a storage buffer with 0..N, run the
    // square_buffer shader over it, and verify every element was squared.
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(_) => return,
    };
    let physicals = instance.enumerate_physical_devices().unwrap();
    let Some(physical) = physicals.first().cloned() else {
        return;
    };
    // Silent skip when no compute-capable family exists.
    let queue_family = match physical.find_queue_family(QueueFlags::COMPUTE) {
        Some(q) => q,
        None => return,
    };
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: queue_family,
                queue_priorities: vec![1.0],
            }],
            ..Default::default()
        })
        .unwrap();
    let queue = device.get_queue(queue_family, 0);
    // N u32 elements, 4 bytes each.
    const N: u32 = 64;
    const SIZE: u64 = (N as u64) * 4;
    let buffer = Buffer::new(
        &device,
        BufferCreateInfo {
            size: SIZE,
            usage: BufferUsage::STORAGE_BUFFER,
        },
    )
    .unwrap();
    let req = buffer.memory_requirements();
    // Host-visible + coherent so the CPU can write inputs and read results
    // without explicit flush/invalidate calls.
    let mt = physical
        .find_memory_type(
            req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut memory = DeviceMemory::allocate(&device, req.size, mt).unwrap();
    buffer.bind_memory(&memory, 0).unwrap();
    // Seed the buffer with 0..N as little-endian u32s.
    {
        let mut m = memory.map().unwrap();
        let bytes = m.as_slice_mut();
        for i in 0..N as usize {
            let v = i as u32;
            bytes[i * 4..(i + 1) * 4].copy_from_slice(&v.to_le_bytes());
        }
    }
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let spv = std::fs::read(format!("{manifest_dir}/examples/shaders/square_buffer.spv")).unwrap();
    let shader = ShaderModule::from_spirv_bytes(&device, &spv).unwrap();
    // Single storage-buffer binding at binding=0, visible to the compute stage.
    let set_layout = DescriptorSetLayout::new(
        &device,
        &[DescriptorSetLayoutBinding {
            binding: 0,
            descriptor_type: DescriptorType::STORAGE_BUFFER,
            descriptor_count: 1,
            stage_flags: ShaderStageFlags::COMPUTE,
        }],
    )
    .unwrap();
    let pool = DescriptorPool::new(
        &device,
        1,
        &[DescriptorPoolSize {
            descriptor_type: DescriptorType::STORAGE_BUFFER,
            descriptor_count: 1,
        }],
    )
    .unwrap();
    let dset = pool.allocate(&set_layout).unwrap();
    dset.write_buffer(0, DescriptorType::STORAGE_BUFFER, &buffer, 0, SIZE);
    let pipeline_layout = PipelineLayout::new(&device, &[&set_layout]).unwrap();
    let pipeline = ComputePipeline::new(&device, &pipeline_layout, &shader, "main").unwrap();
    let cmd_pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = cmd_pool.allocate_primary().unwrap();
    {
        let mut rec = cmd.begin().unwrap();
        rec.bind_compute_pipeline(&pipeline);
        rec.bind_compute_descriptor_sets(&pipeline_layout, 0, &[&dset]);
        // div_ceil(64) assumes the shader's local workgroup size is 64 —
        // TODO confirm against the square_buffer shader source.
        rec.dispatch(N.div_ceil(64), 1, 1);
        // Make the compute shader's writes available to subsequent host reads.
        rec.memory_barrier(
            PipelineStage::COMPUTE_SHADER,
            PipelineStage::HOST,
            AccessFlags::SHADER_WRITE,
            AccessFlags::HOST_READ,
        );
        rec.end().unwrap();
    }
    let fence = Fence::new(&device).unwrap();
    queue.submit(&[&cmd], Some(&fence)).unwrap();
    fence.wait(u64::MAX).unwrap();
    // Read results back: each element must equal its index squared
    // (wrapping, to match u32 shader arithmetic).
    {
        let m = memory.map().unwrap();
        let bytes = m.as_slice();
        for i in 0..N as usize {
            let read = u32::from_le_bytes([
                bytes[i * 4],
                bytes[i * 4 + 1],
                bytes[i * 4 + 2],
                bytes[i * 4 + 3],
            ]);
            let expected = (i as u32).wrapping_mul(i as u32);
            assert_eq!(read, expected, "element {i}: GPU did not square correctly");
        }
    }
}
fn try_init_compute() -> Option<(
    Instance,
    vulkane::safe::PhysicalDevice,
    vulkane::safe::Device,
    vulkane::safe::Queue,
    u32,
)> {
    // Shared scaffolding for the tests below: an instance, the first physical
    // device, and a logical device/queue on a compute-capable family.
    // Returns None whenever any step is unavailable, meaning "skip the test".
    let instance = Instance::new(InstanceCreateInfo::default()).ok()?;
    let physical = instance.enumerate_physical_devices().ok()?.first()?.clone();
    let queue_family = physical.find_queue_family(QueueFlags::COMPUTE)?;
    let queue_infos = [QueueCreateInfo {
        queue_family_index: queue_family,
        queue_priorities: vec![1.0],
    }];
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &queue_infos,
            ..Default::default()
        })
        .ok()?;
    let queue = device.get_queue(queue_family, 0);
    Some((instance, physical, device, queue, queue_family))
}
#[test]
fn test_specialization_constants_builder() {
    // Each add_* call appends exactly one entry to the builder.
    let built = SpecializationConstants::new()
        .add_u32(0, 0xDEADBEEF)
        .add_i32(1, -1)
        .add_f32(2, 1.5)
        .add_bool(3, true);
    assert!(!built.is_empty());
    assert_eq!(built.len(), 4);
    // A fresh builder starts out empty.
    let fresh = SpecializationConstants::new();
    assert_eq!(fresh.len(), 0);
    assert!(fresh.is_empty());
}
#[test]
fn test_pipeline_statistics_flags_count() {
    // count() reports how many statistic bits are set in the mask.
    let flags = PipelineStatisticsFlags::INPUT_ASSEMBLY_VERTICES
        | PipelineStatisticsFlags::COMPUTE_SHADER_INVOCATIONS;
    assert_eq!(flags.count(), 2);
    assert!(!flags.contains(PipelineStatisticsFlags::FRAGMENT_SHADER_INVOCATIONS));
    assert!(flags.contains(PipelineStatisticsFlags::COMPUTE_SHADER_INVOCATIONS));
    // The empty mask has no bits set.
    assert_eq!(PipelineStatisticsFlags::NONE.count(), 0);
}
#[test]
fn test_buffer_copy_struct() {
    // BufferCopy is a plain-data struct; fields round-trip unchanged.
    let region = BufferCopy {
        src_offset: 16,
        dst_offset: 32,
        size: 64,
    };
    assert_eq!(region.size, 64);
    assert_eq!(region.dst_offset, 32);
    assert_eq!(region.src_offset, 16);
}
#[test]
fn test_async_compute_queue_helper_returns_compute_capable() {
    let Some((_inst, physical, _dev, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let families = physical.queue_family_properties();
    // The "dedicated compute" helper must still point at a compute-capable family.
    let compute = physical
        .find_dedicated_compute_queue()
        .expect("any compute device exposes a compute queue");
    assert!(
        families[compute as usize]
            .queue_flags()
            .contains(QueueFlags::COMPUTE)
    );
    // Same contract for the transfer helper, when one is reported at all.
    if let Some(transfer) = physical.find_dedicated_transfer_queue() {
        assert!(
            families[transfer as usize]
                .queue_flags()
                .contains(QueueFlags::TRANSFER)
        );
    }
}
#[test]
fn test_timestamp_period_is_nonneg() {
    let Some((_inst, physical, _dev, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // The reported timestamp period must be a finite, non-negative float.
    let period = physical.timestamp_period();
    assert!(period >= 0.0);
    assert!(period.is_finite());
}
#[test]
fn test_max_push_constants_size_meets_spec_minimum() {
    let Some((_inst, physical, _dev, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // The Vulkan spec guarantees maxPushConstantsSize >= 128 on every device.
    let props = physical.properties();
    let max = props.max_push_constants_size();
    assert!(max >= 128, "spec minimum is 128 bytes, got {max}");
}
#[test]
fn test_pipeline_layout_with_push_constants() {
    let Some((_inst, _physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // One storage-buffer binding visible to the compute stage.
    let bindings = [DescriptorSetLayoutBinding {
        binding: 0,
        descriptor_type: DescriptorType::STORAGE_BUFFER,
        descriptor_count: 1,
        stage_flags: ShaderStageFlags::COMPUTE,
    }];
    let set_layout = DescriptorSetLayout::new(&device, &bindings).unwrap();
    // 16 bytes of compute-stage push constants starting at offset 0.
    let range = PushConstantRange {
        stage_flags: ShaderStageFlags::COMPUTE,
        offset: 0,
        size: 16,
    };
    let layout_no = PipelineLayout::new(&device, &[&set_layout]).unwrap();
    let layout_pc =
        PipelineLayout::with_push_constants(&device, &[&set_layout], &[range]).unwrap();
    // Both constructors must yield live, distinct handles.
    assert!(layout_no.raw() != 0);
    assert!(layout_pc.raw() != 0);
    assert!(layout_no.raw() != layout_pc.raw());
}
#[test]
fn test_query_pool_timestamp_creation_and_metadata() {
    let Some((_inst, physical, device, _q, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // Timestamps are only meaningful on families reporting nonzero valid bits.
    let families = physical.queue_family_properties();
    if families[queue_family as usize].timestamp_valid_bits() == 0 {
        eprintln!("SKIP: queue family does not support timestamps");
        return;
    }
    // A 4-slot timestamp pool must be creatable and report its own size.
    let timestamps = QueryPool::timestamps(&device, 4).unwrap();
    assert!(timestamps.raw() != 0);
    assert_eq!(timestamps.query_count(), 4);
}
#[test]
fn test_copy_buffer_staging_round_trip() {
    // Copies 256 bytes GPU-side from a TRANSFER_SRC buffer to a TRANSFER_DST
    // buffer, then verifies the destination through a host mapping.
    let Some((_inst, physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // Host-visible, coherent source buffer filled with a recognizable pattern.
    let src = Buffer::new(
        &device,
        BufferCreateInfo {
            size: 256,
            usage: BufferUsage::TRANSFER_SRC,
        },
    )
    .unwrap();
    let src_req = src.memory_requirements();
    let src_mt = physical
        .find_memory_type(
            src_req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut src_mem = DeviceMemory::allocate(&device, src_req.size, src_mt).unwrap();
    src.bind_memory(&src_mem, 0).unwrap();
    {
        let mut m = src_mem.map().unwrap();
        let bytes = m.as_slice_mut();
        for (i, b) in bytes.iter_mut().enumerate() {
            // (i * 3 + 1) truncated to u8: distinct from the zeroed destination.
            *b = (i * 3 + 1) as u8;
        }
    }
    // Destination buffer, zeroed so a missed copy is detectable.
    let dst = Buffer::new(
        &device,
        BufferCreateInfo {
            size: 256,
            usage: BufferUsage::TRANSFER_DST,
        },
    )
    .unwrap();
    let dst_req = dst.memory_requirements();
    let dst_mt = physical
        .find_memory_type(
            dst_req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut dst_mem = DeviceMemory::allocate(&device, dst_req.size, dst_mt).unwrap();
    dst.bind_memory(&dst_mem, 0).unwrap();
    {
        let mut m = dst_mem.map().unwrap();
        m.as_slice_mut().fill(0);
    }
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    {
        let mut rec = cmd.begin().unwrap();
        rec.copy_buffer(
            &src,
            &dst,
            &[BufferCopy {
                src_offset: 0,
                dst_offset: 0,
                size: 256,
            }],
        );
        // BUGFIX: the source access mask of this transfer->host barrier was
        // AccessFlags::TRANSFER_READ, which does not make the copy's WRITES
        // available; Vulkan requires TRANSFER_WRITE as the srcAccessMask after
        // a buffer copy. This now matches the SHADER_WRITE -> HOST_READ
        // pattern used by the compute tests above.
        rec.memory_barrier(
            PipelineStage::TRANSFER,
            PipelineStage::HOST,
            AccessFlags::TRANSFER_WRITE,
            AccessFlags::HOST_READ,
        );
        rec.end().unwrap();
    }
    let fence = Fence::new(&device).unwrap();
    queue.submit(&[&cmd], Some(&fence)).unwrap();
    fence.wait(u64::MAX).unwrap();
    // Every byte of the destination must match the source pattern.
    {
        let m = dst_mem.map().unwrap();
        let bytes = m.as_slice();
        for (i, &b) in bytes.iter().enumerate() {
            assert_eq!(b, (i * 3 + 1) as u8, "byte {i} not copied correctly");
        }
    }
}
#[test]
fn test_dispatch_indirect_with_explicit_count() {
    // Same square-buffer compute test as above, but the workgroup counts come
    // from a GPU-side indirect buffer instead of a direct dispatch call.
    let Some((_inst, physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // N u32 elements, 4 bytes each.
    const N: u32 = 256;
    const SIZE: u64 = (N as u64) * 4;
    let buffer = Buffer::new(
        &device,
        BufferCreateInfo {
            size: SIZE,
            usage: BufferUsage::STORAGE_BUFFER,
        },
    )
    .unwrap();
    let req = buffer.memory_requirements();
    let mt = physical
        .find_memory_type(
            req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut memory = DeviceMemory::allocate(&device, req.size, mt).unwrap();
    buffer.bind_memory(&memory, 0).unwrap();
    // Seed the storage buffer with 0..N as little-endian u32s.
    {
        let mut m = memory.map().unwrap();
        let bytes = m.as_slice_mut();
        for i in 0..N as usize {
            let v = i as u32;
            bytes[i * 4..(i + 1) * 4].copy_from_slice(&v.to_le_bytes());
        }
    }
    // Indirect-arguments buffer; 16 bytes comfortably holds the 12-byte
    // dispatch command (x, y, z group counts).
    let indirect = Buffer::new(
        &device,
        BufferCreateInfo {
            size: 16,
            usage: BufferUsage::INDIRECT_BUFFER,
        },
    )
    .unwrap();
    let ireq = indirect.memory_requirements();
    let imt = physical
        .find_memory_type(
            ireq.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut imem = DeviceMemory::allocate(&device, ireq.size, imt).unwrap();
    indirect.bind_memory(&imem, 0).unwrap();
    // Group counts (4, 1, 1): with the shader's presumed local size of 64
    // this covers all 256 elements — TODO confirm against shader source.
    {
        let mut m = imem.map().unwrap();
        let b = m.as_slice_mut();
        b[0..4].copy_from_slice(&4u32.to_le_bytes());
        b[4..8].copy_from_slice(&1u32.to_le_bytes());
        b[8..12].copy_from_slice(&1u32.to_le_bytes());
    }
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let spv = std::fs::read(format!("{manifest_dir}/examples/shaders/square_buffer.spv")).unwrap();
    let shader = ShaderModule::from_spirv_bytes(&device, &spv).unwrap();
    // Single storage-buffer binding at binding=0 for the compute stage.
    let set_layout = DescriptorSetLayout::new(
        &device,
        &[DescriptorSetLayoutBinding {
            binding: 0,
            descriptor_type: DescriptorType::STORAGE_BUFFER,
            descriptor_count: 1,
            stage_flags: ShaderStageFlags::COMPUTE,
        }],
    )
    .unwrap();
    let dpool = DescriptorPool::new(
        &device,
        1,
        &[DescriptorPoolSize {
            descriptor_type: DescriptorType::STORAGE_BUFFER,
            descriptor_count: 1,
        }],
    )
    .unwrap();
    let dset = dpool.allocate(&set_layout).unwrap();
    dset.write_buffer(0, DescriptorType::STORAGE_BUFFER, &buffer, 0, SIZE);
    let pipeline_layout = PipelineLayout::new(&device, &[&set_layout]).unwrap();
    let pipeline = ComputePipeline::new(&device, &pipeline_layout, &shader, "main").unwrap();
    let cmd_pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = cmd_pool.allocate_primary().unwrap();
    {
        let mut rec = cmd.begin().unwrap();
        rec.bind_compute_pipeline(&pipeline);
        rec.bind_compute_descriptor_sets(&pipeline_layout, 0, &[&dset]);
        // Group counts are read from `indirect` at offset 0.
        rec.dispatch_indirect(&indirect, 0);
        // Make shader writes available to the host read below.
        rec.memory_barrier(
            PipelineStage::COMPUTE_SHADER,
            PipelineStage::HOST,
            AccessFlags::SHADER_WRITE,
            AccessFlags::HOST_READ,
        );
        rec.end().unwrap();
    }
    let fence = Fence::new(&device).unwrap();
    queue.submit(&[&cmd], Some(&fence)).unwrap();
    fence.wait(u64::MAX).unwrap();
    // Every element must have been squared (wrapping u32 arithmetic).
    {
        let m = memory.map().unwrap();
        let bytes = m.as_slice();
        for i in 0..N as usize {
            let read = u32::from_le_bytes([
                bytes[i * 4],
                bytes[i * 4 + 1],
                bytes[i * 4 + 2],
                bytes[i * 4 + 3],
            ]);
            assert_eq!(
                read,
                (i as u32).wrapping_mul(i as u32),
                "indirect dispatch did not square element {i}"
            );
        }
    }
}
#[test]
fn test_query_pool_records_timestamp_around_dispatch() {
    // Brackets a compute dispatch with two timestamp queries, then checks
    // both that the dispatch produced correct results and that the timestamp
    // delta converts to a finite nanosecond value.
    let Some((_inst, physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // Timestamps require nonzero valid bits on this queue family.
    let families = physical.queue_family_properties();
    if families[queue_family as usize].timestamp_valid_bits() == 0 {
        eprintln!("SKIP: queue family does not support timestamps");
        return;
    }
    const N: u32 = 64;
    const SIZE: u64 = (N as u64) * 4;
    let buffer = Buffer::new(
        &device,
        BufferCreateInfo {
            size: SIZE,
            usage: BufferUsage::STORAGE_BUFFER,
        },
    )
    .unwrap();
    let req = buffer.memory_requirements();
    let mt = physical
        .find_memory_type(
            req.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let mut memory = DeviceMemory::allocate(&device, req.size, mt).unwrap();
    buffer.bind_memory(&memory, 0).unwrap();
    // Seed with 0..N as little-endian u32s.
    {
        let mut m = memory.map().unwrap();
        let b = m.as_slice_mut();
        for i in 0..N as usize {
            b[i * 4..(i + 1) * 4].copy_from_slice(&(i as u32).to_le_bytes());
        }
    }
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let spv = std::fs::read(format!("{manifest_dir}/examples/shaders/square_buffer.spv")).unwrap();
    let shader = ShaderModule::from_spirv_bytes(&device, &spv).unwrap();
    let set_layout = DescriptorSetLayout::new(
        &device,
        &[DescriptorSetLayoutBinding {
            binding: 0,
            descriptor_type: DescriptorType::STORAGE_BUFFER,
            descriptor_count: 1,
            stage_flags: ShaderStageFlags::COMPUTE,
        }],
    )
    .unwrap();
    let dpool = DescriptorPool::new(
        &device,
        1,
        &[DescriptorPoolSize {
            descriptor_type: DescriptorType::STORAGE_BUFFER,
            descriptor_count: 1,
        }],
    )
    .unwrap();
    let dset = dpool.allocate(&set_layout).unwrap();
    dset.write_buffer(0, DescriptorType::STORAGE_BUFFER, &buffer, 0, SIZE);
    let pl = PipelineLayout::new(&device, &[&set_layout]).unwrap();
    let pipe = ComputePipeline::new(&device, &pl, &shader, "main").unwrap();
    // Two timestamp slots: one before, one after the dispatch.
    let qpool = QueryPool::timestamps(&device, 2).unwrap();
    let cmd_pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = cmd_pool.allocate_primary().unwrap();
    {
        let mut rec = cmd.begin().unwrap();
        // Queries must be reset before first use in the command buffer.
        rec.reset_query_pool(&qpool, 0, 2);
        rec.write_timestamp(PipelineStage::TOP_OF_PIPE, &qpool, 0);
        rec.bind_compute_pipeline(&pipe);
        rec.bind_compute_descriptor_sets(&pl, 0, &[&dset]);
        rec.dispatch(N.div_ceil(64), 1, 1);
        rec.write_timestamp(PipelineStage::BOTTOM_OF_PIPE, &qpool, 1);
        // Make shader writes available to the host read below.
        rec.memory_barrier(
            PipelineStage::COMPUTE_SHADER,
            PipelineStage::HOST,
            AccessFlags::SHADER_WRITE,
            AccessFlags::HOST_READ,
        );
        rec.end().unwrap();
    }
    let fence = Fence::new(&device).unwrap();
    queue.submit(&[&cmd], Some(&fence)).unwrap();
    fence.wait(u64::MAX).unwrap();
    let times = qpool.get_results_u64(0, 2).unwrap();
    assert_eq!(times.len(), 2);
    // The dispatch itself must still have produced correct results.
    {
        let m = memory.map().unwrap();
        let b = m.as_slice();
        for i in 0..N as usize {
            let v = u32::from_le_bytes([b[i * 4], b[i * 4 + 1], b[i * 4 + 2], b[i * 4 + 3]]);
            assert_eq!(v, (i as u32).wrapping_mul(i as u32));
        }
    }
    // Convert the tick delta to nanoseconds via the device's tick period.
    // wrapping_sub: timestamps may wrap within the family's valid bits.
    let period = physical.timestamp_period();
    let delta_ticks = times[1].wrapping_sub(times[0]) as f64;
    let delta_ns = delta_ticks * (period as f64);
    assert!(delta_ns.is_finite());
}
#[test]
fn test_uniform_buffer_descriptor_round_trip() {
    let Some((_inst, physical, device, _queue, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // A 64-byte uniform buffer backed by host-visible, coherent memory.
    let ubo = Buffer::new(
        &device,
        BufferCreateInfo {
            size: 64,
            usage: BufferUsage::UNIFORM_BUFFER,
        },
    )
    .unwrap();
    let requirements = ubo.memory_requirements();
    let memory_type = physical
        .find_memory_type(
            requirements.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .unwrap();
    let backing = DeviceMemory::allocate(&device, requirements.size, memory_type).unwrap();
    ubo.bind_memory(&backing, 0).unwrap();
    // Layout with a single uniform-buffer binding for the compute stage.
    let set_layout = DescriptorSetLayout::new(
        &device,
        &[DescriptorSetLayoutBinding {
            binding: 0,
            descriptor_type: DescriptorType::UNIFORM_BUFFER,
            descriptor_count: 1,
            stage_flags: ShaderStageFlags::COMPUTE,
        }],
    )
    .unwrap();
    let pool = DescriptorPool::new(
        &device,
        1,
        &[DescriptorPoolSize {
            descriptor_type: DescriptorType::UNIFORM_BUFFER,
            descriptor_count: 1,
        }],
    )
    .unwrap();
    // Allocating and writing the set must yield a live handle.
    let dset = pool.allocate(&set_layout).unwrap();
    dset.write_buffer(0, DescriptorType::UNIFORM_BUFFER, &ubo, 0, 64);
    assert!(dset.raw() != 0);
}
#[test]
fn test_debug_message_severity_label_and_bits() {
    // Each severity maps to a fixed human-readable label.
    for (severity, label) in [
        (DebugMessageSeverity::ERROR, "ERROR"),
        (DebugMessageSeverity::WARNING, "WARN"),
        (DebugMessageSeverity::INFO, "INFO"),
        (DebugMessageSeverity::VERBOSE, "VERBOSE"),
    ] {
        assert_eq!(severity.label(), label);
    }
    // WARNING_AND_ABOVE covers warnings and errors, but not info.
    let mask = DebugMessageSeverity::WARNING_AND_ABOVE;
    assert!(mask.contains(DebugMessageSeverity::WARNING));
    assert!(mask.contains(DebugMessageSeverity::ERROR));
    assert!(!mask.contains(DebugMessageSeverity::INFO));
    // ALL spans the full severity range.
    let all = DebugMessageSeverity::ALL;
    assert!(all.contains(DebugMessageSeverity::ERROR));
    assert!(all.contains(DebugMessageSeverity::VERBOSE));
}
#[test]
fn test_enumerate_layer_properties_succeeds_or_skips() {
    let layers = match Instance::enumerate_layer_properties() {
        Ok(l) => l,
        Err(e) => {
            eprintln!("SKIP: cannot load Vulkan library: {e}");
            return;
        }
    };
    // Every reported layer must carry a name and a plausible spec version.
    for layer in &layers {
        assert!(!layer.name().is_empty(), "layer name should not be empty");
        assert!(layer.spec_version().major() >= 1);
    }
    println!("Found {} instance layer(s)", layers.len());
}
#[test]
fn test_enumerate_instance_extension_properties() {
    let extensions = match Instance::enumerate_extension_properties() {
        Ok(e) => e,
        Err(e) => {
            eprintln!("SKIP: cannot load Vulkan library: {e}");
            return;
        }
    };
    // A working loader reports at least one extension, each with a name.
    assert!(!extensions.is_empty(), "expected at least one instance extension");
    for ext in &extensions {
        assert!(!ext.name().is_empty());
    }
}
#[test]
fn test_physical_device_enumerate_extension_properties() {
    let Some((_inst, physical, _dev, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // Device-level extensions may be absent entirely, but no name may be blank.
    for ext in &physical.enumerate_extension_properties().unwrap() {
        assert!(!ext.name().is_empty());
    }
}
#[test]
fn test_instance_with_no_layers_or_extensions() {
    // Explicit empty layer/extension slices must behave like the defaults.
    let info = InstanceCreateInfo {
        application_name: Some("vulkane-empty-lists"),
        enabled_layers: &[],
        enabled_extensions: &[],
        ..InstanceCreateInfo::default()
    };
    match Instance::new(info) {
        Ok(_inst) => {}
        Err(_) => eprintln!("SKIP: no Vulkan ICD"),
    }
}
#[test]
fn test_instance_with_unknown_layer_fails_cleanly() {
    // Requesting a nonexistent layer must produce an error, never success.
    let result = Instance::new(InstanceCreateInfo {
        application_name: Some("vulkane-bad-layer"),
        enabled_layers: &["VK_LAYER_THIS_DOES_NOT_EXIST_zzz"],
        ..InstanceCreateInfo::default()
    });
    match result {
        Err(e) => eprintln!("OK: enabling fake layer rejected with: {e}"),
        Ok(_) => panic!("loader should not have accepted a fake layer"),
    }
}
#[test]
fn test_instance_with_validation_when_available() {
    // Creates an instance with the Khronos validation layer and the debug
    // utils extension enabled, wiring a callback that counts warning/error
    // messages. Skips unless both layer and extension are installed.
    let layers = match Instance::enumerate_layer_properties() {
        Ok(l) => l,
        Err(_) => {
            eprintln!("SKIP: no Vulkan loader");
            return;
        }
    };
    if !layers.iter().any(|l| l.name() == KHRONOS_VALIDATION_LAYER) {
        eprintln!("SKIP: validation layer not installed");
        return;
    }
    let exts = Instance::enumerate_extension_properties().unwrap();
    if !exts.iter().any(|e| e.name() == DEBUG_UTILS_EXTENSION) {
        eprintln!("SKIP: debug utils extension not present");
        return;
    }
    use std::sync::Arc as StdArc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    // Shared counter: one handle for the test, one moved into the callback
    // (the callback may be invoked from loader threads, hence Arc + atomic).
    let counter = StdArc::new(AtomicUsize::new(0));
    let counter_cb = StdArc::clone(&counter);
    let info = InstanceCreateInfo {
        application_name: Some("vulkane-validation"),
        enabled_layers: &[KHRONOS_VALIDATION_LAYER],
        enabled_extensions: &[DEBUG_UTILS_EXTENSION],
        debug_callback: Some(Box::new(move |msg: &DebugMessage<'_>| {
            // Count only warnings and errors; ignore info/verbose chatter.
            if msg.severity.contains(DebugMessageSeverity::WARNING)
                || msg.severity.contains(DebugMessageSeverity::ERROR)
            {
                counter_cb.fetch_add(1, Ordering::Relaxed);
            }
        })),
        ..InstanceCreateInfo::default()
    };
    let instance = match Instance::new(info) {
        Ok(i) => i,
        Err(e) => {
            eprintln!("SKIP: validation instance creation failed: {e}");
            return;
        }
    };
    // No assertion on the count: instance creation alone need not emit any
    // messages — this only verifies the callback plumbing doesn't crash.
    let _ = counter.load(Ordering::Relaxed);
    drop(instance);
}
#[test]
fn test_instance_validation_constructor_when_available() {
    // Only meaningful when the Khronos validation layer is installed.
    let has_validation = Instance::enumerate_layer_properties()
        .map(|ls| ls.iter().any(|l| l.name() == KHRONOS_VALIDATION_LAYER))
        .unwrap_or(false);
    if !has_validation {
        eprintln!("SKIP: validation layer not installed");
        return;
    }
    // The convenience constructor should succeed; log (don't fail) otherwise.
    if let Err(e) = Instance::new(InstanceCreateInfo::validation()) {
        eprintln!("validation() constructor returned err: {e}");
    }
}
#[test]
fn test_image_usage_bitor_and_format_constants() {
    // A combined usage mask contains each OR-ed bit and nothing extra.
    let usage = ImageUsage::STORAGE | ImageUsage::TRANSFER_DST | ImageUsage::TRANSFER_SRC;
    for bit in [
        ImageUsage::STORAGE,
        ImageUsage::TRANSFER_DST,
        ImageUsage::TRANSFER_SRC,
    ] {
        assert!(usage.contains(bit));
    }
    assert!(!usage.contains(ImageUsage::SAMPLED));
    // Distinct format/layout constants must compare unequal.
    assert_ne!(Format::R8_UNORM, Format::R32_UINT);
    assert_ne!(ImageLayout::UNDEFINED, ImageLayout::GENERAL);
}
#[test]
fn test_buffer_image_copy_full_2d_helper() {
    // full_2d(w, h) describes a whole-image copy starting at buffer offset 0.
    let region = BufferImageCopy::full_2d(64, 32);
    assert_eq!(region.buffer_offset, 0);
    // Zero row length / image height selects tightly-packed buffer data.
    assert_eq!(region.buffer_row_length, 0);
    assert_eq!(region.buffer_image_height, 0);
    assert_eq!(region.image_offset, [0, 0, 0]);
    assert_eq!(region.image_extent, [64, 32, 1]);
}
#[test]
fn test_image_2d_creation_and_memory_binding() {
    // Creates a 64x64 R32_UINT storage image, binds backing memory, and
    // builds a 2D color view over it.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let image = Image::new_2d(
        &device,
        Image2dCreateInfo {
            format: Format::R32_UINT,
            width: 64,
            height: 64,
            usage: ImageUsage::STORAGE | ImageUsage::TRANSFER_DST | ImageUsage::TRANSFER_SRC,
        },
    )
    .unwrap();
    // Creation parameters must be reflected back by the accessors.
    assert_eq!(image.format(), Format::R32_UINT);
    assert_eq!(image.width(), 64);
    assert_eq!(image.height(), 64);
    assert!(image.raw() != 0);
    // At minimum width * height * 4 bytes for a 32-bit-per-texel format.
    let req = image.memory_requirements();
    assert!(req.size >= 64 * 64 * 4);
    assert!(req.alignment.is_power_of_two());
    // Prefer device-local memory; fall back to host-visible (e.g. software
    // implementations may not advertise DEVICE_LOCAL for this image).
    let mt = physical
        .find_memory_type(req.memory_type_bits, MemoryPropertyFlags::DEVICE_LOCAL)
        .or_else(|| {
            physical.find_memory_type(req.memory_type_bits, MemoryPropertyFlags::HOST_VISIBLE)
        })
        .expect("some memory type should back the image");
    let memory = DeviceMemory::allocate(&device, req.size, mt).unwrap();
    image.bind_memory(&memory, 0).unwrap();
    // A default 2D color view over the bound image must be creatable.
    let view = ImageView::new_2d_color(&image).unwrap();
    assert!(view.raw() != 0);
}
#[test]
// Round-trips a deterministic pixel pattern: host staging buffer -> GPU image
// (UNDEFINED -> TRANSFER_DST layout) -> back out through TRANSFER_SRC into a
// readback buffer, then verifies every pixel on the host. Exercises layout
// transitions, buffer<->image copies, and a final transfer->host barrier.
fn test_image_buffer_round_trip_via_layout_transitions() {
let Some((_inst, physical, device, queue, queue_family)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
const W: u32 = 16;
const H: u32 = 16;
// R32_UINT => 4 bytes per pixel.
const PIXEL_BYTES: u64 = 4; const BUF_SIZE: u64 = (W as u64) * (H as u64) * PIXEL_BYTES;
// Host-visible staging buffer holding the upload pattern.
let src = Buffer::new(
&device,
BufferCreateInfo {
size: BUF_SIZE,
usage: BufferUsage::TRANSFER_SRC,
},
)
.unwrap();
let src_req = src.memory_requirements();
let src_mt = physical
.find_memory_type(
src_req.memory_type_bits,
// HOST_COHERENT so the CPU writes below need no explicit flush.
MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
)
.unwrap();
let mut src_mem = DeviceMemory::allocate(&device, src_req.size, src_mt).unwrap();
src.bind_memory(&src_mem, 0).unwrap();
{
// Fill the staging buffer with a per-pixel hash (golden-ratio multiplier)
// so every pixel has a distinct, reproducible value.
let mut m = src_mem.map().unwrap();
let bytes = m.as_slice_mut();
for i in 0..(W * H) as usize {
let v = (i as u32).wrapping_mul(0x9E3779B1u32);
bytes[i * 4..(i + 1) * 4].copy_from_slice(&v.to_le_bytes());
}
}
// The GPU image the data travels through.
let image = Image::new_2d(
&device,
Image2dCreateInfo {
format: Format::R32_UINT,
width: W,
height: H,
usage: ImageUsage::STORAGE | ImageUsage::TRANSFER_DST | ImageUsage::TRANSFER_SRC,
},
)
.unwrap();
let img_req = image.memory_requirements();
let img_mt = physical
.find_memory_type(img_req.memory_type_bits, MemoryPropertyFlags::DEVICE_LOCAL)
.or_else(|| {
physical.find_memory_type(img_req.memory_type_bits, MemoryPropertyFlags::HOST_VISIBLE)
})
.expect("some memory type should back the image");
let img_mem = DeviceMemory::allocate(&device, img_req.size, img_mt).unwrap();
image.bind_memory(&img_mem, 0).unwrap();
// Host-visible readback buffer, pre-zeroed so stale data can't pass the check.
let dst = Buffer::new(
&device,
BufferCreateInfo {
size: BUF_SIZE,
usage: BufferUsage::TRANSFER_DST,
},
)
.unwrap();
let dst_req = dst.memory_requirements();
let dst_mt = physical
.find_memory_type(
dst_req.memory_type_bits,
MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
)
.unwrap();
let mut dst_mem = DeviceMemory::allocate(&device, dst_req.size, dst_mt).unwrap();
dst.bind_memory(&dst_mem, 0).unwrap();
{
let mut m = dst_mem.map().unwrap();
m.as_slice_mut().fill(0);
}
let pool = CommandPool::new(&device, queue_family).unwrap();
let mut cmd = pool.allocate_primary().unwrap();
{
let mut rec = cmd.begin().unwrap();
// Transition the fresh image into TRANSFER_DST before the upload copy.
rec.image_barrier(
PipelineStage::TOP_OF_PIPE,
PipelineStage::TRANSFER,
ImageBarrier::color(&image, ImageLayout::UNDEFINED, ImageLayout::TRANSFER_DST_OPTIMAL, AccessFlags::NONE, AccessFlags::TRANSFER_WRITE),
);
rec.copy_buffer_to_image(
&src,
&image,
ImageLayout::TRANSFER_DST_OPTIMAL,
&[BufferImageCopy::full_2d(W, H)],
);
// Make the upload visible and flip the image into TRANSFER_SRC for readback.
rec.image_barrier(
PipelineStage::TRANSFER,
PipelineStage::TRANSFER,
ImageBarrier::color(&image, ImageLayout::TRANSFER_DST_OPTIMAL, ImageLayout::TRANSFER_SRC_OPTIMAL, AccessFlags::TRANSFER_WRITE, AccessFlags::TRANSFER_READ),
);
rec.copy_image_to_buffer(
&image,
ImageLayout::TRANSFER_SRC_OPTIMAL,
&dst,
&[BufferImageCopy::full_2d(W, H)],
);
// Make the transfer writes visible to host reads after the fence wait.
rec.memory_barrier(
PipelineStage::TRANSFER,
PipelineStage::HOST,
AccessFlags::TRANSFER_WRITE,
AccessFlags::HOST_READ,
);
rec.end().unwrap();
}
let fence = Fence::new(&device).unwrap();
queue.submit(&[&cmd], Some(&fence)).unwrap();
fence.wait(u64::MAX).unwrap();
{
// Verify every pixel survived the buffer -> image -> buffer trip.
let m = dst_mem.map().unwrap();
let b = m.as_slice();
for i in 0..(W * H) as usize {
let read = u32::from_le_bytes([b[i * 4], b[i * 4 + 1], b[i * 4 + 2], b[i * 4 + 3]]);
let expected = (i as u32).wrapping_mul(0x9E3779B1u32);
assert_eq!(read, expected, "pixel {i} did not survive image round trip");
}
}
}
#[test]
fn test_storage_image_descriptor_wiring() {
    // Wires an 8x8 storage image into a descriptor set through a dedicated
    // layout and pool; only checks that the write path completes.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let image = Image::new_2d(
        &device,
        Image2dCreateInfo {
            format: Format::R32_UINT,
            width: 8,
            height: 8,
            usage: ImageUsage::STORAGE,
        },
    )
    .unwrap();
    // Back the image with whatever memory type is available.
    let req = image.memory_requirements();
    let memory_type = physical
        .find_memory_type(req.memory_type_bits, MemoryPropertyFlags::DEVICE_LOCAL)
        .or_else(|| {
            physical.find_memory_type(req.memory_type_bits, MemoryPropertyFlags::HOST_VISIBLE)
        })
        .unwrap();
    let memory = DeviceMemory::allocate(&device, req.size, memory_type).unwrap();
    image.bind_memory(&memory, 0).unwrap();
    let view = ImageView::new_2d_color(&image).unwrap();
    // One storage-image binding at slot 0, visible to the compute stage only.
    let binding = DescriptorSetLayoutBinding {
        binding: 0,
        descriptor_type: DescriptorType::STORAGE_IMAGE,
        descriptor_count: 1,
        stage_flags: ShaderStageFlags::COMPUTE,
    };
    let set_layout = DescriptorSetLayout::new(&device, &[binding]).unwrap();
    let pool_size = DescriptorPoolSize {
        descriptor_type: DescriptorType::STORAGE_IMAGE,
        descriptor_count: 1,
    };
    let pool = DescriptorPool::new(&device, 1, &[pool_size]).unwrap();
    let dset = pool.allocate(&set_layout).unwrap();
    dset.write_storage_image(0, &view, ImageLayout::GENERAL);
    assert!(dset.raw() != 0);
}
#[test]
fn test_binary_semaphore_creation_and_drop() {
    // A freshly created binary semaphore reports the right kind and a
    // non-null handle; dropping it at end of scope must not crash.
    let Some((_inst, _physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let semaphore = Semaphore::binary(&device).unwrap();
    assert!(semaphore.raw() != 0);
    assert_eq!(semaphore.kind(), SemaphoreKind::Binary);
}
#[test]
fn test_timeline_semaphore_host_signal_and_wait() {
    // Host-only round trip on a timeline semaphore created at value 5:
    // read the counter, signal it to 10, then wait on the satisfied value.
    let Some((_inst, _physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let sem = match Semaphore::timeline(&device, 5) {
        Ok(s) => s,
        Err(e) => {
            eprintln!("SKIP: timeline semaphores not supported: {e}");
            return;
        }
    };
    assert_eq!(sem.kind(), SemaphoreKind::Timeline);
    // The counter must start at the requested initial value.
    assert_eq!(sem.current_value().unwrap(), 5);
    sem.signal_value(10).unwrap();
    assert_eq!(sem.current_value().unwrap(), 10);
    // Already satisfied, so a zero timeout is enough.
    sem.wait_value(10, 0).unwrap();
}
#[test]
fn test_timeline_semaphore_gpu_signal_then_host_wait() {
    // Submits an empty command buffer that signals a timeline semaphore to 1,
    // then blocks on the host until the GPU-side signal lands.
    let Some((_inst, _physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let sem = match Semaphore::timeline(&device, 0) {
        Ok(s) => s,
        Err(e) => {
            eprintln!("SKIP: timeline semaphores not supported: {e}");
            return;
        }
    };
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    {
        // Record nothing; we only need a submittable command buffer.
        let rec = cmd.begin().unwrap();
        rec.end().unwrap();
    }
    let signal = SignalSemaphore {
        semaphore: &sem,
        value: 1,
        device_index: 0,
    };
    queue.submit_with_sync(&[&cmd], &[], &[signal], None).unwrap();
    sem.wait_value(1, u64::MAX).unwrap();
    // The counter may have advanced past 1 but can never be below it.
    assert!(sem.current_value().unwrap() >= 1);
}
#[test]
// Chains two submissions through one timeline semaphore: submission A signals
// value 1; submission B waits for 1 and signals 2. The host then waits for 2,
// proving GPU-side wait/signal ordering on a single timeline works.
fn test_timeline_semaphore_chained_dispatches() {
let Some((_inst, _physical, device, queue, queue_family)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let sem = match Semaphore::timeline(&device, 0) {
Ok(s) => s,
Err(e) => {
eprintln!("SKIP: timeline semaphores not supported: {e}");
return;
}
};
let pool = CommandPool::new(&device, queue_family).unwrap();
// Two empty command buffers; only the semaphore wiring matters here.
let mut cmd_a = pool.allocate_primary().unwrap();
{
let rec = cmd_a.begin().unwrap();
rec.end().unwrap();
}
let mut cmd_b = pool.allocate_primary().unwrap();
{
let rec = cmd_b.begin().unwrap();
rec.end().unwrap();
}
// First submission: signal the timeline to 1.
queue
.submit_with_sync(
&[&cmd_a],
&[],
&[SignalSemaphore {
semaphore: &sem,
value: 1,
device_index: 0,
}],
None,
)
.unwrap();
// Second submission: gate on value 1 before executing, then signal 2.
queue
.submit_with_sync(
&[&cmd_b],
&[WaitSemaphore {
semaphore: &sem,
value: 1,
dst_stage_mask: PipelineStage::TOP_OF_PIPE,
device_index: 0,
}],
&[SignalSemaphore {
semaphore: &sem,
value: 2,
device_index: 0,
}],
None,
)
.unwrap();
// Host waits for the end of the chain.
sem.wait_value(2, u64::MAX).unwrap();
assert!(sem.current_value().unwrap() >= 2);
}
#[test]
fn test_pipeline_cache_create_serialize_reuse() {
    // Serializes a fresh (empty) pipeline cache and feeds the bytes back into
    // a second cache, which must still construct successfully.
    let Some((_inst, _physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let first = PipelineCache::new(&device).unwrap();
    let serialized = first.data().unwrap();
    println!("Pipeline cache (empty) -> {} bytes", serialized.len());
    let reloaded = PipelineCache::with_data(&device, &serialized).unwrap();
    assert!(reloaded.raw() != 0);
}
#[test]
fn test_specialization_constants_baked_into_pipeline() {
    // Builds a compute pipeline with two specialization constants attached.
    let Some((_inst, _physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    // Load a prebuilt SPIR-V compute shader shipped with the examples.
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let spv_path = format!("{manifest_dir}/examples/shaders/square_buffer.spv");
    let spv = std::fs::read(spv_path).unwrap();
    let shader = ShaderModule::from_spirv_bytes(&device, &spv).unwrap();
    // One storage-buffer binding, compute stage.
    let binding = DescriptorSetLayoutBinding {
        binding: 0,
        descriptor_type: DescriptorType::STORAGE_BUFFER,
        descriptor_count: 1,
        stage_flags: ShaderStageFlags::COMPUTE,
    };
    let set_layout = DescriptorSetLayout::new(&device, &[binding]).unwrap();
    let layout = PipelineLayout::new(&device, &[&set_layout]).unwrap();
    // Constant id 99 gets a u32, id 100 an f32; the values are arbitrary.
    let specs = SpecializationConstants::new()
        .add_u32(99, 1234)
        .add_f32(100, 2.5);
    let pipe =
        ComputePipeline::with_specialization(&device, &layout, &shader, "main", &specs).unwrap();
    assert!(pipe.raw() != 0);
}
#[test]
fn test_sync2_memory_barrier_when_supported() {
    // Records a synchronization2 memory barrier; submits the command buffer
    // only when the feature is actually available, otherwise skips.
    let Some((_inst, _physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    let barrier_result;
    {
        let mut rec = cmd.begin().unwrap();
        barrier_result = rec.memory_barrier2(
            PipelineStage2::COMPUTE_SHADER,
            PipelineStage2::HOST,
            AccessFlags2::SHADER_WRITE,
            AccessFlags2::HOST_READ,
        );
        // The command buffer is ended regardless of barrier support.
        rec.end().unwrap();
    }
    if let Err(e) = barrier_result {
        eprintln!("SKIP: sync2 not supported: {e}");
        return;
    }
    // The barrier recorded fine, so the submission must also succeed.
    let fence = Fence::new(&device).unwrap();
    queue.submit(&[&cmd], Some(&fence)).unwrap();
    fence.wait(u64::MAX).unwrap();
}
#[test]
fn test_allocator_creation_and_statistics_start_zero() {
    // A brand-new allocator must report zeroed statistics across the board.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let alloc = Allocator::new(&device, &physical).unwrap();
    let stats = alloc.statistics();
    assert_eq!(stats.block_count, 0);
    assert_eq!(stats.block_bytes, 0);
    assert_eq!(stats.allocation_count, 0);
    assert_eq!(stats.allocation_bytes, 0);
}
#[test]
fn test_allocator_create_buffer_pool_path() {
    // One pooled buffer allocation: statistics must count it while live and
    // return to zero after free, with the backing block retained for reuse.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let allocator = Allocator::new(&device, &physical).unwrap();
    let (buffer, allocation) = allocator
        .create_buffer(
            BufferCreateInfo {
                size: 4096,
                usage: BufferUsage::STORAGE_BUFFER,
            },
            AllocationCreateInfo {
                usage: AllocationUsage::HostVisible,
                ..Default::default()
            },
        )
        .unwrap();
    // The allocation may be padded but never smaller than requested.
    assert!(allocation.size() >= 4096);
    assert!(allocation.memory() != 0);
    let live = allocator.statistics();
    assert_eq!(live.allocation_count, 1);
    assert!(live.allocation_bytes >= 4096);
    assert!(live.block_count >= 1);
    allocator.free(allocation);
    drop(buffer);
    let freed = allocator.statistics();
    assert_eq!(freed.allocation_count, 0);
    assert_eq!(freed.allocation_bytes, 0);
    // Freed blocks stay cached inside the allocator for reuse.
    assert!(freed.block_count >= 1);
}
#[test]
fn test_allocator_many_buffers_share_one_block() {
    // 32 small (1 KiB) allocations should be sub-allocated out of very few
    // VkDeviceMemory blocks, so consecutive allocations mostly share a block.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let allocator = Allocator::new(&device, &physical).unwrap();
    let mut buffers = Vec::new();
    let mut allocations = Vec::new();
    // Track the previous allocation's memory handle with Option instead of a
    // `0` sentinel, so a (theoretical) zero handle can't skew the count.
    let mut last_memory_handle: Option<u64> = None;
    let mut shared_count = 0u32;
    for _ in 0..32 {
        let (b, a) = allocator
            .create_buffer(
                BufferCreateInfo {
                    size: 1024,
                    usage: BufferUsage::STORAGE_BUFFER,
                },
                AllocationCreateInfo {
                    usage: AllocationUsage::HostVisible,
                    ..Default::default()
                },
            )
            .unwrap();
        // Count how often consecutive allocations land in the same block.
        if last_memory_handle == Some(a.memory()) {
            shared_count += 1;
        }
        last_memory_handle = Some(a.memory());
        buffers.push(b);
        allocations.push(a);
    }
    // Allow a few block rollovers, but the vast majority must share.
    assert!(
        shared_count >= 28,
        "expected at least 28 shared, got {shared_count}"
    );
    let stats = allocator.statistics();
    assert_eq!(stats.allocation_count, 32);
    assert!(
        stats.block_count <= 2,
        "expected <=2 blocks for 32 small allocations, got {}",
        stats.block_count
    );
    for a in allocations.drain(..) {
        allocator.free(a);
    }
    drop(buffers);
}
#[test]
// Verifies the `dedicated: true` flag routes an allocation down the dedicated
// path and that the statistics track it. NOTE(review): despite the name, the
// buffer here is only 4 KiB — it is the explicit flag, not the size, that
// forces the dedicated allocation; consider renaming or using a large size.
fn test_allocator_dedicated_for_huge_buffer() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
let (buffer, alloc) = allocator
.create_buffer(
BufferCreateInfo {
size: 4096,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
usage: AllocationUsage::HostVisible,
dedicated: true,
..Default::default()
},
)
.unwrap();
// A dedicated allocation owns its whole VkDeviceMemory, so offset is 0.
assert_eq!(alloc.offset(), 0);
let stats = allocator.statistics();
assert_eq!(stats.dedicated_allocation_count, 1);
allocator.free(alloc);
drop(buffer);
// Freeing must bring the dedicated count back down.
let stats = allocator.statistics();
assert_eq!(stats.dedicated_allocation_count, 0);
}
#[test]
fn test_allocator_per_allocation_device_mask_forces_dedicated() {
    // Supplying an explicit device_mask must route the allocation down the
    // dedicated path: dedicated_allocation_count goes up by exactly one.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let allocator = Allocator::new(&device, &physical).unwrap();
    let mask = device.default_device_mask();
    let before = allocator.statistics().dedicated_allocation_count;
    let (buffer, alloc) = allocator
        .create_buffer(
            BufferCreateInfo {
                size: 4096,
                usage: BufferUsage::STORAGE_BUFFER,
            },
            AllocationCreateInfo {
                usage: AllocationUsage::DeviceLocal,
                device_mask: Some(mask),
                ..Default::default()
            },
        )
        .unwrap();
    let after = allocator.statistics().dedicated_allocation_count;
    assert_eq!(
        after,
        before + 1,
        "device_mask=Some(_) must force a dedicated allocation"
    );
    // Dedicated allocations own their whole VkDeviceMemory, so offset is 0.
    assert_eq!(alloc.offset(), 0);
    allocator.free(alloc);
    drop(buffer);
}
#[test]
fn test_allocator_device_mask_rejects_custom_pool() {
    // Combining a per-allocation device_mask with a custom pool must be
    // rejected up-front with InvalidArgument.
    // Consistency fix: use the names already imported at the top of the file
    // instead of redundant `vulkane::safe::` paths (Error stays qualified —
    // it is not in the import list).
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let allocator = Allocator::new(&device, &physical).unwrap();
    let mem_props = physical.memory_properties();
    // Any host-visible memory type will do for the pool.
    let mem_type = (0..mem_props.type_count())
        .find(|&i| {
            mem_props
                .memory_type(i)
                .property_flags()
                .contains(MemoryPropertyFlags::HOST_VISIBLE)
        })
        .expect("expected at least one host-visible memory type");
    let pool = allocator
        .create_pool(PoolCreateInfo {
            memory_type_index: mem_type,
            strategy: AllocationStrategy::FreeList,
            block_size: 1024 * 1024,
            max_block_count: 0,
        })
        .unwrap();
    let mask = device.default_device_mask();
    let result = allocator.create_buffer(
        BufferCreateInfo {
            size: 4096,
            usage: BufferUsage::STORAGE_BUFFER,
        },
        AllocationCreateInfo {
            usage: AllocationUsage::HostVisible,
            pool: Some(pool),
            device_mask: Some(mask),
            ..Default::default()
        },
    );
    match result {
        Err(vulkane::safe::Error::InvalidArgument(_)) => {}
        Ok(_) => panic!("expected InvalidArgument when combining device_mask with a pool"),
        Err(e) => panic!("expected InvalidArgument, got {e:?}"),
    }
    allocator.destroy_pool(pool);
}
#[test]
// Allocates a 32x32 storage image through the allocator and builds a view on
// it. NOTE(review): despite the "_via_pool" suffix, no custom pool is passed —
// the allocation goes through the allocator's default path; confirm whether
// the name or the test body is the intended one.
fn test_allocator_create_image_2d_via_pool() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
let (image, alloc) = allocator
.create_image_2d(
Image2dCreateInfo {
format: Format::R32_UINT,
width: 32,
height: 32,
usage: ImageUsage::STORAGE,
},
AllocationCreateInfo {
usage: AllocationUsage::DeviceLocal,
..Default::default()
},
)
.unwrap();
// 32*32 pixels at 4 bytes each is a lower bound; drivers may pad.
assert!(alloc.size() >= 32 * 32 * 4);
let _view = ImageView::new_2d_color(&image).unwrap();
allocator.free(alloc);
drop(image);
}
#[test]
// Requests a persistently-mapped host-visible allocation and round-trips a
// byte pattern through the raw mapped pointer.
fn test_allocator_persistent_mapped_pointer() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
let (buffer, alloc) = allocator
.create_buffer(
BufferCreateInfo {
size: 256,
usage: BufferUsage::TRANSFER_DST,
},
AllocationCreateInfo {
usage: AllocationUsage::HostVisible,
// `mapped: true` asks for a persistent CPU mapping.
mapped: true,
..Default::default()
},
)
.unwrap();
let ptr = alloc
.mapped_ptr()
.expect("HostVisible + mapped should give a pointer");
// SAFETY: `ptr` points at a live, persistently-mapped host-visible
// allocation of `alloc.size()` bytes; no GPU work touches it while these
// slices exist, and the write slice is dropped before the read slice is
// created, so the aliasing rules are upheld.
unsafe {
let bytes = std::slice::from_raw_parts_mut(ptr as *mut u8, alloc.size() as usize);
for (i, b) in bytes.iter_mut().enumerate() {
*b = (i & 0xFF) as u8;
}
let bytes = std::slice::from_raw_parts(ptr as *const u8, alloc.size() as usize);
for (i, &b) in bytes.iter().enumerate() {
assert_eq!(b, (i & 0xFF) as u8);
}
}
allocator.free(alloc);
drop(buffer);
}
#[test]
fn test_allocator_peak_bytes_tracks_high_watermark() {
    // peak_allocation_bytes is a high-water mark: it rises with live usage
    // and must NOT fall back when allocations are freed.
    let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let allocator = Allocator::new(&device, &physical).unwrap();
    let (small_buf, small_alloc) = allocator
        .create_buffer(
            BufferCreateInfo {
                size: 8 * 1024,
                usage: BufferUsage::STORAGE_BUFFER,
            },
            AllocationCreateInfo {
                usage: AllocationUsage::HostVisible,
                ..Default::default()
            },
        )
        .unwrap();
    let (large_buf, large_alloc) = allocator
        .create_buffer(
            BufferCreateInfo {
                size: 16 * 1024,
                usage: BufferUsage::STORAGE_BUFFER,
            },
            AllocationCreateInfo {
                usage: AllocationUsage::HostVisible,
                ..Default::default()
            },
        )
        .unwrap();
    // With both live, the peak covers at least 8 KiB + 16 KiB.
    let peak_with_both = allocator.statistics().peak_allocation_bytes;
    assert!(peak_with_both >= 24 * 1024);
    allocator.free(large_alloc);
    drop(large_buf);
    // Freeing must not lower the recorded peak.
    let peak_after_free = allocator.statistics().peak_allocation_bytes;
    assert_eq!(peak_with_both, peak_after_free);
    allocator.free(small_alloc);
    drop(small_buf);
}
#[test]
fn test_memory_budget_query_succeeds_or_skips() {
    // memory_budget() is optional: None means the loader lacks
    // vkGetPhysicalDeviceMemoryProperties2; when present, heap_count > 0.
    let Some((_inst, physical, _device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    match physical.memory_budget() {
        None => {
            eprintln!("SKIP: vkGetPhysicalDeviceMemoryProperties2 not loaded");
        }
        Some(budget) => {
            assert!(budget.heap_count > 0);
            println!(
                "Memory heaps: {}; total budget reported: {} bytes (0 if extension not enabled)",
                budget.heap_count,
                budget.total_budget()
            );
        }
    }
}
/// Shared fixture: instance -> first physical device -> compute-capable
/// queue family -> device created with the requested feature set.
/// Returns `None` (callers print a SKIP) whenever any step is unavailable.
fn try_init_with_features(
    features: DeviceFeatures,
) -> Option<(
    Instance,
    vulkane::safe::PhysicalDevice,
    vulkane::safe::Device,
    vulkane::safe::Queue,
    u32,
)> {
    let instance = Instance::new(InstanceCreateInfo::default()).ok()?;
    let devices = instance.enumerate_physical_devices().ok()?;
    let physical = devices.into_iter().next()?;
    let queue_family = physical.find_queue_family(QueueFlags::COMPUTE)?;
    // One queue at default priority, with the caller's feature set enabled.
    let device = physical
        .create_device(DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: queue_family,
                queue_priorities: vec![1.0],
            }],
            enabled_features: Some(&features),
            ..Default::default()
        })
        .ok()?;
    let queue = device.get_queue(queue_family, 0);
    Some((instance, physical, device, queue, queue_family))
}
#[test]
fn test_device_features_default_creates_normally() {
    // A default (all-disabled) feature set must never block device creation.
    let Some((_inst, _physical, device, _q, _qf)) =
        try_init_with_features(DeviceFeatures::default())
    else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    assert!(!device.raw().is_null());
}
#[test]
// Creates a device with bufferDeviceAddress enabled, binds a buffer created
// with SHADER_DEVICE_ADDRESS usage, and checks vkGetBufferDeviceAddress
// returns a non-zero address. Every fallible step degrades to a SKIP so the
// test passes on ICDs without the feature.
fn test_device_features_buffer_device_address_round_trip() {
let features = DeviceFeatures::default().with_buffer_device_address();
let Some((_inst, physical, device, _q, _qf)) = try_init_with_features(features) else {
eprintln!("SKIP: bufferDeviceAddress not supported by device");
return;
};
let buffer = match Buffer::new(
&device,
BufferCreateInfo {
size: 4096,
usage: BufferUsage::STORAGE_BUFFER | BufferUsage::SHADER_DEVICE_ADDRESS,
},
) {
Ok(b) => b,
Err(e) => {
eprintln!("SKIP: SHADER_DEVICE_ADDRESS buffer creation failed: {e}");
return;
}
};
// Prefer device-local memory, fall back to host-visible.
let req = buffer.memory_requirements();
let mt = physical
.find_memory_type(req.memory_type_bits, MemoryPropertyFlags::DEVICE_LOCAL)
.or_else(|| {
physical.find_memory_type(req.memory_type_bits, MemoryPropertyFlags::HOST_VISIBLE)
})
.unwrap();
let memory = match DeviceMemory::allocate(&device, req.size, mt) {
Ok(m) => m,
Err(e) => {
eprintln!("SKIP: vkAllocateMemory rejected the device-address buffer: {e}");
return;
}
};
if let Err(e) = buffer.bind_memory(&memory, 0) {
eprintln!("SKIP: vkBindBufferMemory rejected the device-address buffer: {e}");
return;
}
// The actual assertion: a feature-enabled device must hand back a
// non-zero GPU virtual address for the bound buffer.
match buffer.device_address() {
Ok(addr) => {
assert!(
addr != 0,
"device_address() returned zero on a feature-enabled device"
);
println!("Buffer device address: 0x{addr:x}");
}
Err(e) => {
eprintln!("SKIP: vkGetBufferDeviceAddress returned: {e}");
}
}
}
#[test]
fn test_device_features_timeline_semaphore_round_trip() {
    // Enables the timelineSemaphore feature explicitly, then round-trips a
    // GPU-side signal (value 42) back to a host-side wait.
    let features = DeviceFeatures::default().with_timeline_semaphore();
    let Some((_inst, _physical, device, queue, queue_family)) = try_init_with_features(features)
    else {
        eprintln!("SKIP: timelineSemaphore not supported");
        return;
    };
    let sem = match Semaphore::timeline(&device, 0) {
        Ok(s) => s,
        Err(e) => {
            eprintln!("SKIP: timeline semaphore creation failed: {e}");
            return;
        }
    };
    assert_eq!(sem.kind(), SemaphoreKind::Timeline);
    assert_eq!(sem.current_value().unwrap(), 0);
    // Empty-but-valid command buffer to hang the signal on.
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    {
        let rec = cmd.begin().unwrap();
        rec.end().unwrap();
    }
    let signal = SignalSemaphore {
        semaphore: &sem,
        value: 42,
        device_index: 0,
    };
    queue.submit_with_sync(&[&cmd], &[], &[signal], None).unwrap();
    sem.wait_value(42, u64::MAX).unwrap();
    assert!(sem.current_value().unwrap() >= 42);
    println!(
        "Timeline semaphore reached value {}",
        sem.current_value().unwrap()
    );
}
#[test]
// Same shape as test_sync2_memory_barrier_when_supported, but with the
// synchronization2 feature explicitly requested at device creation; the
// barrier call can still fail on loaders that never expose the entry point.
fn test_device_features_synchronization2_round_trip() {
let features = DeviceFeatures::default().with_synchronization2();
let Some((_inst, _physical, device, queue, queue_family)) = try_init_with_features(features)
else {
eprintln!("SKIP: synchronization2 not supported");
return;
};
let pool = CommandPool::new(&device, queue_family).unwrap();
let mut cmd = pool.allocate_primary().unwrap();
// Record the sync2 barrier and capture whether it was accepted; the
// command buffer is ended either way so it stays in a valid state.
let supported = {
let mut rec = cmd.begin().unwrap();
let res = rec.memory_barrier2(
PipelineStage2::COMPUTE_SHADER,
PipelineStage2::HOST,
AccessFlags2::SHADER_WRITE,
AccessFlags2::HOST_READ,
);
rec.end().unwrap();
res
};
match supported {
Ok(()) => {
// The barrier recorded fine -> the submission must also succeed.
let fence = Fence::new(&device).unwrap();
queue.submit(&[&cmd], Some(&fence)).unwrap();
fence.wait(u64::MAX).unwrap();
}
Err(e) => {
eprintln!("SKIP: vkCmdPipelineBarrier2 not loaded: {e}");
}
}
}
#[test]
fn test_supported_features_query_succeeds() {
    // Merely exercises the supported_features() query path; its contents are
    // device-specific, so nothing about them is asserted.
    let Some((_inst, physical, _device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let _features = physical.supported_features();
}
#[test]
// Full lifecycle of a custom free-list pool: create, sub-allocate 8 buffers
// from it, check per-pool statistics, free everything, destroy the pool, and
// confirm statistics queries on the destroyed pool return None.
fn test_allocator_custom_freelist_pool_round_trip() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
// Throwaway buffer used only to discover the memory_type_bits that
// storage buffers require on this device.
let dummy_buf = Buffer::new(
&device,
BufferCreateInfo {
size: 256,
usage: BufferUsage::STORAGE_BUFFER,
},
)
.unwrap();
let req = dummy_buf.memory_requirements();
drop(dummy_buf);
let mt = physical
.find_memory_type(
req.memory_type_bits,
MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
)
.unwrap();
// 1 MiB blocks; max_block_count 0 appears to mean "unbounded" here —
// consistent with its use elsewhere in this file.
let pool = allocator
.create_pool(PoolCreateInfo {
memory_type_index: mt,
strategy: AllocationStrategy::FreeList,
block_size: 1024 * 1024, max_block_count: 0,
})
.unwrap();
let mut allocations = Vec::new();
let mut buffers = Vec::new();
for _ in 0..8 {
let (b, a) = allocator
.create_buffer(
BufferCreateInfo {
size: 4096,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
)
.unwrap();
buffers.push(b);
allocations.push(a);
}
// 8 x 4 KiB fits comfortably inside one 1 MiB block.
let pool_stats = allocator.pool_statistics(pool).unwrap();
assert_eq!(pool_stats.allocation_count, 8);
assert!(pool_stats.allocation_bytes >= 8 * 4096);
assert_eq!(pool_stats.block_count, 1);
for a in allocations.drain(..) {
allocator.free(a);
}
drop(buffers);
let pool_stats = allocator.pool_statistics(pool).unwrap();
assert_eq!(pool_stats.allocation_count, 0);
allocator.destroy_pool(pool);
// A destroyed pool is no longer queryable.
assert!(allocator.pool_statistics(pool).is_none());
}
#[test]
// A linear (bump) pool cannot free individual allocations, but reset_pool
// must wipe it wholesale and make the space reusable.
fn test_allocator_linear_pool_supports_reset() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
// Throwaway buffer used only to discover storage-buffer memory_type_bits.
let dummy_buf = Buffer::new(
&device,
BufferCreateInfo {
size: 256,
usage: BufferUsage::STORAGE_BUFFER,
},
)
.unwrap();
let req = dummy_buf.memory_requirements();
drop(dummy_buf);
let mt = physical
.find_memory_type(
req.memory_type_bits,
MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
)
.unwrap();
let pool = allocator
.create_pool(PoolCreateInfo {
memory_type_index: mt,
strategy: AllocationStrategy::Linear,
block_size: 64 * 1024, max_block_count: 0,
})
.unwrap();
// Fill the pool with ten 1 KiB bump allocations.
let mut buffers = Vec::new();
let mut allocations = Vec::new();
for _ in 0..10 {
let (b, a) = allocator
.create_buffer(
BufferCreateInfo {
size: 1024,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
)
.unwrap();
buffers.push(b);
allocations.push(a);
}
let stats_before = allocator.pool_statistics(pool).unwrap();
assert_eq!(stats_before.allocation_count, 10);
// Buffers are dropped before the reset; the Allocation handles in
// `allocations` are deliberately left un-freed — reset reclaims them all.
drop(buffers);
allocator.reset_pool(pool);
let stats_after = allocator.pool_statistics(pool).unwrap();
assert_eq!(stats_after.allocation_count, 0);
assert_eq!(stats_after.allocation_bytes, 0);
// The reset pool must accept new allocations again.
let (b2, a2) = allocator
.create_buffer(
BufferCreateInfo {
size: 2048,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
)
.unwrap();
let stats_post = allocator.pool_statistics(pool).unwrap();
assert_eq!(stats_post.allocation_count, 1);
drop(b2);
// NOTE(review): a2 is dropped without allocator.free(), unlike other
// tests — presumably fine because destroy_pool reclaims the whole pool;
// confirm this is the intended pattern.
drop(a2);
allocator.destroy_pool(pool);
}
#[test]
// A linear pool capped at one 4 KiB block must reject an allocation that can
// no longer fit, instead of silently growing.
fn test_allocator_linear_pool_full_returns_error() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
// Throwaway buffer used only to discover storage-buffer memory_type_bits.
let dummy_buf = Buffer::new(
&device,
BufferCreateInfo {
size: 256,
usage: BufferUsage::STORAGE_BUFFER,
},
)
.unwrap();
let req = dummy_buf.memory_requirements();
drop(dummy_buf);
let mt = physical
.find_memory_type(
req.memory_type_bits,
MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
)
.unwrap();
// Exactly one 4 KiB block: max_block_count: 1 forbids growing.
let pool = allocator
.create_pool(PoolCreateInfo {
memory_type_index: mt,
strategy: AllocationStrategy::Linear,
block_size: 4096,
max_block_count: 1,
})
.unwrap();
// First 2 KiB allocation must fit.
let r1 = allocator.create_buffer(
BufferCreateInfo {
size: 2048,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
);
assert!(r1.is_ok());
// Second 2 KiB may or may not fit depending on alignment padding, so its
// result is deliberately ignored.
let _r2 = allocator.create_buffer(
BufferCreateInfo {
size: 2048,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
);
// A further 4 KiB request can never fit in the remaining space.
let r3 = allocator.create_buffer(
BufferCreateInfo {
size: 4096,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
);
assert!(
r3.is_err(),
"expected linear pool to refuse over-budget allocation"
);
allocator.destroy_pool(pool);
}
#[test]
// Fragments a free-list pool by freeing every other allocation, then builds
// and applies a defragmentation plan. Verifies surviving allocations keep
// their identity (id/size/user_data), end up at monotonically increasing
// offsets, and that the compacted pool can satisfy a large new allocation.
fn test_allocator_defragmentation_compacts_fragmented_pool() {
let Some((_inst, physical, device, _q, _qf)) = try_init_compute() else {
eprintln!("SKIP: no Vulkan ICD");
return;
};
let allocator = Allocator::new(&device, &physical).unwrap();
// Throwaway buffer used only to discover storage-buffer memory_type_bits.
let dummy_buf = Buffer::new(
&device,
BufferCreateInfo {
size: 256,
usage: BufferUsage::STORAGE_BUFFER,
},
)
.unwrap();
let req = dummy_buf.memory_requirements();
drop(dummy_buf);
let mt = physical
.find_memory_type(
req.memory_type_bits,
MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
)
.unwrap();
// Single 256 KiB block so all fragmentation happens in one block.
let pool = allocator
.create_pool(PoolCreateInfo {
memory_type_index: mt,
strategy: AllocationStrategy::FreeList,
block_size: 256 * 1024, max_block_count: 1,
})
.unwrap();
// Eight allocations of increasing size (4 KiB .. 32 KiB), each tagged
// with a distinct user_data so moves can be matched back to allocations.
let mut buffers: Vec<(Buffer, vulkane::safe::Allocation)> = Vec::new();
for i in 0..8 {
let (b, a) = allocator
.create_buffer(
BufferCreateInfo {
size: 4096 * (i as u64 + 1),
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
user_data: 100 + i as u64,
..Default::default()
},
)
.unwrap();
buffers.push((b, a));
}
// Free the odd-indexed allocations to punch holes; even-indexed ones
// survive (their Buffer objects are dropped but the Allocations live on).
let mut survivors: Vec<vulkane::safe::Allocation> = Vec::new();
for (i, (buffer, alloc)) in buffers.into_iter().enumerate() {
if i % 2 == 1 {
drop(buffer);
allocator.free(alloc);
} else {
drop(buffer);
survivors.push(alloc);
}
}
// Snapshot identity fields before defrag to compare afterwards.
let pre: Vec<(u64, u64, u64)> = survivors
.iter()
.map(|a| (a.id(), a.size(), a.user_data()))
.collect();
let plan = allocator.build_defragmentation_plan(pool);
// NOTE(review): the message claims all 4 survivors are included, but the
// assertion only checks the layout is non-empty — confirm whether a
// stronger length check is intended.
assert!(
!plan.total_layout().is_empty(),
"defrag plan should include all 4 surviving allocations"
);
// Every planned move must refer to a surviving allocation.
for m in &plan.moves {
assert!(
survivors.iter().any(|a| a.user_data() == m.user_data),
"move user_data {} should match a surviving allocation",
m.user_data
);
}
allocator.apply_defragmentation_plan(plan);
// After compaction: identities intact, offsets packed in increasing,
// non-overlapping order.
let mut last_offset: u64 = 0;
for (i, alloc) in survivors.iter().enumerate() {
assert_eq!(alloc.id(), pre[i].0, "id should be stable across defrag");
assert_eq!(
alloc.size(),
pre[i].1,
"size should be stable across defrag"
);
assert_eq!(
alloc.user_data(),
pre[i].2,
"user_data should be stable across defrag"
);
let off = alloc.offset();
assert!(
off >= last_offset,
"post-defrag offsets should be monotonically increasing (got {off} after {last_offset})"
);
last_offset = off + alloc.size();
}
// Compaction should have consolidated enough free space for 64 KiB.
let (_b, post_alloc) = allocator
.create_buffer(
BufferCreateInfo {
size: 64 * 1024,
usage: BufferUsage::STORAGE_BUFFER,
},
AllocationCreateInfo {
pool: Some(pool),
..Default::default()
},
)
.expect("post-defrag allocation should succeed");
assert!(post_alloc.size() >= 64 * 1024);
drop(survivors);
drop(post_alloc);
allocator.destroy_pool(pool);
}
#[test]
fn test_enumerate_physical_device_groups() {
    // Lists physical device groups and sanity-checks each one; skips when
    // no instance can be made or group enumeration is unavailable.
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(e) => {
            eprintln!("SKIP: cannot create Vulkan instance: {e}");
            return;
        }
    };
    let groups = match instance.enumerate_physical_device_groups() {
        Ok(g) => g,
        Err(e) => {
            eprintln!("SKIP: enumerate_physical_device_groups returned: {e}");
            return;
        }
    };
    println!("Found {} physical device group(s)", groups.len());
    for (i, group) in groups.iter().enumerate() {
        assert!(group.count() >= 1, "every group has at least one device");
        println!(
            " group {i}: {} device(s), subset_allocation={}",
            group.count(),
            group.supports_subset_allocation()
        );
        // Every member device must report a non-empty name.
        for member in group.physical_devices() {
            assert!(!member.properties().device_name().is_empty());
        }
    }
}
#[test]
fn test_device_singleton_group_unification() {
    // A device built from a single physical device must behave like a
    // one-member device group with mask 0b1.
    let Some((_inst, _physical, device, _q, _qf)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    assert_eq!(
        device.physical_device_count(),
        1,
        "single-physical-device path should produce a length-1 group"
    );
    assert_eq!(device.physical_device_handles().len(), 1);
    assert_eq!(device.default_device_mask(), 0b1);
}
#[test]
fn test_submit_with_groups_default_mask_round_trip() {
    // Submits an empty command buffer with an explicit per-CB device mask
    // (the device's default mask) and waits for completion via a fence.
    let Some((_inst, _physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    {
        let rec = cmd.begin().unwrap();
        rec.end().unwrap();
    }
    let mask = device.default_device_mask();
    let fence = Fence::new(&device).unwrap();
    queue
        .submit_with_groups(&[&cmd], Some(&[mask]), &[], &[], Some(&fence))
        .unwrap();
    fence.wait(u64::MAX).unwrap();
}
#[test]
fn test_submit_with_groups_rejects_mask_length_mismatch() {
    // One command buffer but two device masks: the submission must fail with
    // InvalidArgument before anything reaches the driver.
    let Some((_inst, _physical, device, queue, queue_family)) = try_init_compute() else {
        eprintln!("SKIP: no Vulkan ICD");
        return;
    };
    let pool = CommandPool::new(&device, queue_family).unwrap();
    let mut cmd = pool.allocate_primary().unwrap();
    {
        let rec = cmd.begin().unwrap();
        rec.end().unwrap();
    }
    let masks = [1u32, 2u32];
    match queue.submit_with_groups(&[&cmd], Some(&masks), &[], &[], None) {
        Err(vulkane::safe::Error::InvalidArgument(_)) => {}
        Ok(_) => panic!("expected InvalidArgument when mask len differs from CB count"),
        Err(e) => panic!("expected InvalidArgument, got {e:?}"),
    }
}
#[test]
fn test_device_create_via_physical_device_group() {
    // Creates a logical device from a whole physical-device group and checks
    // the group size is reflected in the device.
    let instance = match Instance::new(InstanceCreateInfo::default()) {
        Ok(i) => i,
        Err(_) => {
            eprintln!("SKIP: no Vulkan ICD");
            return;
        }
    };
    let Some(group) = instance
        .enumerate_physical_device_groups()
        .ok()
        .and_then(|gs| gs.into_iter().next())
    else {
        eprintln!("SKIP: no physical device groups");
        return;
    };
    let Some(physical) = group.physical_devices().first() else {
        eprintln!("SKIP: empty physical device group");
        return;
    };
    // Robustness fix: skip instead of panicking when no queue family
    // advertises the TRANSFER bit (devices may expose transfer capability
    // only implicitly via graphics/compute queues), matching the SKIP
    // convention used by every other test in this file.
    let Some(queue_family) = physical.find_queue_family(QueueFlags::TRANSFER) else {
        eprintln!("SKIP: no queue family advertising TRANSFER");
        return;
    };
    let device = group
        .create_device(DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: queue_family,
                queue_priorities: vec![1.0],
            }],
            ..Default::default()
        })
        .unwrap();
    // The device must span the whole group and expose a non-empty mask.
    assert_eq!(device.physical_device_count(), group.count());
    assert!(device.default_device_mask() != 0);
}
#[test]
fn test_defragmentation_move_struct_round_trip() {
    // DefragmentationMove is plain data: fields written must read back
    // unchanged, and DefragmentationPlan must have a usable Default.
    let mv = DefragmentationMove {
        allocation_id: 42,
        user_data: 0xDEAD_BEEF,
        size: 1024,
        src_memory: 0x1000,
        src_offset: 0,
        dst_memory: 0x2000,
        dst_offset: 256,
    };
    assert_eq!(mv.allocation_id, 42);
    assert_eq!(mv.user_data, 0xDEAD_BEEF);
    assert_eq!(mv.size, 1024);
    let _plan = DefragmentationPlan::default();
}