use vulkane::safe::{
ApiVersion, Buffer, BufferCreateInfo, BufferUsage, CommandPool, DeviceCreateInfo, DeviceMemory,
Fence, Instance, InstanceCreateInfo, MemoryPropertyFlags, QueueCreateInfo, QueueFlags,
};
/// End-to-end `vkCmdFillBuffer` demo: create an instance/device, allocate a
/// host-visible buffer, seed it from the CPU, have the GPU overwrite it with
/// a 32-bit pattern, then read it back and verify. All cleanup is RAII.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Instance creation is the only step allowed to fail gracefully: with no
    // Vulkan driver installed the example skips instead of erroring out.
    let instance = match Instance::new(InstanceCreateInfo {
        application_name: Some("vulkane fill_buffer example"),
        api_version: ApiVersion::V1_0,
        ..Default::default()
    }) {
        Err(e) => {
            eprintln!("SKIP: could not create Vulkan instance: {e}");
            eprintln!("(Install a Vulkan driver such as Lavapipe to run this example.)");
            return Ok(());
        }
        Ok(created) => created,
    };
    println!("[OK] Created VkInstance");

    // Pick the first physical device exposing a transfer-capable queue family.
    let gpu = instance
        .enumerate_physical_devices()?
        .into_iter()
        .find(|candidate| candidate.find_queue_family(QueueFlags::TRANSFER).is_some())
        .ok_or("No physical device with a transfer-capable queue family")?;
    let gpu_props = gpu.properties();
    println!(
        "[OK] Using GPU: {} (Vulkan {})",
        gpu_props.device_name(),
        gpu_props.api_version()
    );

    // Build a logical device with a single queue from that family.
    let family = gpu
        .find_queue_family(QueueFlags::TRANSFER)
        .expect("transfer-capable queue family was found above");
    let device = gpu.create_device(DeviceCreateInfo {
        queue_create_infos: &[QueueCreateInfo {
            queue_family_index: family,
            queue_priorities: vec![1.0],
        }],
        ..Default::default()
    })?;
    let queue = device.get_queue(family, 0);
    println!("[OK] Created VkDevice and got transfer queue");

    // A small transfer-destination buffer for the GPU to fill.
    const BUFFER_SIZE: u64 = 1024;
    let target = Buffer::new(
        &device,
        BufferCreateInfo {
            size: BUFFER_SIZE,
            usage: BufferUsage::TRANSFER_DST,
        },
    )?;
    println!("[OK] Created VkBuffer ({} bytes)", BUFFER_SIZE);

    // Back it with host-visible + host-coherent memory so the CPU can both
    // seed the contents and read the GPU's result without explicit flushes.
    let requirements = target.memory_requirements();
    let mem_type = gpu
        .find_memory_type(
            requirements.memory_type_bits,
            MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT,
        )
        .ok_or("No host-visible+coherent memory type available")?;
    let mut memory = DeviceMemory::allocate(&device, requirements.size, mem_type)?;
    target.bind_memory(&memory, 0)?;
    println!(
        "[OK] Allocated and bound {} bytes of host-visible memory",
        requirements.size
    );

    // Seed every byte from the host; leaving the scope drops the mapping.
    {
        let mut mapping = memory.map()?;
        let bytes = mapping.as_slice_mut();
        for byte in bytes.iter_mut() {
            *byte = 0x11;
        }
        assert!(bytes.iter().all(|&b| b == 0x11));
        println!("[OK] Host-wrote 0x11 to all bytes of the buffer");
    }

    // Record one fill command that overwrites the entire buffer.
    let pool = CommandPool::new(&device, family)?;
    let mut cmd = pool.allocate_primary()?;
    {
        let mut recording = cmd.begin()?;
        recording.fill_buffer(&target, 0, BUFFER_SIZE, 0xDEADBEEF);
        recording.end()?;
    }
    println!("[OK] Recorded vkCmdFillBuffer with pattern 0xDEADBEEF");

    // Submit and block on a fence until the GPU signals completion.
    let fence = Fence::new(&device)?;
    queue.submit(&[&cmd], Some(&fence))?;
    fence.wait(u64::MAX)?;
    println!("[OK] GPU finished the fill operation");

    // Re-map and check every 4-byte word against the pattern, compared in
    // native byte order (the same representation the fill word was given in).
    {
        let mut mapping = memory.map()?;
        let bytes = mapping.as_slice_mut();
        let pattern = u32::to_ne_bytes(0xDEADBEEF);
        for word in bytes.chunks_exact(4) {
            assert_eq!(word, pattern, "GPU did not write the expected pattern");
        }
        println!("[OK] Verified all bytes match 0xDEADBEEF");
    }

    device.wait_idle()?;
    println!();
    println!("=== fill_buffer example PASSED ===");
    println!("(All resources will now be dropped via RAII — no manual vkDestroy* calls.)");
    Ok(())
}