//! Backend-agnostic GPU buffer management: device and staging buffers,
//! host-to-device transfers, pooling, and synchronization primitives.

pub mod backend;
pub mod buffer;
pub mod memory;
pub mod pool;
pub mod staging;
pub mod transfer;
pub mod sync;

pub use backend::{MkGpuBackend, MkGpuCapabilities, DummyBackend, DummyBackendConfig, DummyError};
pub use buffer::{MkDeviceBuffer, MkBufferUsage};
pub use memory::MkMemoryType;
pub use staging::MkStagingBuffer;
pub use transfer::{MkGpuTransfer, MkTransferBuilder};
pub use sync::MkFence;
pub use pool::MkGpuPool;

#[cfg(feature = "vulkan")]
pub mod vulkan;

#[cfg(feature = "vulkan")]
pub use vulkan::{VulkanBackend, VulkanConfig};

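/// GPU context built on top of a pluggable [`MkGpuBackend`].
///
/// It owns the backend, creates device-local and host-visible staging
/// buffers, and issues staging-to-device copies, tagging each transfer with
/// an id drawn from an atomic counter.
///
/// A minimal usage sketch (the `DummyBackend` constructor and the
/// `TRANSFER_DST` usage flag below are assumptions; adjust to the real
/// `DummyBackend` and `MkBufferUsage` APIs):
///
/// ```ignore
/// let gpu = MkGpu::new(DummyBackend::new(DummyBackendConfig::default()));
/// let staging = gpu.staging_buffer_with_data(&[1.0f32, 2.0, 3.0])?;
/// let device = gpu.create_device_buffer(staging.size(), MkBufferUsage::TRANSFER_DST)?;
/// gpu.transfer(&staging, &device)?;
/// gpu.wait_idle()?;
/// ```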
pub struct MkGpu<B: MkGpuBackend> {
    backend: B,
    transfer_id: std::sync::atomic::AtomicU64,
}

impl<B: MkGpuBackend> MkGpu<B> {
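    /// Wraps `backend`, starting the transfer-id counter at zero.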
    pub fn new(backend: B) -> Self {
        Self {
            backend,
            transfer_id: std::sync::atomic::AtomicU64::new(0),
        }
    }

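    /// Returns a reference to the underlying backend.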
    pub fn backend(&self) -> &B {
        &self.backend
    }

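    /// Allocates a `size`-byte buffer in device-local memory with the given
    /// usage flags.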
    pub fn create_device_buffer(&self, size: usize, usage: MkBufferUsage) -> Result<MkDeviceBuffer<B>, B::Error> {
        let handle = self.backend.create_buffer(size, usage, MkMemoryType::DeviceLocal)?;
        Ok(MkDeviceBuffer::new(handle, size, usage))
    }

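    /// Allocates a `size`-byte host-visible staging buffer usable as a
    /// transfer source.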
    pub fn create_staging_buffer(&self, size: usize) -> Result<MkStagingBuffer<B>, B::Error> {
        let usage = MkBufferUsage::TRANSFER_SRC;
        let handle = self.backend.create_buffer(size, usage, MkMemoryType::HostVisible)?;
        Ok(MkStagingBuffer::new(handle, size))
    }

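    /// Allocates a staging buffer sized for `data` and copies `data` into it
    /// through the backend's memory mapping.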
    pub fn staging_buffer_with_data<T: Copy>(&self, data: &[T]) -> Result<MkStagingBuffer<B>, B::Error> {
        let size = std::mem::size_of_val(data);
        let staging = self.create_staging_buffer(size)?;

        // Copy the slice into the mapped staging memory. If the backend cannot
        // map the buffer, the copy is skipped and the buffer is returned
        // with its contents uninitialized.
        if let Some(ptr) = self.backend.map(&staging.handle()) {
            // SAFETY: relies on the backend mapping at least `size` bytes of
            // host-visible memory for this buffer; the source slice occupies
            // exactly `size` bytes.
            unsafe {
                std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, ptr, size);
            }
            self.backend.unmap(&staging.handle());
        }

        Ok(staging)
    }

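    /// Copies from `src` to `dst`, clamped to the smaller of the two buffer
    /// sizes, and returns a transfer handle with a fresh id.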
    pub fn transfer(&self, src: &MkStagingBuffer<B>, dst: &MkDeviceBuffer<B>) -> Result<MkGpuTransfer, B::Error> {
        let size = src.size().min(dst.size());
        self.backend.copy_buffer(&src.handle(), &dst.handle(), size)?;

        let id = self.transfer_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        Ok(MkGpuTransfer::new(id))
    }

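    /// Returns a batch-transfer builder bound to this `MkGpu`, for grouping
    /// multiple copies into one submission.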
    pub fn batch_transfer(&self) -> transfer::MkBatchTransfer<'_, B> {
        transfer::MkBatchTransfer::new(self)
    }

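    /// Blocks until the backend has finished all outstanding work.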
    pub fn wait_idle(&self) -> Result<(), B::Error> {
        self.backend.wait_idle()
    }
}