memkit_gpu/lib.rs

//! # memkit-gpu
//!
//! Backend-agnostic GPU memory management for memkit.
//!
//! ## Features
//!
//! - **Backend-agnostic**: Write code once, run on Vulkan, Metal, or DX12
//! - **Unified API**: Same types work across all backends
//! - **Zero-cost abstraction**: Backend selection at compile time
//!
//! ## Backends
//!
//! - `vulkan` - Vulkan backend via ash + gpu-allocator
//! - `metal` - Metal backend (planned)
//! - `dx12` - DirectX 12 backend (planned)
//! - `dummy` - No-op backend for testing
//!
//! ## Example
//!
//! ```rust
//! use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
//!
//! // Create with the dummy backend for testing
//! let gpu = MkGpu::new(DummyBackend::new());
//!
//! // Create buffers
//! let staging = gpu.create_staging_buffer(1024).unwrap();
//! let device = gpu.create_device_buffer(1024, MkBufferUsage::VERTEX).unwrap();
//! ```
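//!
//! Uploading data is a two-step flow: write into a host-visible staging
//! buffer, then copy into a device-local buffer. A minimal sketch with the
//! dummy backend (sizes are illustrative):
//!
//! ```rust
//! # use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
//! let gpu = MkGpu::new(DummyBackend::new());
//! let staging = gpu.staging_buffer_with_data(&[0u8; 256]).unwrap();
//! let device = gpu.create_device_buffer(256, MkBufferUsage::VERTEX).unwrap();
//! let _transfer = gpu.transfer(&staging, &device).unwrap();
//! gpu.wait_idle().unwrap();
//! ```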

pub mod backend;
pub mod buffer;
pub mod memory;
pub mod pool;
pub mod staging;
pub mod transfer;
pub mod sync;

// Re-exports
pub use backend::{MkGpuBackend, MkGpuCapabilities, DummyBackend, DummyBackendConfig, DummyError};
pub use buffer::{MkDeviceBuffer, MkBufferUsage};
pub use memory::MkMemoryType;
pub use staging::MkStagingBuffer;
pub use transfer::{MkGpuTransfer, MkBatchTransfer, MkTransferBuilder};
pub use sync::MkFence;
pub use pool::MkGpuPool;
47
48#[cfg(feature = "vulkan")]
49pub mod vulkan;
50
51#[cfg(feature = "vulkan")]
52pub use vulkan::{VulkanBackend, VulkanConfig};
53
/// Main GPU allocator handle, generic over backend.
pub struct MkGpu<B: MkGpuBackend> {
    backend: B,
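    /// Monotonically increasing counter used to hand out transfer ids.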
    transfer_id: std::sync::atomic::AtomicU64,
}

impl<B: MkGpuBackend> MkGpu<B> {
    /// Create a new GPU allocator with the given backend.
    pub fn new(backend: B) -> Self {
        Self {
            backend,
            transfer_id: std::sync::atomic::AtomicU64::new(0),
        }
    }

    /// Get a reference to the backend.
    pub fn backend(&self) -> &B {
        &self.backend
    }

    /// Create a device-local buffer.
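    ///
    /// A minimal sketch using the dummy backend (size and usage flag are
    /// illustrative):
    ///
    /// ```rust
    /// # use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// let vb = gpu.create_device_buffer(64 * 1024, MkBufferUsage::VERTEX).unwrap();
    /// ```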
    pub fn create_device_buffer(&self, size: usize, usage: MkBufferUsage) -> Result<MkDeviceBuffer<B>, B::Error> {
        let handle = self.backend.create_buffer(size, usage, MkMemoryType::DeviceLocal)?;
        Ok(MkDeviceBuffer::new(handle, size, usage))
    }

    /// Create a staging buffer for CPU-GPU transfers.
    pub fn create_staging_buffer(&self, size: usize) -> Result<MkStagingBuffer<B>, B::Error> {
        let usage = MkBufferUsage::TRANSFER_SRC;
        let handle = self.backend.create_buffer(size, usage, MkMemoryType::HostVisible)?;
        Ok(MkStagingBuffer::new(handle, size))
    }

    /// Create a staging buffer and write data to it.
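    ///
    /// A minimal sketch with the dummy backend:
    ///
    /// ```rust
    /// # use memkit_gpu::{MkGpu, DummyBackend};
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// // Three f32s produce a 12-byte staging buffer.
    /// let staging = gpu.staging_buffer_with_data(&[1.0f32, 2.0, 3.0]).unwrap();
    /// ```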
    pub fn staging_buffer_with_data<T: Copy>(&self, data: &[T]) -> Result<MkStagingBuffer<B>, B::Error> {
        let size = std::mem::size_of_val(data);
        let staging = self.create_staging_buffer(size)?;

        // Map the host-visible memory and copy the bytes in. If the backend
        // cannot map the buffer, it is returned unwritten.
        if let Some(ptr) = self.backend.map(&staging.handle()) {
            // SAFETY: `ptr` points to at least `size` mapped bytes, and the
            // source slice is exactly `size` bytes long.
            unsafe {
                std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, ptr, size);
            }
            self.backend.unmap(&staging.handle());
        }

        Ok(staging)
    }

    /// Transfer data from a staging buffer to a device buffer.
    ///
    /// Copies `min(src.size(), dst.size())` bytes and returns a handle
    /// identifying the transfer.
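    ///
    /// A minimal sketch with the dummy backend; the device buffer is smaller
    /// than the staging buffer, so only 128 bytes are copied:
    ///
    /// ```rust
    /// # use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// let staging = gpu.staging_buffer_with_data(&[0u8; 256]).unwrap();
    /// let device = gpu.create_device_buffer(128, MkBufferUsage::VERTEX).unwrap();
    /// let _transfer = gpu.transfer(&staging, &device).unwrap();
    /// ```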
    pub fn transfer(&self, src: &MkStagingBuffer<B>, dst: &MkDeviceBuffer<B>) -> Result<MkGpuTransfer, B::Error> {
        // Copy only the overlapping byte range.
        let size = src.size().min(dst.size());
        self.backend.copy_buffer(&src.handle(), &dst.handle(), size)?;

        // Hand out a unique id for this transfer.
        let id = self.transfer_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        Ok(MkGpuTransfer::new(id))
    }

    /// Create a batch transfer builder.
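    ///
    /// A hypothetical sketch; the `add` and `submit` methods below are
    /// assumptions about `MkBatchTransfer`, not confirmed API:
    ///
    /// ```ignore
    /// let mut batch = gpu.batch_transfer();
    /// batch.add(&staging_a, &device_a);
    /// batch.add(&staging_b, &device_b);
    /// batch.submit()?;
    /// ```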
    pub fn batch_transfer(&self) -> transfer::MkBatchTransfer<'_, B> {
        transfer::MkBatchTransfer::new(self)
    }

    /// Wait for all pending operations to complete.
    pub fn wait_idle(&self) -> Result<(), B::Error> {
        self.backend.wait_idle()
    }
}