//! # memkit-gpu
//!
//! Backend-agnostic GPU memory management for memkit.
//!
//! ## Features
//!
//! - **Backend-agnostic**: Write code once, run on Vulkan, Metal, DX12
//! - **Unified API**: Same types work across all backends
//! - **Zero-cost abstraction**: Backend selection at compile time
//!
//! ## Backends
//!
//! - `vulkan` - Vulkan backend via ash + gpu-allocator
//! - `metal` - Metal backend (planned)
//! - `dx12` - DirectX 12 backend (planned)
//! - `dummy` - No-op backend for testing
//!
//! ## Example
//!
//! ```rust
//! use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
//!
//! // Create with dummy backend for testing
//! let gpu = MkGpu::new(DummyBackend::new());
//!
//! // Create buffers
//! let staging = gpu.create_staging_buffer(1024).unwrap();
//! let device = gpu.create_device_buffer(1024, MkBufferUsage::VERTEX).unwrap();
//! ```

pub mod backend;
pub mod buffer;
pub mod memory;
pub mod pool;
pub mod staging;
pub mod transfer;
pub mod sync;

// Re-exports
pub use backend::{MkGpuBackend, MkGpuCapabilities, DummyBackend, DummyBackendConfig, DummyError};
pub use buffer::{MkDeviceBuffer, MkBufferUsage};
pub use memory::MkMemoryType;
pub use staging::MkStagingBuffer;
pub use transfer::{MkGpuTransfer, MkTransferBuilder, MkBatchTransfer};
pub use sync::MkFence;
pub use pool::MkGpuPool;

#[cfg(feature = "vulkan")]
pub mod vulkan;

#[cfg(feature = "vulkan")]
pub use vulkan::{VulkanBackend, VulkanConfig};

/// Main GPU allocator handle, generic over backend.
pub struct MkGpu<B: MkGpuBackend> {
    backend: B,
    transfer_id: std::sync::atomic::AtomicU64,
}

impl<B: MkGpuBackend> MkGpu<B> {
    /// Create a new GPU allocator with the given backend.
    pub fn new(backend: B) -> Self {
        Self {
            backend,
            transfer_id: std::sync::atomic::AtomicU64::new(0),
        }
    }

    /// Get a reference to the backend.
    pub fn backend(&self) -> &B {
        &self.backend
    }

    /// Create a device-local buffer.
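    ///
    /// # Example
    ///
    /// A minimal sketch using the no-op `DummyBackend`; any backend
    /// implementing `MkGpuBackend` works the same way:
    ///
    /// ```rust
    /// use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
    ///
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// let vertices = gpu.create_device_buffer(4096, MkBufferUsage::VERTEX).unwrap();
    /// ```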
    pub fn create_device_buffer(&self, size: usize, usage: MkBufferUsage) -> Result<MkDeviceBuffer<B>, B::Error> {
        let handle = self.backend.create_buffer(size, usage, MkMemoryType::DeviceLocal)?;
        Ok(MkDeviceBuffer::new(handle, size, usage))
    }

    /// Create a staging buffer for CPU-GPU transfers.
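    ///
    /// # Example
    ///
    /// A minimal sketch using the no-op `DummyBackend`:
    ///
    /// ```rust
    /// use memkit_gpu::{MkGpu, DummyBackend};
    ///
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// let staging = gpu.create_staging_buffer(1024).unwrap();
    /// ```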
    pub fn create_staging_buffer(&self, size: usize) -> Result<MkStagingBuffer<B>, B::Error> {
        let usage = MkBufferUsage::TRANSFER_SRC;
        let handle = self.backend.create_buffer(size, usage, MkMemoryType::HostVisible)?;
        Ok(MkStagingBuffer::new(handle, size))
    }

    /// Create a staging buffer and write data to it.
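    ///
    /// # Example
    ///
    /// A minimal sketch using the no-op `DummyBackend`; note that the write
    /// is skipped if the backend cannot map memory:
    ///
    /// ```rust
    /// use memkit_gpu::{MkGpu, DummyBackend};
    ///
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// let data = [1u32, 2, 3, 4];
    /// let staging = gpu.staging_buffer_with_data(&data).unwrap();
    /// ```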
    pub fn staging_buffer_with_data<T: Copy>(&self, data: &[T]) -> Result<MkStagingBuffer<B>, B::Error> {
        let size = std::mem::size_of_val(data);
        let staging = self.create_staging_buffer(size)?;

        // Map and write. If the backend cannot map the buffer, the write is
        // skipped and the buffer is returned unwritten at the requested size.
        if let Some(ptr) = self.backend.map(&staging.handle()) {
            // SAFETY: the mapped region is at least `size` bytes, and `data`
            // is a valid source of exactly `size` bytes.
            unsafe {
                std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, ptr, size);
            }
            self.backend.unmap(&staging.handle());
        }

        Ok(staging)
    }

    /// Transfer data from staging to device buffer.
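    ///
    /// # Example
    ///
    /// A minimal sketch of a full staging upload, using the no-op
    /// `DummyBackend` (12 bytes = three `f32` values):
    ///
    /// ```rust
    /// use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend};
    ///
    /// let gpu = MkGpu::new(DummyBackend::new());
    /// let staging = gpu.staging_buffer_with_data(&[1.0f32, 2.0, 3.0]).unwrap();
    /// let vertices = gpu.create_device_buffer(12, MkBufferUsage::VERTEX).unwrap();
    /// let _transfer = gpu.transfer(&staging, &vertices).unwrap();
    /// gpu.wait_idle().unwrap();
    /// ```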
    pub fn transfer(&self, src: &MkStagingBuffer<B>, dst: &MkDeviceBuffer<B>) -> Result<MkGpuTransfer, B::Error> {
        // Copy only as many bytes as both buffers can hold.
        let size = src.size().min(dst.size());
        self.backend.copy_buffer(&src.handle(), &dst.handle(), size)?;

        let id = self.transfer_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        Ok(MkGpuTransfer::new(id))
    }

    /// Create a batch transfer builder.
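    ///
    /// # Example
    ///
    /// A hypothetical sketch; the builder methods live on
    /// `transfer::MkBatchTransfer`, and the names shown in the comments
    /// below (`add`, `submit`) are placeholders, not the confirmed API:
    ///
    /// ```ignore
    /// let batch = gpu.batch_transfer();
    /// // batch.add(&staging_a, &device_a);
    /// // batch.add(&staging_b, &device_b);
    /// // batch.submit()?;
    /// ```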
    pub fn batch_transfer(&self) -> transfer::MkBatchTransfer<'_, B> {
        transfer::MkBatchTransfer::new(self)
    }

    /// Wait for all pending operations to complete.
    pub fn wait_idle(&self) -> Result<(), B::Error> {
        self.backend.wait_idle()
    }
}