//! memkit-gpu 0.2.0-beta.1
//!
//! Integration tests for backend-agnostic GPU memory management in memkit.
use memkit_gpu::{MkGpu, MkBufferUsage, DummyBackend, DummyBackendConfig};

/// Smoke test: an `MkGpu` can be constructed around a `DummyBackend`.
///
/// The config is built only to verify `DummyBackendConfig::default()` exists;
/// `DummyBackend::new()` does not consume it, so it is `_`-prefixed to
/// silence the unused-variable warning (same convention as `_gpu` below).
/// NOTE(review): if `DummyBackend` grows a `with_config` constructor, this
/// test should be updated to actually pass the config through.
#[test]
fn test_gpu_creation() {
    let _config = DummyBackendConfig::default();
    let backend = DummyBackend::new();
    let _gpu = MkGpu::new(backend);
}

/// A device buffer reports back exactly the size and usage flags it was
/// created with (1024 bytes, vertex usage).
#[test]
fn test_buffer_creation() {
    let gpu = MkGpu::new(DummyBackend::new());

    let vertex_buf = gpu
        .create_device_buffer(1024, MkBufferUsage::VERTEX)
        .unwrap();

    assert_eq!(vertex_buf.size(), 1024);
    assert_eq!(vertex_buf.usage(), MkBufferUsage::VERTEX);
}

/// Full upload path: host data -> staging buffer -> device buffer.
///
/// Checks that the staging buffer sizes itself from the source slice
/// (4 x f32 = 16 bytes), that the first transfer gets id 0, and that
/// `wait_idle` completes without error.
#[test]
fn test_staging_and_transfer() {
    let gpu = MkGpu::new(DummyBackend::new());

    // Host-side source data: four f32 values, 16 bytes total.
    let host_data = [1.0f32, 2.0, 3.0, 4.0];

    // Staging buffer takes its size from the slice contents.
    let upload_src = gpu.staging_buffer_with_data(&host_data).unwrap();
    assert_eq!(upload_src.size(), 16);

    // Matching device-side destination, flagged as a transfer target.
    let upload_dst = gpu
        .create_device_buffer(16, MkBufferUsage::TRANSFER_DST | MkBufferUsage::VERTEX)
        .unwrap();

    // Kick off the copy; the very first transfer is expected to have id 0.
    let ticket = gpu.transfer(&upload_src, &upload_dst).unwrap();
    assert_eq!(ticket.id(), 0);

    // Block until the backend reports all work done.
    gpu.wait_idle().unwrap();
}

/// Two independent staging->device copies submitted through a single
/// batch-transfer builder; the builder consumes itself on each `copy`
/// and on `submit`, so the whole batch is expressed as one chain.
#[test]
fn test_batch_transfer() {
    let gpu = MkGpu::new(DummyBackend::new());

    // First source/destination pair (100 bytes).
    let stage_a = gpu.create_staging_buffer(100).unwrap();
    let device_a = gpu
        .create_device_buffer(100, MkBufferUsage::TRANSFER_DST)
        .unwrap();

    // Second source/destination pair (200 bytes).
    let stage_b = gpu.create_staging_buffer(200).unwrap();
    let device_b = gpu
        .create_device_buffer(200, MkBufferUsage::TRANSFER_DST)
        .unwrap();

    // Builder pattern: each call consumes and returns the batch,
    // so the copies and the submit fold into a single expression.
    gpu.batch_transfer()
        .copy(&stage_a, &device_a)
        .copy(&stage_b, &device_b)
        .submit()
        .unwrap();

    gpu.wait_idle().unwrap();
}