use astrelis_core::profiling::profile_function;
use crate::GraphicsContext;
use std::sync::Arc;
// Number of frame regions the ring buffer is divided into, so this many
// frames can be in flight without the CPU overwriting data the GPU still reads.
const RING_BUFFER_FRAMES: usize = 3;
/// A sub-range of a ring buffer's GPU buffer reserved for one upload.
pub struct RingBufferAllocation {
// Shared handle to the backing GPU buffer; keeps it alive while this allocation exists.
buffer: Arc<wgpu::Buffer>,
// Absolute byte offset of this allocation within `buffer`.
offset: u64,
// Length of this allocation in bytes.
size: u64,
}
impl RingBufferAllocation {
    /// The GPU buffer backing this allocation.
    pub fn buffer(&self) -> &wgpu::Buffer {
        &self.buffer
    }

    /// Absolute byte offset of this allocation within the backing buffer.
    pub fn offset(&self) -> u64 {
        self.offset
    }

    /// Size of this allocation in bytes.
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Uploads `data` into this allocation via the queue.
    ///
    /// # Panics
    /// Panics if `data` is larger than the allocation.
    pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
        assert!(
            data.len() as u64 <= self.size,
            "Data size {} exceeds allocation size {}",
            data.len(),
            self.size
        );
        queue.write_buffer(&self.buffer, self.offset, data);
    }

    /// Returns a bind-group binding resource covering exactly this allocation.
    ///
    /// # Panics
    /// Panics if the allocation size is zero — buffer bindings require a
    /// non-zero size.
    pub fn as_binding(&self) -> wgpu::BindingResource<'_> {
        // `expect` instead of a bare `unwrap` so a zero-sized allocation
        // produces a diagnosable panic message.
        let size = std::num::NonZeroU64::new(self.size)
            .expect("cannot bind a zero-sized ring buffer allocation");
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.buffer,
            offset: self.offset,
            size: Some(size),
        })
    }
}
/// A per-frame linear allocator over a single GPU buffer.
///
/// The buffer is split into `RING_BUFFER_FRAMES` equal regions; advancing the
/// frame moves the write cursor to the next region so data the GPU may still
/// be reading is not overwritten.
pub struct RingBuffer {
// Backing GPU buffer, shared with handed-out allocations.
buffer: Arc<wgpu::Buffer>,
// Total buffer size in bytes (per-frame size * RING_BUFFER_FRAMES).
size: u64,
// Current write cursor, as an absolute byte offset into the buffer.
offset: u64,
// Monotonic frame counter; `frame % RING_BUFFER_FRAMES` selects the region.
frame: u64,
}
impl RingBuffer {
    /// Creates a ring buffer with `size` bytes of capacity per frame.
    ///
    /// The GPU buffer is `size * RING_BUFFER_FRAMES` bytes long so each
    /// in-flight frame writes into its own region. `COPY_DST` is always added
    /// to `usage` so the buffer can be filled via `Queue::write_buffer`.
    ///
    /// # Panics
    /// Panics if the total size overflows `u64`.
    pub fn new(context: Arc<GraphicsContext>, size: u64, usage: wgpu::BufferUsages) -> Self {
        // checked_mul turns a silent wrap (release builds) into a clear panic.
        let total_size = size
            .checked_mul(RING_BUFFER_FRAMES as u64)
            .expect("ring buffer total size overflows u64");
        let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
            label: Some("Ring Buffer"),
            size: total_size,
            usage: usage | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        Self {
            buffer: Arc::new(buffer),
            size: total_size,
            offset: 0,
            frame: 0,
        }
    }

    /// Reserves `size` bytes aligned to `alignment` inside the current
    /// frame's region, advancing the write cursor past the reservation.
    ///
    /// Returns `None` when the current frame's region cannot hold the request.
    ///
    /// # Panics
    /// Panics if `alignment` is zero.
    pub fn allocate(&mut self, size: u64, alignment: u64) -> Option<RingBufferAllocation> {
        profile_function!();
        // Round the cursor up to the requested alignment.
        let aligned_offset = self.offset.next_multiple_of(alignment);
        let frame_size = self.size / RING_BUFFER_FRAMES as u64;
        let frame_start = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
        let frame_end = frame_start + frame_size;
        // checked_add also rejects pathological sizes that would wrap u64 and
        // otherwise slip past the bounds check below.
        let end = aligned_offset.checked_add(size)?;
        if end > frame_end {
            return None;
        }
        let allocation = RingBufferAllocation {
            buffer: self.buffer.clone(),
            offset: aligned_offset,
            size,
        };
        self.offset = end;
        Some(allocation)
    }

    /// Advances to the next frame, moving the cursor to the start of that
    /// frame's region.
    pub fn next_frame(&mut self) {
        self.frame += 1;
        let frame_size = self.size / RING_BUFFER_FRAMES as u64;
        self.offset = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
    }

    /// Resets to frame 0 with the cursor at the start of the buffer.
    pub fn reset(&mut self) {
        self.frame = 0;
        self.offset = 0;
    }

    /// Monotonic frame counter (incremented by `next_frame`, never reduced
    /// except by `reset`).
    pub fn frame(&self) -> u64 {
        self.frame
    }

    /// Current write cursor as an absolute byte offset into the buffer.
    pub fn offset(&self) -> u64 {
        self.offset
    }

    /// Total size of the underlying buffer (all frame regions) in bytes.
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Bytes left between the cursor and the end of the current frame's region.
    pub fn remaining(&self) -> u64 {
        let frame_size = self.size / RING_BUFFER_FRAMES as u64;
        let frame_end = ((self.frame % RING_BUFFER_FRAMES as u64) + 1) * frame_size;
        frame_end.saturating_sub(self.offset)
    }
}
/// A fixed-size intermediate buffer used to copy data into other GPU buffers.
pub struct StagingBuffer {
// Underlying GPU buffer.
buffer: wgpu::Buffer,
// Buffer size in bytes.
size: u64,
}
impl StagingBuffer {
    /// Creates a staging buffer of exactly `size` bytes.
    fn new(context: &GraphicsContext, size: u64) -> Self {
        let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
            label: Some("Staging Buffer"),
            size,
            // `write` uploads through Queue::write_buffer, which requires
            // COPY_DST. The previous MAP_WRITE | COPY_SRC usage made every
            // `write` call a validation error — and wgpu only allows
            // MAP_WRITE to be combined with COPY_SRC (absent the
            // MAPPABLE_PRIMARY_BUFFERS feature) — so the buffer is declared
            // as a plain copy source + destination instead.
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });
        Self { buffer, size }
    }

    /// The underlying GPU buffer.
    pub fn buffer(&self) -> &wgpu::Buffer {
        &self.buffer
    }

    /// Buffer size in bytes.
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Uploads `data` to the start of the staging buffer via the queue.
    ///
    /// # Panics
    /// Panics if `data` is larger than the buffer.
    pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
        assert!(
            data.len() as u64 <= self.size,
            "Data size {} exceeds buffer size {}",
            data.len(),
            self.size
        );
        queue.write_buffer(&self.buffer, 0, data);
    }

    /// Records a copy of the entire staging buffer into `dst` at `dst_offset`.
    pub fn copy_to_buffer(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        dst: &wgpu::Buffer,
        dst_offset: u64,
    ) {
        encoder.copy_buffer_to_buffer(&self.buffer, 0, dst, dst_offset, self.size);
    }

    /// Records a copy of `size` bytes starting at `src_offset` into `dst` at
    /// `dst_offset`.
    pub fn copy_region_to_buffer(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        src_offset: u64,
        dst: &wgpu::Buffer,
        dst_offset: u64,
        size: u64,
    ) {
        encoder.copy_buffer_to_buffer(&self.buffer, src_offset, dst, dst_offset, size);
    }
}
/// Recycling pool of staging buffers, matched to requests by best fit.
pub struct StagingBufferPool {
// Buffers ready for reuse; order is not significant (swap_remove is used).
available: Vec<StagingBuffer>,
}
impl StagingBufferPool {
    /// Creates an empty pool.
    pub fn new() -> Self {
        Self {
            available: Vec::new(),
        }
    }

    /// Returns a recycled buffer of at least `size` bytes (best fit), or
    /// creates a fresh one rounded up to the next power of two.
    pub fn allocate(&mut self, context: &GraphicsContext, size: u64) -> StagingBuffer {
        profile_function!();
        // Best fit: the smallest available buffer that is still large enough
        // (first match wins on ties, like the original linear scan).
        let best_idx = self
            .available
            .iter()
            .enumerate()
            .filter(|(_, buffer)| buffer.size >= size)
            .min_by_key(|(_, buffer)| buffer.size)
            .map(|(idx, _)| idx);
        if let Some(idx) = best_idx {
            self.available.swap_remove(idx)
        } else {
            // Round up to a power of two so recycled buffers cluster into a
            // small number of size classes. checked_next_power_of_two avoids
            // the debug panic / release wrap-to-zero of next_power_of_two for
            // sizes above 2^63; fall back to the exact size in that case.
            let rounded_size = size.checked_next_power_of_two().unwrap_or(size);
            StagingBuffer::new(context, rounded_size)
        }
    }

    /// Returns a buffer to the pool for later reuse.
    pub fn recycle(&mut self, buffer: StagingBuffer) {
        self.available.push(buffer);
    }

    /// Drops all pooled buffers.
    pub fn clear(&mut self) {
        self.available.clear();
    }

    /// Number of buffers currently available for reuse.
    pub fn available_count(&self) -> usize {
        self.available.len()
    }

    /// Combined size in bytes of all pooled buffers.
    pub fn total_available_size(&self) -> u64 {
        self.available.iter().map(|b| b.size).sum()
    }
}
impl Default for StagingBufferPool {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Sequential allocations within one frame should pack back-to-back.
    #[test]
    fn test_ring_buffer_allocation() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        let first = ring
            .allocate(256, 256)
            .expect("first allocation should succeed");
        assert_eq!(first.offset(), 0);
        assert_eq!(first.size(), 256);

        let second = ring
            .allocate(256, 256)
            .expect("second allocation should succeed");
        assert_eq!(second.offset(), 256);
        assert_eq!(second.size(), 256);
    }

    // Advancing the frame must move allocations into the next frame's region.
    #[test]
    fn test_ring_buffer_frame_advance() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        assert!(ring.allocate(512, 256).is_some());
        ring.next_frame();
        assert_eq!(ring.frame(), 1);

        let alloc = ring
            .allocate(512, 256)
            .expect("allocation in frame 1 should succeed");
        // Frame 1's region starts one frame-size (1024 bytes) into the buffer.
        assert_eq!(alloc.offset(), 1024);
    }

    // Recycled buffers should be handed back out instead of allocating anew.
    #[test]
    fn test_staging_pool() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut pool = StagingBufferPool::new();

        let first = pool.allocate(&ctx, 1024);
        assert_eq!(first.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(first);
        assert_eq!(pool.available_count(), 1);

        let second = pool.allocate(&ctx, 1024);
        assert_eq!(second.size(), 1024);
        assert_eq!(pool.available_count(), 0);
        pool.recycle(second);
    }

    // Best-fit matching should pick the smallest buffer that fits the request.
    #[test]
    fn test_staging_pool_size_matching() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut pool = StagingBufferPool::new();
        for size in [512, 1024, 2048] {
            pool.recycle(StagingBuffer::new(&ctx, size));
        }

        // 600 bytes fits in 1024 and 2048; best fit is 1024.
        let buffer = pool.allocate(&ctx, 600);
        assert_eq!(buffer.size(), 1024);
        assert_eq!(pool.available_count(), 2);
    }
}