use crate::{
util::align_to, Buffer, BufferAddress, BufferDescriptor, BufferSize, BufferSlice, BufferUsages,
BufferViewMut, CommandEncoder, Device, MapMode,
};
use alloc::vec::Vec;
use core::fmt;
use std::sync::mpsc;
/// Efficient way of writing data to a GPU buffer through a pool of internal
/// staging buffers ("chunks").
///
/// Chunks move through three states:
/// * **active** — mapped and accepting suballocations (`allocate`);
/// * **closed** — unmapped by `finish`, their copy commands awaiting submission;
/// * **free** — asynchronously re-mapped by `recall` and ready for reuse.
pub struct StagingBelt {
    /// Size of each newly created staging buffer; a single allocation larger
    /// than this gets a buffer of exactly the required size (see `allocate`).
    chunk_size: BufferAddress,
    /// Chunks currently mapped and accepting writes.
    active_chunks: Vec<Chunk>,
    /// Chunks unmapped by `finish`, waiting for `recall`.
    closed_chunks: Vec<Chunk>,
    /// Chunks that have finished re-mapping and can be reused.
    free_chunks: Vec<Chunk>,
    // The mpsc endpoints are wrapped in `Exclusive` so the belt is `Sync`
    // even though the endpoints themselves may not be; they are only ever
    // accessed through `&mut self`.
    sender: Exclusive<mpsc::Sender<Chunk>>,
    receiver: Exclusive<mpsc::Receiver<Chunk>>,
}
impl StagingBelt {
    /// Create a new staging belt.
    ///
    /// `chunk_size` is the size of each new internal staging buffer. A single
    /// allocation larger than this gets a buffer of exactly the required size
    /// instead (see [`Self::allocate`]).
    pub fn new(chunk_size: BufferAddress) -> Self {
        // The channel carries chunks whose asynchronous re-mapping (started in
        // `recall`) has completed back to the belt's free list.
        let (sender, receiver) = mpsc::channel();
        StagingBelt {
            chunk_size,
            active_chunks: Vec::new(),
            closed_chunks: Vec::new(),
            free_chunks: Vec::new(),
            sender: Exclusive::new(sender),
            receiver: Exclusive::new(receiver),
        }
    }

    /// Allocate staging memory for `size` bytes, record into `encoder` a copy
    /// from that memory into `target` at `offset`, and return a mutable view
    /// through which the caller writes the data to be transferred.
    ///
    /// The copy command is recorded immediately; the bytes written through the
    /// returned [`BufferViewMut`] are the ones copied when the encoder's
    /// commands are eventually submitted (after [`Self::finish`]).
    pub fn write_buffer(
        &mut self,
        encoder: &mut CommandEncoder,
        target: &Buffer,
        offset: BufferAddress,
        size: BufferSize,
        device: &Device,
    ) -> BufferViewMut {
        let slice_of_belt = self.allocate(
            size,
            // Buffer-to-buffer copies must be aligned to COPY_BUFFER_ALIGNMENT.
            // The `const` block evaluates the unwrap at compile time.
            const { BufferSize::new(crate::COPY_BUFFER_ALIGNMENT).unwrap() },
            device,
        );
        encoder.copy_buffer_to_buffer(
            slice_of_belt.buffer(),
            slice_of_belt.offset(),
            target,
            offset,
            size.get(),
        );
        slice_of_belt.get_mapped_range_mut()
    }

    /// Allocate a mapped slice of staging memory of `size` bytes whose start
    /// offset is aligned to `alignment` (raised to at least `MAP_ALIGNMENT`).
    ///
    /// Reuses an active or free chunk with enough remaining space when
    /// possible, and otherwise creates a new mapped buffer of
    /// `max(chunk_size, size)` bytes.
    ///
    /// # Panics
    ///
    /// Panics if `alignment` is not a power of two.
    pub fn allocate(
        &mut self,
        size: BufferSize,
        alignment: BufferSize,
        device: &Device,
    ) -> BufferSlice<'_> {
        assert!(
            alignment.get().is_power_of_two(),
            "alignment must be a power of two, not {alignment}"
        );
        // Never align more loosely than MAP_ALIGNMENT, since the slice will be
        // accessed through a mapped range.
        let alignment = alignment.get().max(crate::MAP_ALIGNMENT);

        let mut chunk = if let Some(index) = self
            .active_chunks
            .iter()
            .position(|chunk| chunk.can_allocate(size, alignment))
        {
            // An already-active chunk has room; take it out temporarily so we
            // can mutate it without holding a borrow of the Vec.
            self.active_chunks.swap_remove(index)
        } else {
            // Pull in any chunks whose re-mapping has completed before
            // consulting the free list.
            self.receive_chunks();

            if let Some(index) = self
                .free_chunks
                .iter()
                .position(|chunk| chunk.can_allocate(size, alignment))
            {
                self.free_chunks.swap_remove(index)
            } else {
                // No existing chunk fits: create a fresh mapped buffer that is
                // at least `chunk_size` bytes but large enough for this request.
                Chunk {
                    buffer: device.create_buffer(&BufferDescriptor {
                        label: Some("(wgpu internal) StagingBelt staging buffer"),
                        size: self.chunk_size.max(size.get()),
                        usage: BufferUsages::MAP_WRITE | BufferUsages::COPY_SRC,
                        mapped_at_creation: true,
                    }),
                    offset: 0,
                }
            }
        };

        let allocation_offset = chunk.allocate(size, alignment);

        self.active_chunks.push(chunk);
        // Re-borrow the chunk from `active_chunks` so the returned slice
        // borrows from `self` rather than from the (now moved) local.
        let chunk = self.active_chunks.last().unwrap();

        chunk
            .buffer
            .slice(allocation_offset..allocation_offset + size.get())
    }

    /// Unmap all actively-used staging buffers, making the data written so far
    /// available to the copy commands recorded by [`Self::write_buffer`].
    /// Call this before submitting the corresponding command buffer.
    pub fn finish(&mut self) {
        for chunk in self.active_chunks.drain(..) {
            chunk.buffer.unmap();
            self.closed_chunks.push(chunk);
        }
    }

    /// Start reclaiming the buffers unmapped by [`Self::finish`]: each closed
    /// chunk is asynchronously re-mapped for writing and sends itself back to
    /// the belt through the internal channel once its mapping completes.
    pub fn recall(&mut self) {
        // Also drain any chunks that finished mapping since the last call.
        self.receive_chunks();

        for chunk in self.closed_chunks.drain(..) {
            // Each `map_async` callback needs its own owned sender to move in.
            let sender = self.sender.get_mut().clone();
            chunk
                .buffer
                .clone()
                .slice(..)
                .map_async(MapMode::Write, move |_| {
                    // If the belt (and thus the receiver) was dropped, the
                    // send fails and the chunk is simply discarded here.
                    let _ = sender.send(chunk);
                });
        }
    }

    /// Move every chunk the `recall` callbacks have sent back into the free
    /// list, resetting its allocation offset for reuse.
    fn receive_chunks(&mut self) {
        while let Ok(mut chunk) = self.receiver.get_mut().try_recv() {
            chunk.offset = 0;
            self.free_chunks.push(chunk);
        }
    }
}
impl fmt::Debug for StagingBelt {
    // Summarize the belt by chunk counts instead of dumping buffer contents;
    // `finish_non_exhaustive` signals that some fields are intentionally omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut state = f.debug_struct("StagingBelt");
        state.field("chunk_size", &self.chunk_size);
        state.field("active_chunks", &self.active_chunks.len());
        state.field("closed_chunks", &self.closed_chunks.len());
        state.field("free_chunks", &self.free_chunks.len());
        state.finish_non_exhaustive()
    }
}
/// One staging buffer together with its bump-allocation state.
struct Chunk {
    buffer: Buffer,
    /// End of the most recent suballocation; the next allocation starts at
    /// this offset rounded up to the requested alignment.
    offset: BufferAddress,
}
impl Chunk {
    /// Returns true if a suballocation of `size` bytes aligned to `alignment`
    /// fits in the remaining space of this chunk's buffer.
    fn can_allocate(&self, size: BufferSize, alignment: BufferAddress) -> bool {
        let alloc_start = align_to(self.offset, alignment);
        // Checked arithmetic: a pathologically large `size` must report
        // "does not fit" rather than wrapping in release builds (where u64
        // addition does not trap) and spuriously passing the comparison.
        match alloc_start.checked_add(size.get()) {
            Some(alloc_end) => alloc_end <= self.buffer.size(),
            None => false,
        }
    }

    /// Bump-allocates `size` bytes aligned to `alignment`, advancing the
    /// chunk's offset and returning the start offset of the allocation.
    ///
    /// # Panics
    ///
    /// Panics if the allocation overflows or does not fit in the buffer;
    /// callers are expected to check `can_allocate` first or to have created
    /// a large-enough buffer.
    fn allocate(&mut self, size: BufferSize, alignment: BufferAddress) -> BufferAddress {
        let alloc_start = align_to(self.offset, alignment);
        let alloc_end = alloc_start
            .checked_add(size.get())
            .expect("staging belt allocation size overflow");
        assert!(alloc_end <= self.buffer.size());
        self.offset = alloc_end;
        alloc_start
    }
}
use exclusive::Exclusive;
/// Minimal stand-in for `std::sync::Exclusive`: a wrapper that is
/// unconditionally `Sync` because it only hands out access via `&mut self`.
mod exclusive {
    pub(super) struct Exclusive<T>(T);

    // SAFETY: `Exclusive` exposes its contents only through `get_mut`, which
    // requires `&mut Exclusive<T>`. A shared `&Exclusive<T>` grants no access
    // to `T` at all, so sharing it across threads cannot create concurrent
    // access to `T`, making the unconditional `Sync` sound. (Moving the value
    // to another thread still requires `T: Send`, which the auto-derived
    // `Send` impl enforces.)
    unsafe impl<T> Sync for Exclusive<T> {}

    impl<T> Exclusive<T> {
        pub fn new(value: T) -> Self {
            Self(value)
        }

        pub fn get_mut(&mut self) -> &mut T {
            &mut self.0
        }
    }
}