use crate::GraphicsContext;
use std::sync::Arc;

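/// Number of frames kept in flight. Each frame owns an equal slice of the
/// underlying buffer, so CPU writes for the current frame never clobber data
/// the GPU may still be reading from earlier frames.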
const RING_BUFFER_FRAMES: usize = 3;

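/// A sub-range of a [`RingBuffer`] handed out by [`RingBuffer::allocate`].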
pub struct RingBufferAllocation {
    buffer: Arc<wgpu::Buffer>,
    offset: u64,
    size: u64,
}

impl RingBufferAllocation {
    pub fn buffer(&self) -> &wgpu::Buffer {
        &self.buffer
    }

    pub fn offset(&self) -> u64 {
        self.offset
    }

    pub fn size(&self) -> u64 {
        self.size
    }

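    /// Writes `data` into this allocation via the queue. Panics if `data`
    /// does not fit within the allocation.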
    pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
        assert!(
            data.len() as u64 <= self.size,
            "Data size {} exceeds allocation size {}",
            data.len(),
            self.size
        );
        queue.write_buffer(&self.buffer, self.offset, data);
    }

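    /// Returns a binding resource covering exactly this allocation.
    /// Panics if the allocation is zero-sized, since buffer bindings require
    /// a non-zero size.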
    pub fn as_binding(&self) -> wgpu::BindingResource {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.buffer,
            offset: self.offset,
            size: Some(std::num::NonZeroU64::new(self.size).unwrap()),
        })
    }
}

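/// A per-frame bump allocator over a single GPU buffer.
///
/// The buffer is split into `RING_BUFFER_FRAMES` equal regions. Allocations
/// bump within the current frame's region, and [`RingBuffer::next_frame`]
/// rotates to the next region, so a region is only reused after the GPU has
/// had `RING_BUFFER_FRAMES - 1` frames to finish reading from it.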
pub struct RingBuffer {
    buffer: Arc<wgpu::Buffer>,
    /// Total size of the underlying buffer: per-frame size times frame count.
    size: u64,
    /// Current bump offset, measured from the start of the whole buffer.
    offset: u64,
    /// Monotonically increasing frame counter.
    frame: u64,
    context: Arc<GraphicsContext>,
}

impl RingBuffer {
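    /// Creates a ring buffer with `size` bytes available per frame; the
    /// backing buffer is `size * RING_BUFFER_FRAMES` bytes. `COPY_DST` is
    /// added to `usage` so allocations can be written through the queue.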
    pub fn new(
        context: Arc<GraphicsContext>,
        size: u64,
        usage: wgpu::BufferUsages,
    ) -> Self {
        let total_size = size * RING_BUFFER_FRAMES as u64;

        let buffer = context.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Ring Buffer"),
            size: total_size,
            usage: usage | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        Self {
            buffer: Arc::new(buffer),
            size: total_size,
            offset: 0,
            frame: 0,
            context,
        }
    }

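    /// Bump-allocates `size` bytes at `alignment` within the current frame's
    /// region, returning `None` if the region cannot fit the request.
    /// `alignment` must be non-zero.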
    pub fn allocate(&mut self, size: u64, alignment: u64) -> Option<RingBufferAllocation> {
        debug_assert!(alignment > 0, "alignment must be non-zero");

        // Round the current offset up to the requested alignment.
        let aligned_offset = if self.offset % alignment != 0 {
            self.offset + (alignment - (self.offset % alignment))
        } else {
            self.offset
        };

        // Bounds of the region owned by the current frame.
        let frame_size = self.size / RING_BUFFER_FRAMES as u64;
        let frame_start = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
        let frame_end = frame_start + frame_size;

        if aligned_offset + size > frame_end {
            return None;
        }

        let allocation = RingBufferAllocation {
            buffer: self.buffer.clone(),
            offset: aligned_offset,
            size,
        };

        self.offset = aligned_offset + size;

        Some(allocation)
    }

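    /// Advances to the next frame and resets the bump offset to the start of
    /// that frame's region. Call this once per frame, after submitting the
    /// frame's command buffers.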
    pub fn next_frame(&mut self) {
        self.frame += 1;
        let frame_size = self.size / RING_BUFFER_FRAMES as u64;
        self.offset = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
    }

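    /// Resets the allocator back to frame zero. Only safe once the GPU has
    /// finished with all outstanding frames.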
    pub fn reset(&mut self) {
        self.frame = 0;
        self.offset = 0;
    }

    pub fn frame(&self) -> u64 {
        self.frame
    }

    pub fn offset(&self) -> u64 {
        self.offset
    }

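    /// Returns the total size of the underlying buffer, i.e. the per-frame
    /// size times `RING_BUFFER_FRAMES`.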
    pub fn size(&self) -> u64 {
        self.size
    }

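    /// Bytes still available in the current frame's region, not counting any
    /// alignment padding a future allocation may need.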
    pub fn remaining(&self) -> u64 {
        let frame_size = self.size / RING_BUFFER_FRAMES as u64;
        let frame_end = ((self.frame % RING_BUFFER_FRAMES as u64) + 1) * frame_size;
        frame_end.saturating_sub(self.offset)
    }
}

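/// A fixed-size buffer used to shuttle data from the CPU into GPU-local
/// buffers via recorded copy commands.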
pub struct StagingBuffer {
    buffer: wgpu::Buffer,
    size: u64,
}

impl StagingBuffer {
    fn new(context: &GraphicsContext, size: u64) -> Self {
        let buffer = context.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Staging Buffer"),
            size,
            // COPY_DST is required by `Queue::write_buffer` in `write`, and
            // COPY_SRC by the copy_* methods below. MAP_WRITE is not usable
            // here: wgpu only permits MAP_WRITE together with COPY_SRC, so a
            // MAP_WRITE buffer could not be written through the queue.
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });

        Self { buffer, size }
    }

    pub fn buffer(&self) -> &wgpu::Buffer {
        &self.buffer
    }

    pub fn size(&self) -> u64 {
        self.size
    }

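    /// Writes `data` to the start of the buffer. Panics if `data` is larger
    /// than the buffer.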
    pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
        assert!(
            data.len() as u64 <= self.size,
            "Data size {} exceeds buffer size {}",
            data.len(),
            self.size
        );
        queue.write_buffer(&self.buffer, 0, data);
    }

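    /// Records a copy of the entire staging buffer into `dst` at `dst_offset`.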
    pub fn copy_to_buffer(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        dst: &wgpu::Buffer,
        dst_offset: u64,
    ) {
        encoder.copy_buffer_to_buffer(&self.buffer, 0, dst, dst_offset, self.size);
    }

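    /// Records a copy of `size` bytes starting at `src_offset` into `dst` at
    /// `dst_offset`.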
    pub fn copy_region_to_buffer(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        src_offset: u64,
        dst: &wgpu::Buffer,
        dst_offset: u64,
        size: u64,
    ) {
        encoder.copy_buffer_to_buffer(&self.buffer, src_offset, dst, dst_offset, size);
    }
}

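/// A pool of reusable staging buffers. Callers `allocate` a buffer, record
/// their copies, and `recycle` it once the GPU work using it has completed.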
pub struct StagingBufferPool {
    available: Vec<StagingBuffer>,
}

impl StagingBufferPool {
    pub fn new() -> Self {
        Self {
            available: Vec::new(),
        }
    }

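    /// Returns the smallest pooled buffer that can hold `size` bytes (best
    /// fit). If none fits, creates a new buffer with `size` rounded up to the
    /// next power of two.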
    pub fn allocate(&mut self, context: &GraphicsContext, size: u64) -> StagingBuffer {
        // Best fit: pick the smallest available buffer that is large enough.
        let mut best_idx = None;
        let mut best_size = u64::MAX;

        for (idx, buffer) in self.available.iter().enumerate() {
            if buffer.size >= size && buffer.size < best_size {
                best_idx = Some(idx);
                best_size = buffer.size;
            }
        }

        if let Some(idx) = best_idx {
            self.available.swap_remove(idx)
        } else {
            // No fit: create a fresh buffer, rounded up to a power of two so
            // it is more likely to be reusable once recycled.
            let rounded_size = size.next_power_of_two();
            StagingBuffer::new(context, rounded_size)
        }
    }

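    /// Returns a buffer to the pool for reuse. The caller must ensure the GPU
    /// has finished with it.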
    pub fn recycle(&mut self, buffer: StagingBuffer) {
        self.available.push(buffer);
    }

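    /// Drops all pooled buffers, releasing their GPU memory.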
    pub fn clear(&mut self) {
        self.available.clear();
    }

    pub fn available_count(&self) -> usize {
        self.available.len()
    }

    pub fn total_available_size(&self) -> u64 {
        self.available.iter().map(|b| b.size).sum()
    }
}

impl Default for StagingBufferPool {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ring_buffer_allocation() {
        let ctx = GraphicsContext::new_owned_sync_or_panic();
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        let alloc1 = ring.allocate(256, 256);
        assert!(alloc1.is_some());
        let alloc1 = alloc1.unwrap();
        assert_eq!(alloc1.offset, 0);
        assert_eq!(alloc1.size, 256);

        let alloc2 = ring.allocate(256, 256);
        assert!(alloc2.is_some());
        let alloc2 = alloc2.unwrap();
        assert_eq!(alloc2.offset, 256);
        assert_eq!(alloc2.size, 256);
    }

    #[test]
    fn test_ring_buffer_frame_advance() {
        let ctx = GraphicsContext::new_owned_sync_or_panic();
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        let alloc1 = ring.allocate(512, 256);
        assert!(alloc1.is_some());

        ring.next_frame();
        assert_eq!(ring.frame(), 1);

        let alloc2 = ring.allocate(512, 256);
        assert!(alloc2.is_some());
        let alloc2 = alloc2.unwrap();
        // Frame 1's region starts one frame-size (1024 bytes) into the buffer.
        assert_eq!(alloc2.offset, 1024);
    }

    #[test]
    fn test_staging_pool() {
        let ctx = GraphicsContext::new_owned_sync_or_panic();
        let mut pool = StagingBufferPool::new();

        let buffer1 = pool.allocate(&ctx, 1024);
        assert_eq!(buffer1.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(buffer1);
        assert_eq!(pool.available_count(), 1);

        let buffer2 = pool.allocate(&ctx, 1024);
        assert_eq!(buffer2.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(buffer2);
    }

    #[test]
    fn test_staging_pool_size_matching() {
        let ctx = GraphicsContext::new_owned_sync_or_panic();
        let mut pool = StagingBufferPool::new();

        pool.recycle(StagingBuffer::new(&ctx, 512));
        pool.recycle(StagingBuffer::new(&ctx, 1024));
        pool.recycle(StagingBuffer::new(&ctx, 2048));

        // Best fit: a 600-byte request should pick the 1024-byte buffer,
        // not the 2048-byte one.
        let buffer = pool.allocate(&ctx, 600);
        assert_eq!(buffer.size(), 1024);
        assert_eq!(pool.available_count(), 2);
    }
}