1use astrelis_core::profiling::profile_function;
44
45use crate::GraphicsContext;
46use std::sync::Arc;
47
/// Number of in-flight frames the ring buffer is partitioned into. Each frame
/// gets an equal, non-overlapping slice of the backing buffer so that GPU work
/// from previous frames never reads data the CPU is currently overwriting.
const RING_BUFFER_FRAMES: usize = 3;
50
/// A sub-range of a [`RingBuffer`]'s backing GPU buffer, handed out for the
/// current frame. Holds a shared handle to the buffer so it stays alive even
/// if the ring buffer is dropped first.
pub struct RingBufferAllocation {
    // Shared handle to the backing GPU buffer.
    buffer: Arc<wgpu::Buffer>,
    // Byte offset of this allocation from the start of the backing buffer.
    offset: u64,
    // Size of this allocation in bytes.
    size: u64,
}
60
61impl RingBufferAllocation {
62 pub fn buffer(&self) -> &wgpu::Buffer {
64 &self.buffer
65 }
66
67 pub fn offset(&self) -> u64 {
69 self.offset
70 }
71
72 pub fn size(&self) -> u64 {
74 self.size
75 }
76
77 pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
83 assert!(
84 data.len() as u64 <= self.size,
85 "Data size {} exceeds allocation size {}",
86 data.len(),
87 self.size
88 );
89 queue.write_buffer(&self.buffer, self.offset, data);
90 }
91
92 pub fn as_binding(&self) -> wgpu::BindingResource<'_> {
94 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
95 buffer: &self.buffer,
96 offset: self.offset,
97 size: Some(std::num::NonZeroU64::new(self.size).unwrap()),
98 })
99 }
100}
101
/// A per-frame bump allocator over a single GPU buffer, split into
/// `RING_BUFFER_FRAMES` equal regions so frames in flight never alias.
pub struct RingBuffer {
    // Shared handle to the backing GPU buffer (shared with allocations).
    buffer: Arc<wgpu::Buffer>,
    // TOTAL size of the backing buffer in bytes (per-frame size times
    // RING_BUFFER_FRAMES), not the per-frame size passed to `new`.
    size: u64,
    // Bump cursor: next free byte offset within the current frame's region,
    // measured from the start of the whole buffer.
    offset: u64,
    // Monotonically increasing frame counter; `frame % RING_BUFFER_FRAMES`
    // selects the active region.
    frame: u64,
}
116
117impl RingBuffer {
118 pub fn new(context: Arc<GraphicsContext>, size: u64, usage: wgpu::BufferUsages) -> Self {
126 let total_size = size * RING_BUFFER_FRAMES as u64;
127
128 let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
129 label: Some("Ring Buffer"),
130 size: total_size,
131 usage: usage | wgpu::BufferUsages::COPY_DST,
132 mapped_at_creation: false,
133 });
134
135 Self {
136 buffer: Arc::new(buffer),
137 size: total_size,
138 offset: 0,
139 frame: 0,
140 }
141 }
142
143 pub fn allocate(&mut self, size: u64, alignment: u64) -> Option<RingBufferAllocation> {
154 profile_function!();
155 let aligned_offset = if !self.offset.is_multiple_of(alignment) {
157 self.offset + (alignment - (self.offset % alignment))
158 } else {
159 self.offset
160 };
161
162 let frame_size = self.size / RING_BUFFER_FRAMES as u64;
164 let frame_start = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
165 let frame_end = frame_start + frame_size;
166
167 if aligned_offset + size > frame_end {
168 return None;
169 }
170
171 let allocation = RingBufferAllocation {
172 buffer: self.buffer.clone(),
173 offset: aligned_offset,
174 size,
175 };
176
177 self.offset = aligned_offset + size;
178
179 Some(allocation)
180 }
181
182 pub fn next_frame(&mut self) {
187 self.frame += 1;
188 let frame_size = self.size / RING_BUFFER_FRAMES as u64;
189 self.offset = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
190 }
191
192 pub fn reset(&mut self) {
194 self.frame = 0;
195 self.offset = 0;
196 }
197
198 pub fn frame(&self) -> u64 {
200 self.frame
201 }
202
203 pub fn offset(&self) -> u64 {
205 self.offset
206 }
207
208 pub fn size(&self) -> u64 {
210 self.size
211 }
212
213 pub fn remaining(&self) -> u64 {
215 let frame_size = self.size / RING_BUFFER_FRAMES as u64;
216 let frame_end = ((self.frame % RING_BUFFER_FRAMES as u64) + 1) * frame_size;
217 frame_end.saturating_sub(self.offset)
218 }
219}
220
/// A CPU-writable GPU buffer (`MAP_WRITE | COPY_SRC`) used to stage data
/// before copying it into device-local buffers.
pub struct StagingBuffer {
    // The underlying GPU buffer.
    buffer: wgpu::Buffer,
    // Size of the buffer in bytes, as requested at creation.
    size: u64,
}
228
229impl StagingBuffer {
230 fn new(context: &GraphicsContext, size: u64) -> Self {
232 let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
233 label: Some("Staging Buffer"),
234 size,
235 usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC,
236 mapped_at_creation: false,
237 });
238
239 Self { buffer, size }
240 }
241
242 pub fn buffer(&self) -> &wgpu::Buffer {
244 &self.buffer
245 }
246
247 pub fn size(&self) -> u64 {
249 self.size
250 }
251
252 pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
254 assert!(
255 data.len() as u64 <= self.size,
256 "Data size {} exceeds buffer size {}",
257 data.len(),
258 self.size
259 );
260 queue.write_buffer(&self.buffer, 0, data);
261 }
262
263 pub fn copy_to_buffer(
265 &self,
266 encoder: &mut wgpu::CommandEncoder,
267 dst: &wgpu::Buffer,
268 dst_offset: u64,
269 ) {
270 encoder.copy_buffer_to_buffer(&self.buffer, 0, dst, dst_offset, self.size);
271 }
272
273 pub fn copy_region_to_buffer(
275 &self,
276 encoder: &mut wgpu::CommandEncoder,
277 src_offset: u64,
278 dst: &wgpu::Buffer,
279 dst_offset: u64,
280 size: u64,
281 ) {
282 encoder.copy_buffer_to_buffer(&self.buffer, src_offset, dst, dst_offset, size);
283 }
284}
285
/// A best-fit recycling pool of [`StagingBuffer`]s, avoiding repeated GPU
/// buffer creation for transient uploads.
pub struct StagingBufferPool {
    // Recycled buffers available for reuse, in no particular order.
    available: Vec<StagingBuffer>,
}
291
292impl StagingBufferPool {
293 pub fn new() -> Self {
295 Self {
296 available: Vec::new(),
297 }
298 }
299
300 pub fn allocate(&mut self, context: &GraphicsContext, size: u64) -> StagingBuffer {
305 profile_function!();
306 let mut best_idx = None;
309 let mut best_size = u64::MAX;
310
311 for (idx, buffer) in self.available.iter().enumerate() {
312 if buffer.size >= size && buffer.size < best_size {
313 best_idx = Some(idx);
314 best_size = buffer.size;
315 }
316 }
317
318 if let Some(idx) = best_idx {
319 self.available.swap_remove(idx)
320 } else {
321 let rounded_size = size.next_power_of_two();
324 StagingBuffer::new(context, rounded_size)
325 }
326 }
327
328 pub fn recycle(&mut self, buffer: StagingBuffer) {
330 self.available.push(buffer);
331 }
332
333 pub fn clear(&mut self) {
335 self.available.clear();
336 }
337
338 pub fn available_count(&self) -> usize {
340 self.available.len()
341 }
342
343 pub fn total_available_size(&self) -> u64 {
345 self.available.iter().map(|b| b.size).sum()
346 }
347}
348
349impl Default for StagingBufferPool {
350 fn default() -> Self {
351 Self::new()
352 }
353}
354
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ring_buffer_allocation() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        // First allocation starts at the very beginning of frame 0's region.
        let first = ring.allocate(256, 256).expect("first allocation should succeed");
        assert_eq!(first.offset(), 0);
        assert_eq!(first.size(), 256);

        // Cursor was bumped, so the second allocation lands right after.
        let second = ring.allocate(256, 256).expect("second allocation should succeed");
        assert_eq!(second.offset(), 256);
        assert_eq!(second.size(), 256);
    }

    #[test]
    fn test_ring_buffer_frame_advance() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        assert!(ring.allocate(512, 256).is_some());

        ring.next_frame();
        assert_eq!(ring.frame(), 1);

        // Frame 1 owns the second 1024-byte region, so its first allocation
        // starts at offset 1024 into the backing buffer.
        let alloc = ring.allocate(512, 256).expect("allocation in frame 1 should succeed");
        assert_eq!(alloc.offset(), 1024);
    }

    #[test]
    fn test_staging_pool() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut pool = StagingBufferPool::new();

        // A fresh pool has nothing to reuse, so this creates a new buffer.
        let first = pool.allocate(&ctx, 1024);
        assert_eq!(first.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(first);
        assert_eq!(pool.available_count(), 1);

        // The recycled buffer is reused rather than creating a new one.
        let second = pool.allocate(&ctx, 1024);
        assert_eq!(second.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(second);
    }

    #[test]
    fn test_staging_pool_size_matching() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut pool = StagingBufferPool::new();

        pool.recycle(StagingBuffer::new(&ctx, 512));
        pool.recycle(StagingBuffer::new(&ctx, 1024));
        pool.recycle(StagingBuffer::new(&ctx, 2048));

        // Best-fit: 600 bytes skips the 512 buffer and takes the 1024 one.
        let chosen = pool.allocate(&ctx, 600);
        assert_eq!(chosen.size(), 1024);
        assert_eq!(pool.available_count(), 2);
    }
}