1use astrelis_core::profiling::profile_function;
44
45use crate::GraphicsContext;
46use std::sync::Arc;
47
/// Number of per-frame regions the ring buffer is divided into.
/// Three regions match a triple-buffering scheme so the CPU can fill one
/// region while earlier frames are still in flight on the GPU —
/// TODO(review): confirm this matches the renderer's frames-in-flight count.
const RING_BUFFER_FRAMES: usize = 3;
50
/// A sub-range of a [`RingBuffer`]'s backing GPU buffer, handed out by
/// [`RingBuffer::allocate`]. Holds a shared handle to the buffer, so the
/// allocation remains usable independently of the ring buffer value itself.
pub struct RingBufferAllocation {
    // Shared handle to the backing GPU buffer.
    buffer: Arc<wgpu::Buffer>,
    // Absolute byte offset of this allocation within the backing buffer.
    offset: u64,
    // Size of this allocation in bytes.
    size: u64,
}
60
61impl RingBufferAllocation {
62 pub fn buffer(&self) -> &wgpu::Buffer {
64 &self.buffer
65 }
66
67 pub fn offset(&self) -> u64 {
69 self.offset
70 }
71
72 pub fn size(&self) -> u64 {
74 self.size
75 }
76
77 pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
83 assert!(
84 data.len() as u64 <= self.size,
85 "Data size {} exceeds allocation size {}",
86 data.len(),
87 self.size
88 );
89 queue.write_buffer(&self.buffer, self.offset, data);
90 }
91
92 pub fn as_binding(&self) -> wgpu::BindingResource<'_> {
94 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
95 buffer: &self.buffer,
96 offset: self.offset,
97 size: Some(std::num::NonZeroU64::new(self.size).unwrap()),
98 })
99 }
100}
101
/// Per-frame bump allocator over a single GPU buffer divided into
/// `RING_BUFFER_FRAMES` equal regions. Each frame writes into its own
/// region, so in-flight GPU reads of previous frames are never overwritten.
pub struct RingBuffer {
    // Shared handle to the backing GPU buffer (total size = all frames).
    buffer: Arc<wgpu::Buffer>,
    // TOTAL buffer size in bytes (per-frame size * RING_BUFFER_FRAMES),
    // not the per-frame size passed to `new`.
    size: u64,
    // Absolute byte offset of the bump cursor within the backing buffer.
    offset: u64,
    // Monotonically increasing frame counter; `frame % RING_BUFFER_FRAMES`
    // selects the active region.
    frame: u64,
}
116
117impl RingBuffer {
118 pub fn new(
126 context: Arc<GraphicsContext>,
127 size: u64,
128 usage: wgpu::BufferUsages,
129 ) -> Self {
130 let total_size = size * RING_BUFFER_FRAMES as u64;
131
132 let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
133 label: Some("Ring Buffer"),
134 size: total_size,
135 usage: usage | wgpu::BufferUsages::COPY_DST,
136 mapped_at_creation: false,
137 });
138
139 Self {
140 buffer: Arc::new(buffer),
141 size: total_size,
142 offset: 0,
143 frame: 0,
144 }
145 }
146
147 pub fn allocate(&mut self, size: u64, alignment: u64) -> Option<RingBufferAllocation> {
158 profile_function!();
159 let aligned_offset = if !self.offset.is_multiple_of(alignment) {
161 self.offset + (alignment - (self.offset % alignment))
162 } else {
163 self.offset
164 };
165
166 let frame_size = self.size / RING_BUFFER_FRAMES as u64;
168 let frame_start = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
169 let frame_end = frame_start + frame_size;
170
171 if aligned_offset + size > frame_end {
172 return None;
173 }
174
175 let allocation = RingBufferAllocation {
176 buffer: self.buffer.clone(),
177 offset: aligned_offset,
178 size,
179 };
180
181 self.offset = aligned_offset + size;
182
183 Some(allocation)
184 }
185
186 pub fn next_frame(&mut self) {
191 self.frame += 1;
192 let frame_size = self.size / RING_BUFFER_FRAMES as u64;
193 self.offset = (self.frame % RING_BUFFER_FRAMES as u64) * frame_size;
194 }
195
196 pub fn reset(&mut self) {
198 self.frame = 0;
199 self.offset = 0;
200 }
201
202 pub fn frame(&self) -> u64 {
204 self.frame
205 }
206
207 pub fn offset(&self) -> u64 {
209 self.offset
210 }
211
212 pub fn size(&self) -> u64 {
214 self.size
215 }
216
217 pub fn remaining(&self) -> u64 {
219 let frame_size = self.size / RING_BUFFER_FRAMES as u64;
220 let frame_end = ((self.frame % RING_BUFFER_FRAMES as u64) + 1) * frame_size;
221 frame_end.saturating_sub(self.offset)
222 }
223}
224
/// A CPU-writable GPU buffer used as a copy source for uploads
/// (`MAP_WRITE | COPY_SRC`), created and recycled via [`StagingBufferPool`].
pub struct StagingBuffer {
    // The underlying GPU buffer (owned, not shared).
    buffer: wgpu::Buffer,
    // Buffer capacity in bytes.
    size: u64,
}
232
233impl StagingBuffer {
234 fn new(context: &GraphicsContext, size: u64) -> Self {
236 let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
237 label: Some("Staging Buffer"),
238 size,
239 usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC,
240 mapped_at_creation: false,
241 });
242
243 Self { buffer, size }
244 }
245
246 pub fn buffer(&self) -> &wgpu::Buffer {
248 &self.buffer
249 }
250
251 pub fn size(&self) -> u64 {
253 self.size
254 }
255
256 pub fn write(&self, queue: &wgpu::Queue, data: &[u8]) {
258 assert!(
259 data.len() as u64 <= self.size,
260 "Data size {} exceeds buffer size {}",
261 data.len(),
262 self.size
263 );
264 queue.write_buffer(&self.buffer, 0, data);
265 }
266
267 pub fn copy_to_buffer(
269 &self,
270 encoder: &mut wgpu::CommandEncoder,
271 dst: &wgpu::Buffer,
272 dst_offset: u64,
273 ) {
274 encoder.copy_buffer_to_buffer(&self.buffer, 0, dst, dst_offset, self.size);
275 }
276
277 pub fn copy_region_to_buffer(
279 &self,
280 encoder: &mut wgpu::CommandEncoder,
281 src_offset: u64,
282 dst: &wgpu::Buffer,
283 dst_offset: u64,
284 size: u64,
285 ) {
286 encoder.copy_buffer_to_buffer(&self.buffer, src_offset, dst, dst_offset, size);
287 }
288}
289
/// A recycling pool of [`StagingBuffer`]s that satisfies allocation requests
/// with the best-fitting idle buffer before creating new ones.
pub struct StagingBufferPool {
    // Idle buffers available for reuse, in no particular order.
    available: Vec<StagingBuffer>,
}
295
296impl StagingBufferPool {
297 pub fn new() -> Self {
299 Self {
300 available: Vec::new(),
301 }
302 }
303
304 pub fn allocate(&mut self, context: &GraphicsContext, size: u64) -> StagingBuffer {
309 profile_function!();
310 let mut best_idx = None;
313 let mut best_size = u64::MAX;
314
315 for (idx, buffer) in self.available.iter().enumerate() {
316 if buffer.size >= size && buffer.size < best_size {
317 best_idx = Some(idx);
318 best_size = buffer.size;
319 }
320 }
321
322 if let Some(idx) = best_idx {
323 self.available.swap_remove(idx)
324 } else {
325 let rounded_size = size.next_power_of_two();
328 StagingBuffer::new(context, rounded_size)
329 }
330 }
331
332 pub fn recycle(&mut self, buffer: StagingBuffer) {
334 self.available.push(buffer);
335 }
336
337 pub fn clear(&mut self) {
339 self.available.clear();
340 }
341
342 pub fn available_count(&self) -> usize {
344 self.available.len()
345 }
346
347 pub fn total_available_size(&self) -> u64 {
349 self.available.iter().map(|b| b.size).sum()
350 }
351}
352
// `Default` delegates to `new()`, yielding an empty pool.
impl Default for StagingBufferPool {
    fn default() -> Self {
        Self::new()
    }
}
358
#[cfg(test)]
mod tests {
    use super::*;

    /// Sequential allocations within one frame are packed back-to-back, and
    /// a request larger than the remaining frame space is refused.
    #[test]
    fn test_ring_buffer_allocation() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        let alloc1 = ring.allocate(256, 256);
        assert!(alloc1.is_some());
        let alloc1 = alloc1.unwrap();
        assert_eq!(alloc1.offset, 0);
        assert_eq!(alloc1.size, 256);

        let alloc2 = ring.allocate(256, 256);
        assert!(alloc2.is_some());
        let alloc2 = alloc2.unwrap();
        assert_eq!(alloc2.offset, 256);
        assert_eq!(alloc2.size, 256);

        // 512 of 1024 bytes are used; a 1024-byte request must not fit.
        assert!(ring.allocate(1024, 256).is_none());
    }

    /// Advancing the frame moves allocations into the next frame's region.
    #[test]
    fn test_ring_buffer_frame_advance() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut ring = RingBuffer::new(ctx, 1024, wgpu::BufferUsages::UNIFORM);

        let alloc1 = ring.allocate(512, 256);
        assert!(alloc1.is_some());

        ring.next_frame();
        assert_eq!(ring.frame(), 1);

        let alloc2 = ring.allocate(512, 256);
        assert!(alloc2.is_some());
        let alloc2 = alloc2.unwrap();
        // Frame 1's region starts one frame-size (1024 bytes) into the buffer.
        assert_eq!(alloc2.offset, 1024);
    }

    /// Recycled buffers are reused instead of creating new ones.
    #[test]
    fn test_staging_pool() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut pool = StagingBufferPool::new();

        let buffer1 = pool.allocate(&ctx, 1024);
        assert_eq!(buffer1.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(buffer1);
        assert_eq!(pool.available_count(), 1);

        let buffer2 = pool.allocate(&ctx, 1024);
        assert_eq!(buffer2.size(), 1024);
        assert_eq!(pool.available_count(), 0);

        pool.recycle(buffer2);
    }

    /// Best-fit selection picks the smallest pooled buffer that satisfies
    /// the request.
    #[test]
    fn test_staging_pool_size_matching() {
        let ctx = GraphicsContext::new_owned_sync().expect("Failed to create graphics context");
        let mut pool = StagingBufferPool::new();

        pool.recycle(StagingBuffer::new(&ctx, 512));
        pool.recycle(StagingBuffer::new(&ctx, 1024));
        pool.recycle(StagingBuffer::new(&ctx, 2048));

        let buffer = pool.allocate(&ctx, 600);
        assert_eq!(buffer.size(), 1024);
        assert_eq!(pool.available_count(), 2);
    }
}