1use std::collections::HashMap;
5use std::marker::PhantomData;
6use std::sync::atomic::{AtomicU64, Ordering};
7use std::sync::{Arc, Mutex};
8
/// Plain-data identifier for a GL buffer object plus its allocated size.
///
/// `raw` holds the GL buffer name; name `0` is GL's "no buffer" and marks
/// the null handle.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BufferHandle {
    pub raw: u32,
    pub size_bytes: usize,
}

impl BufferHandle {
    /// Wrap an existing GL buffer name together with its byte size.
    pub fn new(raw: u32, size_bytes: usize) -> Self {
        BufferHandle { raw, size_bytes }
    }

    /// The sentinel handle: GL name 0, zero bytes.
    pub fn null() -> Self {
        BufferHandle::new(0, 0)
    }

    /// True when this handle refers to no buffer (raw name 0).
    pub fn is_null(&self) -> bool {
        matches!(self.raw, 0)
    }
}
41
/// Usage hints handed to `glBufferData`, mirroring the GL usage enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum BufferUsage {
    /// Written once, drawn many times.
    StaticDraw,
    /// Rewritten repeatedly by the application.
    DynamicDraw,
    /// Written once per use.
    StreamDraw,
    /// Written by GL, read back once per use.
    StreamRead,
    /// Copied GL-to-GL repeatedly.
    DynamicCopy,
}

impl BufferUsage {
    /// Translate this hint to the raw GL enum value.
    pub fn to_gl(self) -> u32 {
        match self {
            Self::StaticDraw => 0x88E4,  // GL_STATIC_DRAW
            Self::DynamicDraw => 0x88E8, // GL_DYNAMIC_DRAW
            Self::StreamDraw => 0x88E0,  // GL_STREAM_DRAW
            Self::StreamRead => 0x88E1,  // GL_STREAM_READ
            Self::DynamicCopy => 0x88EA, // GL_DYNAMIC_COPY
        }
    }
}
69
// OpenGL enum values inlined here so this module does not depend on a
// particular GL header crate. Names and values match the GL spec exactly.
const GL_SHADER_STORAGE_BUFFER: u32 = 0x90D2;
const GL_ATOMIC_COUNTER_BUFFER: u32 = 0x92C0;
// Generic copy targets used by glCopyBufferSubData.
const GL_COPY_READ_BUFFER: u32 = 0x8F36;
const GL_COPY_WRITE_BUFFER: u32 = 0x8F37;
// Access flags for glMapBufferRange.
const GL_MAP_READ_BIT: u32 = 0x0001;
const GL_MAP_WRITE_BIT: u32 = 0x0002;
// glMemoryBarrier bits (leading underscore marks a currently unused constant).
const _GL_BUFFER_UPDATE_BARRIER_BIT: u32 = 0x00000200;
const GL_SHADER_STORAGE_BARRIER_BIT: u32 = 0x00002000;
const GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT: u32 = 0x00000001;
const GL_COMMAND_BARRIER_BIT: u32 = 0x00000040;
94
95#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
101pub enum BufferBarrierType {
102 ShaderStorage,
104 VertexAttrib,
106 IndirectDraw,
108 ShaderStorageAndVertex,
110 All,
112}
113
114impl BufferBarrierType {
115 pub fn to_gl_bits(self) -> u32 {
117 match self {
118 BufferBarrierType::ShaderStorage => GL_SHADER_STORAGE_BARRIER_BIT,
119 BufferBarrierType::VertexAttrib => GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT,
120 BufferBarrierType::IndirectDraw => GL_COMMAND_BARRIER_BIT,
121 BufferBarrierType::ShaderStorageAndVertex => {
122 GL_SHADER_STORAGE_BARRIER_BIT | GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT
123 }
124 BufferBarrierType::All => {
125 GL_SHADER_STORAGE_BARRIER_BIT
126 | GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT
127 | GL_COMMAND_BARRIER_BIT
128 }
129 }
130 }
131}
132
/// Thread-safe bookkeeping of GPU buffer allocations.
///
/// All counters use `Relaxed` atomics: the values are diagnostics only, so
/// atomicity suffices and no cross-thread ordering is required.
#[derive(Debug)]
pub struct MemoryTracker {
    // Bytes currently allocated (allocs minus frees).
    allocated_bytes: AtomicU64,
    // High-water mark of `allocated_bytes`.
    peak_bytes: AtomicU64,
    // Number of record_alloc calls.
    allocation_count: AtomicU64,
    // Number of record_free calls.
    free_count: AtomicU64,
}

impl MemoryTracker {
    /// Create a tracker with every counter at zero.
    pub fn new() -> Self {
        Self {
            allocated_bytes: AtomicU64::new(0),
            peak_bytes: AtomicU64::new(0),
            allocation_count: AtomicU64::new(0),
            free_count: AtomicU64::new(0),
        }
    }

    /// Record an allocation of `bytes`, raising the peak if exceeded.
    pub fn record_alloc(&self, bytes: usize) {
        let grown =
            self.allocated_bytes.fetch_add(bytes as u64, Ordering::Relaxed) + bytes as u64;
        // fetch_max is the same monotonic peak update as a manual CAS loop.
        self.peak_bytes.fetch_max(grown, Ordering::Relaxed);
        self.allocation_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Record a free of `bytes`. The peak is intentionally left untouched.
    pub fn record_free(&self, bytes: usize) {
        self.allocated_bytes.fetch_sub(bytes as u64, Ordering::Relaxed);
        self.free_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Bytes currently allocated.
    pub fn current_bytes(&self) -> u64 {
        self.allocated_bytes.load(Ordering::Relaxed)
    }

    /// Highest value `current_bytes` has ever reached.
    pub fn peak_bytes(&self) -> u64 {
        self.peak_bytes.load(Ordering::Relaxed)
    }

    /// Total number of recorded allocations.
    pub fn allocation_count(&self) -> u64 {
        self.allocation_count.load(Ordering::Relaxed)
    }

    /// Total number of recorded frees.
    pub fn free_count(&self) -> u64 {
        self.free_count.load(Ordering::Relaxed)
    }

    /// One-line human-readable snapshot of the counters.
    pub fn summary(&self) -> String {
        const MIB: f64 = 1024.0 * 1024.0;
        format!(
            "GPU Memory: {:.2} MB current, {:.2} MB peak, {} allocs, {} frees",
            self.current_bytes() as f64 / MIB,
            self.peak_bytes() as f64 / MIB,
            self.allocation_count(),
            self.free_count(),
        )
    }
}

impl Default for MemoryTracker {
    fn default() -> Self {
        MemoryTracker::new()
    }
}
228
/// A strongly typed GPU shader-storage buffer (SSBO) of `T` elements.
///
/// Keeps a logical element count (`len`) separate from the allocated element
/// `capacity`, and reports every allocation/free to the shared tracker.
pub struct TypedBuffer<T: Copy> {
    // GL buffer name plus allocated byte size.
    handle: BufferHandle,
    // Number of elements currently considered valid.
    len: usize,
    // Allocated capacity, in elements.
    capacity: usize,
    // GL usage hint used for (re)allocation.
    usage: BufferUsage,
    // SSBO binding point used with glBindBufferBase.
    binding_index: u32,
    // Shared memory-statistics sink.
    tracker: Arc<MemoryTracker>,
    // Records the element type without storing any T.
    _marker: PhantomData<T>,
}
249
250impl<T: Copy> TypedBuffer<T> {
251 const ELEM_SIZE: usize = std::mem::size_of::<T>();
253
254 pub fn create(
259 gl: &glow::Context,
260 capacity: usize,
261 usage: BufferUsage,
262 binding_index: u32,
263 tracker: Arc<MemoryTracker>,
264 ) -> Self {
265 use glow::HasContext;
266 let byte_size = capacity * Self::ELEM_SIZE;
267 let raw = unsafe {
268 let buf = gl.create_buffer().expect("Failed to create GL buffer");
269 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
270 gl.buffer_data_size(GL_SHADER_STORAGE_BUFFER, byte_size as i32, usage.to_gl());
271 gl.bind_buffer_base(GL_SHADER_STORAGE_BUFFER, binding_index, Some(buf));
272 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
273 buf
274 };
275 tracker.record_alloc(byte_size);
276 Self {
277 handle: BufferHandle::new(raw.0.get(), byte_size),
278 len: 0,
279 capacity,
280 usage,
281 binding_index,
282 tracker,
283 _marker: PhantomData,
284 }
285 }
286
287 pub fn upload(&mut self, gl: &glow::Context, data: &[T]) {
290 if data.len() > self.capacity {
291 self.resize(gl, data.len());
292 }
293 self.len = data.len();
294 let byte_slice = unsafe {
295 std::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * Self::ELEM_SIZE)
296 };
297 use glow::HasContext;
298 unsafe {
299 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
300 gl.buffer_sub_data_u8_slice(GL_SHADER_STORAGE_BUFFER, 0, byte_slice);
301 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
302 }
303 }
304
305 pub fn upload_range(&mut self, gl: &glow::Context, offset: usize, data: &[T]) {
307 assert!(
308 offset + data.len() <= self.capacity,
309 "upload_range out of bounds"
310 );
311 let byte_offset = (offset * Self::ELEM_SIZE) as i32;
312 let byte_slice = unsafe {
313 std::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * Self::ELEM_SIZE)
314 };
315 use glow::HasContext;
316 unsafe {
317 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
318 gl.buffer_sub_data_u8_slice(GL_SHADER_STORAGE_BUFFER, byte_offset, byte_slice);
319 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
320 }
321 let new_end = offset + data.len();
323 if new_end > self.len {
324 self.len = new_end;
325 }
326 }
327
328 pub fn download(&self, gl: &glow::Context) -> Vec<T> {
330 if self.len == 0 {
331 return Vec::new();
332 }
333 let byte_count = self.len * Self::ELEM_SIZE;
334 let mut bytes = vec![0u8; byte_count];
335 use glow::HasContext;
336 unsafe {
337 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
338 gl.get_buffer_sub_data(GL_SHADER_STORAGE_BUFFER, 0, &mut bytes);
339 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
340 }
341 let mut result = Vec::with_capacity(self.len);
343 let src_ptr = bytes.as_ptr() as *const T;
344 for i in 0..self.len {
345 result.push(unsafe { std::ptr::read(src_ptr.add(i)) });
346 }
347 result
348 }
349
350 pub fn download_range(&self, gl: &glow::Context, offset: usize, count: usize) -> Vec<T> {
352 assert!(
353 offset + count <= self.len,
354 "download_range out of bounds"
355 );
356 let byte_offset = (offset * Self::ELEM_SIZE) as i32;
357 let byte_count = count * Self::ELEM_SIZE;
358 let mut bytes = vec![0u8; byte_count];
359 use glow::HasContext;
360 unsafe {
361 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
362 gl.get_buffer_sub_data(GL_SHADER_STORAGE_BUFFER, byte_offset, &mut bytes);
363 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
364 }
365 let mut result = Vec::with_capacity(count);
366 let src_ptr = bytes.as_ptr() as *const T;
367 for i in 0..count {
368 result.push(unsafe { std::ptr::read(src_ptr.add(i)) });
369 }
370 result
371 }
372
373 pub fn resize(&mut self, gl: &glow::Context, new_capacity: usize) {
375 let old_byte_size = self.capacity * Self::ELEM_SIZE;
376 let new_byte_size = new_capacity * Self::ELEM_SIZE;
377 self.tracker.record_free(old_byte_size);
378 use glow::HasContext;
379 unsafe {
380 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
381 gl.buffer_data_size(
382 GL_SHADER_STORAGE_BUFFER,
383 new_byte_size as i32,
384 self.usage.to_gl(),
385 );
386 gl.bind_buffer_base(
387 GL_SHADER_STORAGE_BUFFER,
388 self.binding_index,
389 Some(self.gl_buffer()),
390 );
391 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
392 }
393 self.tracker.record_alloc(new_byte_size);
394 self.handle.size_bytes = new_byte_size;
395 self.capacity = new_capacity;
396 self.len = 0; }
398
399 pub fn resize_preserve(&mut self, gl: &glow::Context, new_capacity: usize) {
401 if new_capacity == self.capacity {
402 return;
403 }
404 let old_byte_size = self.capacity * Self::ELEM_SIZE;
405 let new_byte_size = new_capacity * Self::ELEM_SIZE;
406 let copy_bytes = std::cmp::min(self.len * Self::ELEM_SIZE, new_byte_size);
407
408 use glow::HasContext;
409 unsafe {
410 let tmp = gl.create_buffer().expect("Failed to create temp buffer");
412 gl.bind_buffer(GL_COPY_READ_BUFFER, Some(self.gl_buffer()));
413 gl.bind_buffer(GL_COPY_WRITE_BUFFER, Some(tmp));
414 gl.buffer_data_size(GL_COPY_WRITE_BUFFER, copy_bytes as i32, BufferUsage::StreamDraw.to_gl());
415 gl.copy_buffer_sub_data(
416 GL_COPY_READ_BUFFER,
417 GL_COPY_WRITE_BUFFER,
418 0,
419 0,
420 copy_bytes as i32,
421 );
422 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
424 gl.buffer_data_size(
425 GL_SHADER_STORAGE_BUFFER,
426 new_byte_size as i32,
427 self.usage.to_gl(),
428 );
429 gl.bind_buffer(GL_COPY_READ_BUFFER, Some(tmp));
431 gl.bind_buffer(GL_COPY_WRITE_BUFFER, Some(self.gl_buffer()));
432 gl.copy_buffer_sub_data(
433 GL_COPY_READ_BUFFER,
434 GL_COPY_WRITE_BUFFER,
435 0,
436 0,
437 copy_bytes as i32,
438 );
439 gl.delete_buffer(tmp);
440 gl.bind_buffer_base(
441 GL_SHADER_STORAGE_BUFFER,
442 self.binding_index,
443 Some(self.gl_buffer()),
444 );
445 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
446 }
447 self.tracker.record_free(old_byte_size);
448 self.tracker.record_alloc(new_byte_size);
449 self.handle.size_bytes = new_byte_size;
450 self.capacity = new_capacity;
451 self.len = std::cmp::min(self.len, new_capacity);
452 }
453
454 pub fn bind(&self, gl: &glow::Context) {
456 use glow::HasContext;
457 unsafe {
458 gl.bind_buffer_base(
459 GL_SHADER_STORAGE_BUFFER,
460 self.binding_index,
461 Some(self.gl_buffer()),
462 );
463 }
464 }
465
466 pub fn unbind(&self, gl: &glow::Context) {
468 use glow::HasContext;
469 unsafe {
470 gl.bind_buffer_base(GL_SHADER_STORAGE_BUFFER, self.binding_index, None);
471 }
472 }
473
474 pub fn barrier(&self, gl: &glow::Context, barrier_type: BufferBarrierType) {
476 use glow::HasContext;
477 unsafe {
478 gl.memory_barrier(barrier_type.to_gl_bits());
479 }
480 }
481
482 pub fn clear(&mut self, gl: &glow::Context) {
484 let byte_size = self.capacity * Self::ELEM_SIZE;
485 let zeros = vec![0u8; byte_size];
486 use glow::HasContext;
487 unsafe {
488 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(self.gl_buffer()));
489 gl.buffer_sub_data_u8_slice(GL_SHADER_STORAGE_BUFFER, 0, &zeros);
490 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
491 }
492 self.len = 0;
493 }
494
495 pub fn destroy(self, gl: &glow::Context) {
497 let byte_size = self.capacity * Self::ELEM_SIZE;
498 self.tracker.record_free(byte_size);
499 use glow::HasContext;
500 unsafe {
501 gl.delete_buffer(self.gl_buffer());
502 }
503 }
504
505 pub fn len(&self) -> usize {
507 self.len
508 }
509
510 pub fn is_empty(&self) -> bool {
512 self.len == 0
513 }
514
515 pub fn capacity(&self) -> usize {
517 self.capacity
518 }
519
520 pub fn byte_size(&self) -> usize {
522 self.handle.size_bytes
523 }
524
525 pub fn handle(&self) -> BufferHandle {
527 self.handle
528 }
529
530 pub fn binding(&self) -> u32 {
532 self.binding_index
533 }
534
535 fn gl_buffer(&self) -> glow::NativeBuffer {
537 glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap())
539 }
540}
541
/// GPU-side particle record, laid out to match the shader's std430 struct:
/// two vec4s (xyz + one padding/extra lane each).
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct ParticleGpuData {
    pub position: [f32; 4],
    pub velocity: [f32; 4],
}

impl Default for ParticleGpuData {
    /// A particle at the origin with zero velocity.
    fn default() -> Self {
        let zero = [0.0f32; 4];
        Self {
            position: zero,
            velocity: zero,
        }
    }
}
562
/// Double-buffered particle storage for compute-shader ping-pong updates:
/// one SSBO is read while the other is written, then `swap` flips roles.
pub struct ParticleBuffer {
    // The two ping-pong SSBOs; both sized identically.
    buffers: [BufferHandle; 2],
    // Capacity of each buffer, in particles.
    capacity: usize,
    // Number of particles currently considered live.
    active_count: usize,
    // Index (0 or 1) of the buffer currently in the "read" role.
    read_index: usize,
    // SSBO binding point for the read buffer.
    binding_read: u32,
    // SSBO binding point for the write buffer.
    binding_write: u32,
    // Shared memory-statistics sink.
    tracker: Arc<MemoryTracker>,
}
576
577impl ParticleBuffer {
578 const PARTICLE_SIZE: usize = std::mem::size_of::<ParticleGpuData>();
580
581 pub fn create(
583 gl: &glow::Context,
584 capacity: usize,
585 binding_read: u32,
586 binding_write: u32,
587 tracker: Arc<MemoryTracker>,
588 ) -> Self {
589 let byte_size = capacity * Self::PARTICLE_SIZE;
590 use glow::HasContext;
591 let mut handles = [BufferHandle::null(); 2];
592 for handle in handles.iter_mut() {
593 let raw = unsafe {
594 let buf = gl.create_buffer().expect("Failed to create particle buffer");
595 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
596 gl.buffer_data_size(
597 GL_SHADER_STORAGE_BUFFER,
598 byte_size as i32,
599 BufferUsage::DynamicCopy.to_gl(),
600 );
601 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
602 buf
603 };
604 *handle = BufferHandle::new(raw.0.get(), byte_size);
605 tracker.record_alloc(byte_size);
606 }
607 Self {
608 buffers: handles,
609 capacity,
610 active_count: 0,
611 read_index: 0,
612 binding_read,
613 binding_write,
614 tracker,
615 }
616 }
617
618 pub fn swap(&mut self) {
620 self.read_index = 1 - self.read_index;
621 }
622
623 pub fn bind(&self, gl: &glow::Context) {
625 use glow::HasContext;
626 let read_buf = self.gl_buffer(self.read_index);
627 let write_buf = self.gl_buffer(1 - self.read_index);
628 unsafe {
629 gl.bind_buffer_base(GL_SHADER_STORAGE_BUFFER, self.binding_read, Some(read_buf));
630 gl.bind_buffer_base(GL_SHADER_STORAGE_BUFFER, self.binding_write, Some(write_buf));
631 }
632 }
633
634 pub fn upload_initial(&mut self, gl: &glow::Context, data: &[ParticleGpuData]) {
636 assert!(data.len() <= self.capacity);
637 self.active_count = data.len();
638 let byte_slice = unsafe {
639 std::slice::from_raw_parts(
640 data.as_ptr() as *const u8,
641 data.len() * Self::PARTICLE_SIZE,
642 )
643 };
644 use glow::HasContext;
645 let buf = self.gl_buffer(self.read_index);
646 unsafe {
647 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
648 gl.buffer_sub_data_u8_slice(GL_SHADER_STORAGE_BUFFER, 0, byte_slice);
649 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
650 }
651 }
652
653 pub fn download(&self, gl: &glow::Context) -> Vec<ParticleGpuData> {
655 if self.active_count == 0 {
656 return Vec::new();
657 }
658 let byte_count = self.active_count * Self::PARTICLE_SIZE;
659 let mut bytes = vec![0u8; byte_count];
660 use glow::HasContext;
661 let buf = self.gl_buffer(self.read_index);
662 unsafe {
663 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
664 gl.get_buffer_sub_data(GL_SHADER_STORAGE_BUFFER, 0, &mut bytes);
665 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
666 }
667 let mut result = Vec::with_capacity(self.active_count);
668 let src_ptr = bytes.as_ptr() as *const ParticleGpuData;
669 for i in 0..self.active_count {
670 result.push(unsafe { std::ptr::read(src_ptr.add(i)) });
671 }
672 result
673 }
674
675 pub fn set_active_count(&mut self, count: usize) {
677 self.active_count = count.min(self.capacity);
678 }
679
680 pub fn active_count(&self) -> usize {
682 self.active_count
683 }
684
685 pub fn capacity(&self) -> usize {
687 self.capacity
688 }
689
690 pub fn resize(&mut self, gl: &glow::Context, new_capacity: usize) {
692 let old_byte = self.capacity * Self::PARTICLE_SIZE;
693 let new_byte = new_capacity * Self::PARTICLE_SIZE;
694 use glow::HasContext;
695 for i in 0..2 {
696 let buf = self.gl_buffer(i);
697 unsafe {
698 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
699 gl.buffer_data_size(
700 GL_SHADER_STORAGE_BUFFER,
701 new_byte as i32,
702 BufferUsage::DynamicCopy.to_gl(),
703 );
704 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
705 }
706 self.tracker.record_free(old_byte);
707 self.tracker.record_alloc(new_byte);
708 self.buffers[i].size_bytes = new_byte;
709 }
710 self.capacity = new_capacity;
711 self.active_count = 0;
712 }
713
714 pub fn read_handle(&self) -> BufferHandle {
716 self.buffers[self.read_index]
717 }
718
719 pub fn write_handle(&self) -> BufferHandle {
721 self.buffers[1 - self.read_index]
722 }
723
724 pub fn destroy(self, gl: &glow::Context) {
726 use glow::HasContext;
727 for i in 0..2 {
728 let byte_size = self.buffers[i].size_bytes;
729 self.tracker.record_free(byte_size);
730 let buf = self.gl_buffer(i);
731 unsafe {
732 gl.delete_buffer(buf);
733 }
734 }
735 }
736
737 fn gl_buffer(&self, idx: usize) -> glow::NativeBuffer {
739 glow::NativeBuffer(std::num::NonZeroU32::new(self.buffers[idx].raw).unwrap())
740 }
741}
742
/// A single GPU atomic counter (one u32) backed by a GL_ATOMIC_COUNTER_BUFFER.
pub struct AtomicCounter {
    // GL buffer name plus its 4-byte size.
    handle: BufferHandle,
    // Atomic-counter binding point used with glBindBufferBase.
    binding_index: u32,
    // Shared memory-statistics sink.
    tracker: Arc<MemoryTracker>,
}
754
impl AtomicCounter {
    /// The counter occupies a single u32 (4 bytes).
    const COUNTER_SIZE: usize = std::mem::size_of::<u32>();

    /// Allocate a zero-initialized counter and bind it to `binding_index`.
    pub fn create(
        gl: &glow::Context,
        binding_index: u32,
        tracker: Arc<MemoryTracker>,
    ) -> Self {
        use glow::HasContext;
        // SAFETY: plain GL calls; `buf` is a freshly created buffer name and
        // `zero` outlives the upload call.
        let raw = unsafe {
            let buf = gl
                .create_buffer()
                .expect("Failed to create atomic counter buffer");
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(buf));
            let zero = 0u32.to_le_bytes();
            gl.buffer_data_u8_slice(
                GL_ATOMIC_COUNTER_BUFFER,
                &zero,
                BufferUsage::DynamicDraw.to_gl(),
            );
            gl.bind_buffer_base(GL_ATOMIC_COUNTER_BUFFER, binding_index, Some(buf));
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
            buf
        };
        tracker.record_alloc(Self::COUNTER_SIZE);
        Self {
            handle: BufferHandle::new(raw.0.get(), Self::COUNTER_SIZE),
            binding_index,
            tracker,
        }
    }

    /// Write 0 into the counter.
    pub fn reset(&self, gl: &glow::Context) {
        use glow::HasContext;
        let zero = 0u32.to_le_bytes();
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(self.gl_buffer()));
            gl.buffer_sub_data_u8_slice(GL_ATOMIC_COUNTER_BUFFER, 0, &zero);
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
    }

    /// Write `value` into the counter (little-endian byte order).
    pub fn set(&self, gl: &glow::Context, value: u32) {
        use glow::HasContext;
        let bytes = value.to_le_bytes();
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(self.gl_buffer()));
            gl.buffer_sub_data_u8_slice(GL_ATOMIC_COUNTER_BUFFER, 0, &bytes);
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
    }

    /// Read the counter back from the GPU (stalls until GL has the value).
    pub fn read(&self, gl: &glow::Context) -> u32 {
        use glow::HasContext;
        let mut bytes = [0u8; 4];
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(self.gl_buffer()));
            gl.get_buffer_sub_data(GL_ATOMIC_COUNTER_BUFFER, 0, &mut bytes);
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
        u32::from_le_bytes(bytes)
    }

    /// Re-bind the counter to its atomic-counter binding point.
    pub fn bind(&self, gl: &glow::Context) {
        use glow::HasContext;
        unsafe {
            gl.bind_buffer_base(
                GL_ATOMIC_COUNTER_BUFFER,
                self.binding_index,
                Some(self.gl_buffer()),
            );
        }
    }

    /// Copy of the plain-data handle.
    pub fn handle(&self) -> BufferHandle {
        self.handle
    }

    /// Delete the GL buffer and report the freed bytes. Consumes `self`.
    pub fn destroy(self, gl: &glow::Context) {
        self.tracker.record_free(Self::COUNTER_SIZE);
        use glow::HasContext;
        unsafe {
            gl.delete_buffer(self.gl_buffer());
        }
    }

    /// Reconstruct the glow buffer; panics if the handle is null.
    fn gl_buffer(&self) -> glow::NativeBuffer {
        glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap())
    }
}
853
854#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
860struct PoolKey {
861 size_bytes: usize,
862 usage: BufferUsage,
863}
864
/// Size/usage-bucketed pool of reusable GL buffers.
///
/// `acquire` reuses an idle buffer of the exact size/usage or creates a new
/// one; `release` returns it to the pool (or deletes it if the bucket is full).
pub struct BufferPool {
    // Idle buffers, grouped by exact (size, usage) bucket.
    available: HashMap<PoolKey, Vec<BufferHandle>>,
    // Raw GL name -> bucket key, for every handed-out buffer.
    in_use: HashMap<u32, PoolKey>,
    // Shared memory-statistics sink.
    tracker: Arc<MemoryTracker>,
    // Maximum idle buffers retained PER bucket before excess is deleted.
    max_pool_size: usize,
}
876
impl BufferPool {
    /// Create an empty pool; `max_pool_size` caps idle buffers per bucket.
    pub fn new(tracker: Arc<MemoryTracker>, max_pool_size: usize) -> Self {
        Self {
            available: HashMap::new(),
            in_use: HashMap::new(),
            tracker,
            max_pool_size,
        }
    }

    /// Hand out a buffer of exactly `size_bytes`/`usage`, reusing an idle one
    /// when possible. Only brand-new buffers are recorded as allocations —
    /// reused ones were never recorded as freed.
    pub fn acquire(
        &mut self,
        gl: &glow::Context,
        size_bytes: usize,
        usage: BufferUsage,
    ) -> BufferHandle {
        let key = PoolKey { size_bytes, usage };
        // Fast path: reuse an idle buffer from the matching bucket.
        if let Some(list) = self.available.get_mut(&key) {
            if let Some(handle) = list.pop() {
                self.in_use.insert(handle.raw, key);
                return handle;
            }
        }
        use glow::HasContext;
        // Slow path: allocate a fresh GL buffer of the requested size.
        // SAFETY: plain GL calls on a freshly created buffer name.
        let raw = unsafe {
            let buf = gl.create_buffer().expect("Failed to create pooled buffer");
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            gl.buffer_data_size(GL_SHADER_STORAGE_BUFFER, size_bytes as i32, usage.to_gl());
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
            buf
        };
        self.tracker.record_alloc(size_bytes);
        let handle = BufferHandle::new(raw.0.get(), size_bytes);
        self.in_use.insert(handle.raw, key);
        handle
    }

    /// Return `handle` to its bucket, or delete it if the bucket is full.
    /// Unknown handles (not tracked in `in_use`) are silently ignored.
    pub fn release(&mut self, gl: &glow::Context, handle: BufferHandle) {
        if let Some(key) = self.in_use.remove(&handle.raw) {
            let list = self.available.entry(key).or_insert_with(Vec::new);
            if list.len() < self.max_pool_size {
                list.push(handle);
            } else {
                // Bucket at capacity: free the GL object instead of pooling.
                self.tracker.record_free(handle.size_bytes);
                use glow::HasContext;
                unsafe {
                    let buf = glow::NativeBuffer(
                        std::num::NonZeroU32::new(handle.raw).unwrap(),
                    );
                    gl.delete_buffer(buf);
                }
            }
        }
    }

    /// Delete every idle buffer; buffers still in use are untouched.
    pub fn drain(&mut self, gl: &glow::Context) {
        use glow::HasContext;
        for (_key, list) in self.available.drain() {
            for handle in list {
                self.tracker.record_free(handle.size_bytes);
                unsafe {
                    let buf = glow::NativeBuffer(
                        std::num::NonZeroU32::new(handle.raw).unwrap(),
                    );
                    gl.delete_buffer(buf);
                }
            }
        }
    }

    /// Delete ALL buffers, idle and in-use alike. Consumes the pool, so any
    /// outstanding handles become dangling GL names.
    pub fn destroy(mut self, gl: &glow::Context) {
        self.drain(gl);
        use glow::HasContext;
        for (raw, key) in self.in_use.drain() {
            self.tracker.record_free(key.size_bytes);
            unsafe {
                let buf = glow::NativeBuffer(std::num::NonZeroU32::new(raw).unwrap());
                gl.delete_buffer(buf);
            }
        }
    }

    /// Total idle buffers across all buckets.
    pub fn available_count(&self) -> usize {
        self.available.values().map(|v| v.len()).sum()
    }

    /// Number of buffers currently handed out.
    pub fn in_use_count(&self) -> usize {
        self.in_use.len()
    }

    /// Total bytes held by idle (pooled) buffers.
    pub fn pooled_bytes(&self) -> usize {
        self.available
            .values()
            .flat_map(|v| v.iter())
            .map(|h| h.size_bytes)
            .sum()
    }
}
988
/// Record of a glMapBufferRange mapping over part of a buffer.
///
/// NOTE(review): this type never stores the mapped pointer, so callers cannot
/// read or write through the mapping — see the notes on the impl; confirm
/// whether that is intentional.
pub struct MappedRange {
    // The buffer that was mapped.
    pub buffer: BufferHandle,
    // Byte offset of the mapped range within the buffer.
    pub offset: usize,
    // Length of the mapped range in bytes.
    pub length: usize,
    // Set once `flush` has been called on this range.
    flushed: bool,
}
1005
impl MappedRange {
    /// Map `length` bytes of `buffer` at `offset` with write access.
    ///
    /// NOTE(review): the pointer returned by `map_buffer_range` is discarded
    /// (`_ptr`), so nothing can actually be written through this mapping —
    /// verify whether the pointer should be stored and exposed.
    /// NOTE(review): the buffer is unbound while still mapped; `flush`/`unmap`
    /// later rebind it, which presumes the mapping survives — confirm against
    /// the GL spec for the targeted version.
    pub fn map_write(
        gl: &glow::Context,
        buffer: BufferHandle,
        offset: usize,
        length: usize,
    ) -> Self {
        use glow::HasContext;
        let buf = glow::NativeBuffer(std::num::NonZeroU32::new(buffer.raw).unwrap());
        unsafe {
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            let _ptr = gl.map_buffer_range(
                GL_SHADER_STORAGE_BUFFER,
                offset as i32,
                length as i32,
                GL_MAP_WRITE_BIT,
            );
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
        }
        Self {
            buffer,
            offset,
            length,
            flushed: false,
        }
    }

    /// Map `length` bytes of `buffer` at `offset` with read access.
    ///
    /// NOTE(review): same concerns as `map_write` — the mapped pointer is
    /// discarded and the buffer is unbound while mapped.
    pub fn map_read(
        gl: &glow::Context,
        buffer: BufferHandle,
        offset: usize,
        length: usize,
    ) -> Self {
        use glow::HasContext;
        let buf = glow::NativeBuffer(std::num::NonZeroU32::new(buffer.raw).unwrap());
        unsafe {
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            let _ptr = gl.map_buffer_range(
                GL_SHADER_STORAGE_BUFFER,
                offset as i32,
                length as i32,
                GL_MAP_READ_BIT,
            );
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
        }
        Self {
            buffer,
            offset,
            length,
            flushed: false,
        }
    }

    /// Flush the mapped range so GPU-visible memory reflects CPU writes.
    ///
    /// NOTE(review): glFlushMappedBufferRange is only valid for ranges mapped
    /// with GL_MAP_FLUSH_EXPLICIT_BIT, which `map_write` does not pass — this
    /// likely raises GL_INVALID_OPERATION; verify against the GL spec.
    /// Also note the flush offset here is relative to the mapped range, while
    /// `self.offset` is relative to the buffer — confirm which is intended.
    pub fn flush(&mut self, gl: &glow::Context) {
        use glow::HasContext;
        let buf = glow::NativeBuffer(std::num::NonZeroU32::new(self.buffer.raw).unwrap());
        unsafe {
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            gl.flush_mapped_buffer_range(GL_SHADER_STORAGE_BUFFER, self.offset as i32, self.length as i32);
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
        }
        self.flushed = true;
    }

    /// Unmap the buffer, ending the mapping. Consumes `self`.
    pub fn unmap(self, gl: &glow::Context) {
        use glow::HasContext;
        let buf = glow::NativeBuffer(std::num::NonZeroU32::new(self.buffer.raw).unwrap());
        unsafe {
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            gl.unmap_buffer(GL_SHADER_STORAGE_BUFFER);
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
        }
    }

    /// Whether `flush` has been called on this range.
    pub fn is_flushed(&self) -> bool {
        self.flushed
    }
}
1090
/// Thin wrapper over glCopyBufferSubData that also accumulates copy
/// statistics (total bytes moved and number of copy calls).
pub struct BufferCopyEngine {
    // Sum of `length` over all copies since the last reset.
    total_bytes_copied: u64,
    // Number of copy calls since the last reset.
    copy_count: u64,
}

impl BufferCopyEngine {
    /// Create an engine with zeroed statistics.
    pub fn new() -> Self {
        Self {
            total_bytes_copied: 0,
            copy_count: 0,
        }
    }

    /// GPU-side copy of `length` bytes from `src` at `src_offset` to `dst`
    /// at `dst_offset`, via the generic COPY_READ/COPY_WRITE targets.
    ///
    /// # Panics
    /// If either range falls outside its buffer's recorded size.
    pub fn copy(
        &mut self,
        gl: &glow::Context,
        src: BufferHandle,
        src_offset: usize,
        dst: BufferHandle,
        dst_offset: usize,
        length: usize,
    ) {
        assert!(
            src_offset + length <= src.size_bytes,
            "copy: source range out of bounds"
        );
        assert!(
            dst_offset + length <= dst.size_bytes,
            "copy: dest range out of bounds"
        );
        use glow::HasContext;
        // Panics on a null handle (raw == 0).
        let src_buf = glow::NativeBuffer(std::num::NonZeroU32::new(src.raw).unwrap());
        let dst_buf = glow::NativeBuffer(std::num::NonZeroU32::new(dst.raw).unwrap());
        unsafe {
            gl.bind_buffer(GL_COPY_READ_BUFFER, Some(src_buf));
            gl.bind_buffer(GL_COPY_WRITE_BUFFER, Some(dst_buf));
            gl.copy_buffer_sub_data(
                GL_COPY_READ_BUFFER,
                GL_COPY_WRITE_BUFFER,
                src_offset as i32,
                dst_offset as i32,
                length as i32,
            );
            gl.bind_buffer(GL_COPY_READ_BUFFER, None);
            gl.bind_buffer(GL_COPY_WRITE_BUFFER, None);
        }
        self.total_bytes_copied += length as u64;
        self.copy_count += 1;
    }

    /// Copy as many bytes as both buffers can hold (the smaller size).
    pub fn copy_full(
        &mut self,
        gl: &glow::Context,
        src: BufferHandle,
        dst: BufferHandle,
    ) {
        let length = src.size_bytes.min(dst.size_bytes);
        self.copy(gl, src, 0, dst, 0, length);
    }

    /// Perform one `copy` per region, in order.
    pub fn copy_regions(
        &mut self,
        gl: &glow::Context,
        src: BufferHandle,
        dst: BufferHandle,
        regions: &[CopyRegion],
    ) {
        for region in regions {
            self.copy(
                gl,
                src,
                region.src_offset,
                dst,
                region.dst_offset,
                region.length,
            );
        }
    }

    /// Total bytes moved since creation or the last `reset_stats`.
    pub fn total_bytes_copied(&self) -> u64 {
        self.total_bytes_copied
    }

    /// Number of copies since creation or the last `reset_stats`.
    pub fn copy_count(&self) -> u64 {
        self.copy_count
    }

    /// Zero both statistics counters.
    pub fn reset_stats(&mut self) {
        self.total_bytes_copied = 0;
        self.copy_count = 0;
    }
}

impl Default for BufferCopyEngine {
    fn default() -> Self {
        Self::new()
    }
}
1204
/// One source-to-destination byte range for `BufferCopyEngine::copy_regions`.
#[derive(Debug, Clone, Copy)]
pub struct CopyRegion {
    pub src_offset: usize,
    pub dst_offset: usize,
    pub length: usize,
}
1212
/// A single GL buffer partitioned into `frame_count` equal slices used
/// round-robin, one slice per frame (classic per-frame ring upload buffer).
pub struct BufferRingAllocator {
    // The backing GL buffer covering all frames.
    handle: BufferHandle,
    // frame_count * frame_size, in bytes.
    total_size: usize,
    // Number of frame slices in the ring.
    frame_count: usize,
    // Index of the slice currently in use.
    current_frame: usize,
    // Size of each frame slice, in bytes.
    frame_size: usize,
    // Shared memory-statistics sink.
    tracker: Arc<MemoryTracker>,
}
1227
impl BufferRingAllocator {
    /// Allocate one GL buffer of `frame_count * frame_size` bytes with
    /// GL_STREAM_DRAW usage and report the allocation to `tracker`.
    pub fn create(
        gl: &glow::Context,
        frame_count: usize,
        frame_size: usize,
        tracker: Arc<MemoryTracker>,
    ) -> Self {
        let total_size = frame_count * frame_size;
        use glow::HasContext;
        // SAFETY: plain GL calls on a freshly created buffer name.
        let raw = unsafe {
            let buf = gl.create_buffer().expect("Failed to create ring buffer");
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            gl.buffer_data_size(
                GL_SHADER_STORAGE_BUFFER,
                total_size as i32,
                BufferUsage::StreamDraw.to_gl(),
            );
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
            buf
        };
        tracker.record_alloc(total_size);
        Self {
            handle: BufferHandle::new(raw.0.get(), total_size),
            total_size,
            frame_count,
            current_frame: 0,
            frame_size,
            tracker,
        }
    }

    /// Step to the next frame slice, returning the byte offset of the slice
    /// being LEFT (the one just used), not the new one.
    ///
    /// NOTE(review): `write_current` targets the post-advance slice, so the
    /// intended protocol appears to be write_current() then advance() —
    /// confirm callers follow that order.
    pub fn advance(&mut self) -> usize {
        let offset = self.current_frame * self.frame_size;
        self.current_frame = (self.current_frame + 1) % self.frame_count;
        offset
    }

    /// Upload `data` into the current frame's slice.
    ///
    /// # Panics
    /// If `data` is larger than one frame slice.
    pub fn write_current(&self, gl: &glow::Context, data: &[u8]) {
        assert!(data.len() <= self.frame_size, "data exceeds frame size");
        let offset = self.current_frame * self.frame_size;
        use glow::HasContext;
        let buf = glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap());
        unsafe {
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
            gl.buffer_sub_data_u8_slice(GL_SHADER_STORAGE_BUFFER, offset as i32, data);
            gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
        }
    }

    /// Index of the slice currently in use.
    pub fn current_frame(&self) -> usize {
        self.current_frame
    }

    /// Byte offset of slice `frame` (wrapped into the ring).
    pub fn frame_offset(&self, frame: usize) -> usize {
        (frame % self.frame_count) * self.frame_size
    }

    /// Copy of the plain-data handle for the backing buffer.
    pub fn handle(&self) -> BufferHandle {
        self.handle
    }

    /// Size of one frame slice, in bytes.
    pub fn frame_size(&self) -> usize {
        self.frame_size
    }

    /// Total size of the backing buffer, in bytes.
    pub fn total_size(&self) -> usize {
        self.total_size
    }

    /// Delete the GL buffer and report the freed bytes. Consumes `self`.
    pub fn destroy(self, gl: &glow::Context) {
        self.tracker.record_free(self.total_size);
        use glow::HasContext;
        let buf = glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap());
        unsafe {
            gl.delete_buffer(buf);
        }
    }
}
1315
1316pub fn issue_barrier(gl: &glow::Context, barrier: BufferBarrierType) {
1322 use glow::HasContext;
1323 unsafe {
1324 gl.memory_barrier(barrier.to_gl_bits());
1325 }
1326}
1327
1328pub fn issue_barrier_raw(gl: &glow::Context, bits: u32) {
1330 use glow::HasContext;
1331 unsafe {
1332 gl.memory_barrier(bits);
1333 }
1334}
1335
1336pub struct BufferDebug;
1342
1343impl BufferDebug {
1344 pub fn label(gl: &glow::Context, handle: BufferHandle, name: &str) {
1346 use glow::HasContext;
1347 let buf = glow::NativeBuffer(std::num::NonZeroU32::new(handle.raw).unwrap());
1348 unsafe {
1349 gl.object_label(glow::BUFFER, buf.0.get(), Some(name));
1350 }
1351 }
1352}
1353
1354pub fn shared_tracker() -> Arc<MemoryTracker> {
1360 Arc::new(MemoryTracker::new())
1361}
1362
/// An array of `count` GPU atomic counters in one GL_ATOMIC_COUNTER_BUFFER.
pub struct MultiCounter {
    // GL buffer name plus total byte size (count * 4).
    handle: BufferHandle,
    // Number of u32 counters in the buffer.
    count: usize,
    // Atomic-counter binding point used with glBindBufferBase.
    binding_index: u32,
    // Shared memory-statistics sink.
    tracker: Arc<MemoryTracker>,
}
1374
impl MultiCounter {
    /// Allocate `count` zero-initialized u32 counters and bind the buffer to
    /// `binding_index` as an atomic-counter buffer.
    pub fn create(
        gl: &glow::Context,
        count: usize,
        binding_index: u32,
        tracker: Arc<MemoryTracker>,
    ) -> Self {
        let byte_size = count * std::mem::size_of::<u32>();
        use glow::HasContext;
        // SAFETY: plain GL calls; `zeros` outlives the upload call.
        let raw = unsafe {
            let buf = gl
                .create_buffer()
                .expect("Failed to create multi-counter buffer");
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(buf));
            let zeros = vec![0u8; byte_size];
            gl.buffer_data_u8_slice(
                GL_ATOMIC_COUNTER_BUFFER,
                &zeros,
                BufferUsage::DynamicDraw.to_gl(),
            );
            gl.bind_buffer_base(GL_ATOMIC_COUNTER_BUFFER, binding_index, Some(buf));
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
            buf
        };
        tracker.record_alloc(byte_size);
        Self {
            handle: BufferHandle::new(raw.0.get(), byte_size),
            count,
            binding_index,
            tracker,
        }
    }

    /// Zero every counter. (Uses a literal 4 for size_of::<u32>(), matching
    /// the rest of this impl.)
    pub fn reset_all(&self, gl: &glow::Context) {
        let byte_size = self.count * 4;
        let zeros = vec![0u8; byte_size];
        use glow::HasContext;
        let buf = self.gl_buffer();
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(buf));
            gl.buffer_sub_data_u8_slice(GL_ATOMIC_COUNTER_BUFFER, 0, &zeros);
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
    }

    /// Zero the counter at `index`.
    ///
    /// # Panics
    /// If `index >= count`.
    pub fn reset_one(&self, gl: &glow::Context, index: usize) {
        assert!(index < self.count);
        let zero = 0u32.to_le_bytes();
        use glow::HasContext;
        let buf = self.gl_buffer();
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(buf));
            gl.buffer_sub_data_u8_slice(
                GL_ATOMIC_COUNTER_BUFFER,
                (index * 4) as i32,
                &zero,
            );
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
    }

    /// Read back every counter (little-endian decode).
    pub fn read_all(&self, gl: &glow::Context) -> Vec<u32> {
        let byte_size = self.count * 4;
        let mut bytes = vec![0u8; byte_size];
        use glow::HasContext;
        let buf = self.gl_buffer();
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(buf));
            gl.get_buffer_sub_data(GL_ATOMIC_COUNTER_BUFFER, 0, &mut bytes);
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
        bytes
            .chunks_exact(4)
            .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
            .collect()
    }

    /// Read back the counter at `index`.
    ///
    /// # Panics
    /// If `index >= count`.
    pub fn read_one(&self, gl: &glow::Context, index: usize) -> u32 {
        assert!(index < self.count);
        let mut bytes = [0u8; 4];
        use glow::HasContext;
        let buf = self.gl_buffer();
        unsafe {
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, Some(buf));
            gl.get_buffer_sub_data(GL_ATOMIC_COUNTER_BUFFER, (index * 4) as i32, &mut bytes);
            gl.bind_buffer(GL_ATOMIC_COUNTER_BUFFER, None);
        }
        u32::from_le_bytes(bytes)
    }

    /// Re-bind the counter buffer to its atomic-counter binding point.
    pub fn bind(&self, gl: &glow::Context) {
        use glow::HasContext;
        let buf = self.gl_buffer();
        unsafe {
            gl.bind_buffer_base(GL_ATOMIC_COUNTER_BUFFER, self.binding_index, Some(buf));
        }
    }

    /// Number of counters in the buffer.
    pub fn count(&self) -> usize {
        self.count
    }

    /// Copy of the plain-data handle.
    pub fn handle(&self) -> BufferHandle {
        self.handle
    }

    /// Delete the GL buffer and report the freed bytes. Consumes `self`.
    pub fn destroy(self, gl: &glow::Context) {
        self.tracker.record_free(self.handle.size_bytes);
        use glow::HasContext;
        unsafe {
            gl.delete_buffer(self.gl_buffer());
        }
    }
1497
1498 fn gl_buffer(&self) -> glow::NativeBuffer {
1499 glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap())
1500 }
1501}
1502
/// Work-group counts for an indirect compute dispatch.
///
/// `#[repr(C)]` pins the layout to three consecutive `u32`s, which
/// `IndirectBuffer::upload` relies on when it reinterprets a slice of these
/// commands as raw bytes.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct IndirectDispatchCommand {
    // Work-group count along each dispatch axis.
    pub num_groups_x: u32,
    pub num_groups_y: u32,
    pub num_groups_z: u32,
}

// GL_DISPATCH_INDIRECT_BUFFER target enum (0x90EE), not re-exported by glow here.
const GL_DISPATCH_INDIRECT_BUFFER: u32 = 0x90EE;
1518
1519pub struct IndirectBuffer {
1521 handle: BufferHandle,
1522 command_count: usize,
1523 tracker: Arc<MemoryTracker>,
1524}
1525
1526impl IndirectBuffer {
1527 const CMD_SIZE: usize = std::mem::size_of::<IndirectDispatchCommand>();
1528
1529 pub fn create(
1531 gl: &glow::Context,
1532 count: usize,
1533 tracker: Arc<MemoryTracker>,
1534 ) -> Self {
1535 let byte_size = count * Self::CMD_SIZE;
1536 use glow::HasContext;
1537 let raw = unsafe {
1538 let buf = gl
1539 .create_buffer()
1540 .expect("Failed to create indirect buffer");
1541 gl.bind_buffer(GL_DISPATCH_INDIRECT_BUFFER, Some(buf));
1542 gl.buffer_data_size(
1543 GL_DISPATCH_INDIRECT_BUFFER,
1544 byte_size as i32,
1545 BufferUsage::DynamicDraw.to_gl(),
1546 );
1547 gl.bind_buffer(GL_DISPATCH_INDIRECT_BUFFER, None);
1548 buf
1549 };
1550 tracker.record_alloc(byte_size);
1551 Self {
1552 handle: BufferHandle::new(raw.0.get(), byte_size),
1553 command_count: count,
1554 tracker,
1555 }
1556 }
1557
1558 pub fn upload(&self, gl: &glow::Context, commands: &[IndirectDispatchCommand]) {
1560 assert!(commands.len() <= self.command_count);
1561 let byte_slice = unsafe {
1562 std::slice::from_raw_parts(
1563 commands.as_ptr() as *const u8,
1564 commands.len() * Self::CMD_SIZE,
1565 )
1566 };
1567 use glow::HasContext;
1568 let buf = self.gl_buffer();
1569 unsafe {
1570 gl.bind_buffer(GL_DISPATCH_INDIRECT_BUFFER, Some(buf));
1571 gl.buffer_sub_data_u8_slice(GL_DISPATCH_INDIRECT_BUFFER, 0, byte_slice);
1572 gl.bind_buffer(GL_DISPATCH_INDIRECT_BUFFER, None);
1573 }
1574 }
1575
1576 pub fn bind(&self, gl: &glow::Context) {
1578 use glow::HasContext;
1579 let buf = self.gl_buffer();
1580 unsafe {
1581 gl.bind_buffer(GL_DISPATCH_INDIRECT_BUFFER, Some(buf));
1582 }
1583 }
1584
1585 pub fn handle(&self) -> BufferHandle {
1587 self.handle
1588 }
1589
1590 pub fn destroy(self, gl: &glow::Context) {
1592 self.tracker.record_free(self.handle.size_bytes);
1593 use glow::HasContext;
1594 unsafe {
1595 gl.delete_buffer(self.gl_buffer());
1596 }
1597 }
1598
1599 fn gl_buffer(&self) -> glow::NativeBuffer {
1600 glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap())
1601 }
1602}
1603
/// Describes one named field within a single [`StructuredBuffer`] element.
#[derive(Debug, Clone)]
pub struct FieldDescriptor {
    /// Field name (presumably matches the shader-side struct member — confirm with callers).
    pub name: String,
    /// Offset of the field from the start of an element, in bytes.
    pub offset: usize,
    /// Size of the field, in bytes.
    pub size: usize,
}
1618
1619pub struct StructuredBuffer {
1621 handle: BufferHandle,
1622 stride: usize,
1623 element_count: usize,
1624 fields: Vec<FieldDescriptor>,
1625 binding_index: u32,
1626 tracker: Arc<MemoryTracker>,
1627}
1628
1629impl StructuredBuffer {
1630 pub fn create(
1632 gl: &glow::Context,
1633 stride: usize,
1634 capacity: usize,
1635 fields: Vec<FieldDescriptor>,
1636 binding_index: u32,
1637 tracker: Arc<MemoryTracker>,
1638 ) -> Self {
1639 let byte_size = stride * capacity;
1640 use glow::HasContext;
1641 let raw = unsafe {
1642 let buf = gl.create_buffer().expect("Failed to create structured buffer");
1643 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
1644 gl.buffer_data_size(
1645 GL_SHADER_STORAGE_BUFFER,
1646 byte_size as i32,
1647 BufferUsage::DynamicDraw.to_gl(),
1648 );
1649 gl.bind_buffer_base(GL_SHADER_STORAGE_BUFFER, binding_index, Some(buf));
1650 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
1651 buf
1652 };
1653 tracker.record_alloc(byte_size);
1654 Self {
1655 handle: BufferHandle::new(raw.0.get(), byte_size),
1656 stride,
1657 element_count: capacity,
1658 fields,
1659 binding_index,
1660 tracker,
1661 }
1662 }
1663
1664 pub fn upload_raw(&self, gl: &glow::Context, data: &[u8]) {
1666 assert!(data.len() <= self.handle.size_bytes);
1667 use glow::HasContext;
1668 let buf = self.gl_buffer();
1669 unsafe {
1670 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, Some(buf));
1671 gl.buffer_sub_data_u8_slice(GL_SHADER_STORAGE_BUFFER, 0, data);
1672 gl.bind_buffer(GL_SHADER_STORAGE_BUFFER, None);
1673 }
1674 }
1675
1676 pub fn bind(&self, gl: &glow::Context) {
1678 use glow::HasContext;
1679 let buf = self.gl_buffer();
1680 unsafe {
1681 gl.bind_buffer_base(GL_SHADER_STORAGE_BUFFER, self.binding_index, Some(buf));
1682 }
1683 }
1684
1685 pub fn fields(&self) -> &[FieldDescriptor] {
1687 &self.fields
1688 }
1689
1690 pub fn stride(&self) -> usize {
1692 self.stride
1693 }
1694
1695 pub fn element_count(&self) -> usize {
1697 self.element_count
1698 }
1699
1700 pub fn handle(&self) -> BufferHandle {
1702 self.handle
1703 }
1704
1705 pub fn destroy(self, gl: &glow::Context) {
1707 self.tracker.record_free(self.handle.size_bytes);
1708 use glow::HasContext;
1709 unsafe {
1710 gl.delete_buffer(self.gl_buffer());
1711 }
1712 }
1713
1714 fn gl_buffer(&self) -> glow::NativeBuffer {
1715 glow::NativeBuffer(std::num::NonZeroU32::new(self.handle.raw).unwrap())
1716 }
1717}
1718
1719pub struct SharedBufferPool {
1722 inner: Mutex<BufferPool>,
1723}
1724
1725impl SharedBufferPool {
1726 pub fn new(tracker: Arc<MemoryTracker>, max_pool_size: usize) -> Self {
1728 Self {
1729 inner: Mutex::new(BufferPool::new(tracker, max_pool_size)),
1730 }
1731 }
1732
1733 pub fn acquire(
1735 &self,
1736 gl: &glow::Context,
1737 size_bytes: usize,
1738 usage: BufferUsage,
1739 ) -> BufferHandle {
1740 self.inner.lock().unwrap().acquire(gl, size_bytes, usage)
1741 }
1742
1743 pub fn release(&self, gl: &glow::Context, handle: BufferHandle) {
1745 self.inner.lock().unwrap().release(gl, handle);
1746 }
1747
1748 pub fn drain(&self, gl: &glow::Context) {
1750 self.inner.lock().unwrap().drain(gl);
1751 }
1752
1753 pub fn available_count(&self) -> usize {
1755 self.inner.lock().unwrap().available_count()
1756 }
1757
1758 pub fn in_use_count(&self) -> usize {
1760 self.inner.lock().unwrap().in_use_count()
1761 }
1762}