blade_util/belt.rs

use blade_graphics as gpu;
use std::mem;

struct ReusableBuffer {
    raw: gpu::Buffer,
    size: u64,
}

/// Configuration of the Blade belt.
pub struct BufferBeltDescriptor {
    /// Kind of memory to allocate from.
    pub memory: gpu::Memory,
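    /// Minimum size of each newly allocated chunk, in bytes.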
    pub min_chunk_size: u64,
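    /// Alignment of each returned allocation; must be non-zero.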
    pub alignment: u64,
}

/// A belt of reusable buffer space.
/// Could be useful for temporary data, such as texture staging areas.
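///
/// A rough usage sketch for one frame (here `ctx` is an existing `gpu::Context`
/// and `sync_point` is a `gpu::SyncPoint` from a prior submission; both are
/// placeholders, not part of this module):
///
/// ```ignore
/// let mut belt = BufferBelt::new(BufferBeltDescriptor {
///     memory: gpu::Memory::Upload,
///     min_chunk_size: 0x1_0000,
///     alignment: 4,
/// });
/// // Stage some bytes for this frame's commands.
/// let piece = belt.alloc_bytes(&[0u8; 256], &ctx);
/// // After submitting the frame, retire the active chunks so they can be
/// // reused once `sync_point` is reached.
/// belt.flush(&sync_point);
/// // On shutdown, free all chunks.
/// belt.destroy(&ctx);
/// ```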
pub struct BufferBelt {
    desc: BufferBeltDescriptor,
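    // Retired chunks, each paired with the sync point after which it can be reused.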
    buffers: Vec<(ReusableBuffer, gpu::SyncPoint)>,
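    // Chunks currently receiving sub-allocations, each paired with its current end offset.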
    active: Vec<(ReusableBuffer, u64)>,
}

impl BufferBelt {
    /// Create a new belt.
    pub fn new(desc: BufferBeltDescriptor) -> Self {
        assert_ne!(desc.alignment, 0);
        Self {
            desc,
            buffers: Vec::new(),
            active: Vec::new(),
        }
    }

    /// Destroy this belt.
    pub fn destroy(&mut self, gpu: &gpu::Context) {
        for (buffer, _) in self.buffers.drain(..) {
            gpu.destroy_buffer(buffer.raw);
        }
        for (buffer, _) in self.active.drain(..) {
            gpu.destroy_buffer(buffer.raw);
        }
    }

    /// Allocate a region of `size` bytes.
    #[profiling::function]
    pub fn alloc(&mut self, size: u64, gpu: &gpu::Context) -> gpu::BufferPiece {
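        // First, try to sub-allocate from a chunk that is already active.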
        for &mut (ref rb, ref mut offset) in self.active.iter_mut() {
            let aligned = offset.next_multiple_of(self.desc.alignment);
            if aligned + size <= rb.size {
                let piece = rb.raw.at(aligned);
                *offset = aligned + size;
                return piece;
            }
        }

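        // No active chunk has room: look for a retired chunk that is large enough
        // and whose GPU work has already completed (`wait_for` with a zero timeout
        // is a non-blocking check).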
        let index_maybe = self
            .buffers
            .iter()
            .position(|(rb, sp)| size <= rb.size && gpu.wait_for(sp, 0));
        if let Some(index) = index_maybe {
            let (rb, _) = self.buffers.remove(index);
            let piece = rb.raw.into();
            self.active.push((rb, size));
            return piece;
        }

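        // Nothing is reusable: create a fresh chunk, at least `min_chunk_size` bytes.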
        let chunk_index = self.buffers.len() + self.active.len();
        let chunk_size = size.max(self.desc.min_chunk_size);
        let chunk = gpu.create_buffer(gpu::BufferDesc {
            name: &format!("chunk-{}", chunk_index),
            size: chunk_size,
            memory: self.desc.memory,
        });
        let rb = ReusableBuffer {
            raw: chunk,
            size: chunk_size,
        };
        self.active.push((rb, size));
        chunk.into()
    }

    /// Allocate a region to hold the byte `data` slice contents.
    pub fn alloc_bytes(&mut self, data: &[u8], gpu: &gpu::Context) -> gpu::BufferPiece {
        assert!(!data.is_empty());
        let bp = self.alloc(data.len() as u64, gpu);
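        // Copy through the mapped pointer; this assumes the belt was configured
        // with host-visible memory so that `data()` is valid to write to.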
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), bp.data(), data.len());
        }
        bp
    }

    /// Allocate a region to hold the typed `data` slice contents.
    ///
    /// # Safety
    /// `T` must be zeroable plain data: no references, pointers, cells, or other non-trivial types.
    pub unsafe fn alloc_typed<T>(&mut self, data: &[T], gpu: &gpu::Context) -> gpu::BufferPiece {
        assert!(!data.is_empty());
        let type_alignment = mem::align_of::<T>() as u64;
        debug_assert_eq!(
            self.desc.alignment % type_alignment,
            0,
            "Type alignment {} is too big",
            type_alignment
        );
        let total_bytes = std::mem::size_of_val(data);
        let bp = self.alloc(total_bytes as u64, gpu);
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, bp.data(), total_bytes);
        }
        bp
    }

    /// Allocate a region to hold the POD `data` slice contents.
    pub fn alloc_pod<T: bytemuck::Pod>(
        &mut self,
        data: &[T],
        gpu: &gpu::Context,
    ) -> gpu::BufferPiece {
        unsafe { self.alloc_typed(data, gpu) }
    }

    /// Mark the actively used buffers as used by the GPU, to be reclaimed once the given sync point is reached.
    pub fn flush(&mut self, sp: &gpu::SyncPoint) {
        self.buffers
            .extend(self.active.drain(..).map(|(rb, _)| (rb, sp.clone())));
    }
}