est_render/gpu/buffer/
mod.rs

1#![allow(dead_code)]
2
3use std::cell::RefMut;
4
5// use crate::{gpu::gpu_inner::GPUInner, utils::ArcRef};
6
7// use super::command::CommandBuffer;
8
9use crate::utils::ArcRef;
10
11use super::{
12    command::CommandBuffer,
13    GPUInner,
14};
15
16pub(crate) mod staging_buffer;
17
/// Represents the usage flags for a GPU buffer.
///
/// The flag values are kept bit-for-bit identical to `wgpu::BufferUsages`
/// (the conversion impl below relies on this), so converting between the
/// two is a plain bit reinterpretation.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferUsage(u32);
21
bitflags::bitflags! {
    impl BufferUsage: u32 {
        /// Buffer can be mapped for reading on the CPU.
        const MAP_READ = 0x0001;
        /// Buffer can be mapped for writing on the CPU.
        const MAP_WRITE = 0x0002;
        /// Buffer can be the source of a GPU copy.
        const COPY_SRC = 0x0004;
        /// Buffer can be the destination of a GPU copy.
        const COPY_DST = 0x0008;
        /// Buffer can be bound as an index buffer.
        const INDEX = 0x0010;
        /// Buffer can be bound as a vertex buffer.
        const VERTEX = 0x0020;
        /// Buffer can be bound as a uniform buffer.
        const UNIFORM = 0x0040;
        /// Buffer can be bound as a storage buffer.
        const STORAGE = 0x0080;
        /// Buffer can supply indirect draw/dispatch arguments.
        const INDIRECT = 0x0100;
        /// Buffer can receive query resolve results.
        const QUERY_RESOLVE = 0x0200;
    }
}
36
37impl Into<wgpu::BufferUsages> for BufferUsage {
38    fn into(self) -> wgpu::BufferUsages {
39        wgpu::BufferUsages::from_bits(self.bits()).unwrap()
40    }
41}
42
/// Initial contents for a buffer under construction by [`BufferBuilder`].
pub enum BufferData<T: bytemuck::Pod + bytemuck::Zeroable> {
    /// No initial contents; the buffer is created from a length alone.
    None,
    /// The buffer is created from and initialized with these elements.
    Data(Vec<T>),
}
47
/// Builder for constructing a [`Buffer`] with optional initial data,
/// usage flags and mapped state.
pub struct BufferBuilder<T: bytemuck::Pod + bytemuck::Zeroable> {
    // Handle to the GPU context used to allocate the buffer.
    graphics: ArcRef<GPUInner>,
    // Optional initial contents for the buffer.
    data: BufferData<T>,
    // Byte length of the buffer; forwarded to `Buffer::new` as the size
    // when `data` is `None`, otherwise only used for the emptiness check.
    len: usize,
    // Usage flags for the new buffer.
    usage: BufferUsage,
    // Whether to create the buffer in a mapped (CPU-accessible) state.
    mapped: bool,
}
55
56impl<T: bytemuck::Pod + bytemuck::Zeroable> BufferBuilder<T> {
57    pub(crate) fn new(graphics: ArcRef<GPUInner>) -> Self {
58        BufferBuilder {
59            graphics,
60            data: BufferData::None,
61            usage: BufferUsage::empty(),
62            len: 0,
63            mapped: false,
64        }
65    }
66
67    /// Set empty data for the buffer.
68    pub fn set_data_empty(mut self, len: usize) -> Self {
69        self.len = len;
70        self
71    }
72
73    /// Set data for the buffer from a vector.
74    pub fn set_data_vec(mut self, data: Vec<T>) -> Self {
75        self.data = BufferData::Data(bytemuck::cast_slice(&data).to_vec());
76        self.len = data.len() * std::mem::size_of::<T>();
77        self
78    }
79
80    /// Set data for the buffer from a slice.
81    pub fn set_data_slice(mut self, data: &[T]) -> Self {
82        self.data = BufferData::Data(bytemuck::cast_slice(data).to_vec());
83        self.len = data.len() * std::mem::size_of::<T>();
84        self
85    }
86
87    /// Set the buffer usage flags.
88    pub fn set_usage(mut self, usage: BufferUsage) -> Self {
89        self.usage = usage;
90        self
91    }
92
93    /// Set mapped state for the buffer.
94    ///
95    /// This is useful when you want to map the buffer for writing the data directly to the GPU memory.
96    ///
97    /// You have to call [Buffer::unmap] to unmap the buffer after you are done using it.
98    /// Otherwise, the command will panic when you try to use the buffer on mapped state.
99    pub fn set_mapped(mut self, mapped: bool) -> Self {
100        self.mapped = mapped;
101        self
102    }
103
104    pub fn build(self) -> Result<Buffer, BufferError> {
105        if self.len == 0 && matches!(self.data, BufferData::None) {
106            return Err(BufferError::InvalidSize);
107        }
108
109        match self.data {
110            BufferData::None => Buffer::new(
111                self.graphics,
112                self.len as wgpu::BufferAddress,
113                self.usage,
114                self.mapped,
115            ),
116            BufferData::Data(data) => {
117                Buffer::from_slice(self.graphics, &data, self.usage, self.mapped)
118            }
119        }
120    }
121}
122
/// Shared internal state of a [`Buffer`]; held behind an [`ArcRef`] so that
/// cloned `Buffer` handles observe the same underlying GPU resource.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct BufferInner {
    // The underlying wgpu buffer object.
    pub buffer: wgpu::Buffer,

    // Size of the buffer in bytes.
    pub size: wgpu::BufferAddress,
    // Usage flags the buffer was created with.
    pub usage: BufferUsage,
    // Whether the buffer is currently considered mapped for CPU access.
    pub mapped: bool,
}
131
/// Represents a GPU buffer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Buffer {
    // Handle to the owning GPU context.
    pub(crate) graphics: ArcRef<GPUInner>,
    // Shared state (wgpu buffer, size, usage, mapped flag).
    pub(crate) inner: ArcRef<BufferInner>,

    pub(crate) mapped_buffer: Vec<u8>, // Host-side staging bytes while the buffer is mapped
    pub(crate) mapped_type: BufferMapMode, // Mode of the current/last mapping; drives unmap behavior
}
141
/// Errors returned by [`Buffer`] and [`BufferBuilder`] operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BufferError {
    /// The buffer's usage flags are not valid for the requested operation.
    InvalidUsage,
    /// A zero or otherwise invalid size was requested.
    InvalidSize,
    /// The buffer lacks `COPY_SRC`/`MAP_READ` usage and cannot be read back.
    BufferNotReadable,
    /// The buffer cannot be written to.
    BufferNotWritable,
    /// Mapping the buffer for CPU access failed.
    FailedToMapBuffer,
}

impl std::fmt::Display for BufferError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let msg = match self {
            BufferError::InvalidUsage => "invalid buffer usage",
            BufferError::InvalidSize => "invalid buffer size",
            BufferError::BufferNotReadable => "buffer is not readable",
            BufferError::BufferNotWritable => "buffer is not writable",
            BufferError::FailedToMapBuffer => "failed to map buffer",
        };
        f.write_str(msg)
    }
}

// Error enums should implement `std::error::Error` so callers can box or
// chain them with other error types.
impl std::error::Error for BufferError {}
150
151impl Buffer {
152    pub(crate) fn new(
153        graphics: ArcRef<GPUInner>,
154        size: wgpu::BufferAddress,
155        usage: BufferUsage,
156        mapped: bool,
157    ) -> Result<Self, BufferError> {
158        if size == 0 {
159            return Err(BufferError::InvalidSize);
160        }
161
162        let buffer = {
163            let mut graphics_ref = graphics.borrow_mut();
164            let usage_wgpu: wgpu::BufferUsages = usage.clone().into();
165
166            graphics_ref.create_buffer(size, usage_wgpu, mapped)
167        };
168
169        let inner = BufferInner {
170            buffer,
171            size,
172            usage,
173            mapped,
174        };
175
176        Ok(Buffer {
177            graphics,
178            inner: ArcRef::new(inner),
179            mapped_buffer: if mapped {
180                vec![0; size as usize]
181            } else {
182                vec![]
183            },
184            mapped_type: BufferMapMode::Write,
185        })
186    }
187
188    pub(crate) fn from_slice<T: bytemuck::Pod>(
189        graphics: ArcRef<GPUInner>,
190        data: &[T],
191        usage: BufferUsage,
192        mapped: bool,
193    ) -> Result<Self, BufferError> {
194        if data.is_empty() {
195            return Err(BufferError::InvalidSize);
196        }
197
198        let size = (data.len() * std::mem::size_of::<T>()) as wgpu::BufferAddress;
199        let buffer = {
200            let mut graphics_ref = graphics.borrow_mut();
201            let usage_wgpu: wgpu::BufferUsages = usage.clone().into();
202
203            graphics_ref.create_buffer_with(data, usage_wgpu)
204        };
205
206        let inner = BufferInner {
207            buffer,
208            size,
209            usage,
210            mapped,
211        };
212
213        Ok(Buffer {
214            graphics,
215            inner: ArcRef::new(inner),
216            mapped_buffer: if mapped {
217                bytemuck::cast_slice(data).to_vec()
218            } else {
219                vec![]
220            },
221            mapped_type: BufferMapMode::Write,
222        })
223    }
224
    /// Returns the usage flags the buffer was created with.
    pub fn usage(&self) -> BufferUsage {
        self.inner.wait_borrow().usage
    }
228
    /// Returns the size of the buffer in bytes.
    pub fn size(&self) -> u64 {
        self.inner.wait_borrow().size
    }
232
233    /// Resizes the buffer to the specified size.
234    ///
235    /// Due to the nature of GPU buffers, this will create a new buffer and copy the existing data into it IF: \
236    /// - The old buffer has usage [BufferUsages::COPY_SRC] and [BufferUsages::MAP_READ].
237    ///
238    /// Otherwise, it will simply resize the buffer without copying the data.
239    pub fn resize(&mut self, size: u64) -> Result<(), BufferError> {
240        if size == 0 {
241            return Err(BufferError::InvalidSize);
242        }
243
244        let old_data = self.read::<u8>();
245
246        let mut inner = self.inner.wait_borrow_mut();
247        let mut graphics_ref = self.graphics.borrow_mut();
248
249        let new_buffer = {
250            if let Ok(old_data) = old_data {
251                // truance or increase old data
252                let mut old_data = old_data;
253
254                if old_data.len() < size as usize {
255                    // If the old data is smaller than the new size, we need to pad it with zeros
256                    old_data.resize(size as usize, 0);
257                } else if old_data.len() > size as usize {
258                    // If the old data is larger than the new size, we need to truncate it
259                    old_data.truncate(size as usize);
260                }
261
262                graphics_ref.create_buffer_with(&old_data, inner.usage.clone().into())
263            } else {
264                graphics_ref.create_buffer(
265                    size as wgpu::BufferAddress,
266                    inner.usage.clone().into(),
267                    false,
268                )
269            }
270        };
271
272        inner.buffer = new_buffer;
273        inner.size = size as wgpu::BufferAddress;
274
275        Ok(())
276    }
277
278    /// Writes the contents of the source buffer to this buffer.
279    pub fn write(&self, src: &Buffer) {
280        let graphics_ref = self.graphics.borrow();
281        let mut encoder =
282            graphics_ref
283                .device()
284                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
285                    label: Some("Buffer Write Command Encoder"),
286                });
287
288        self.internal_write_cmd(src, &mut encoder);
289
290        graphics_ref
291            .queue()
292            .submit(std::iter::once(encoder.finish()));
293        _ = graphics_ref.device().poll(wgpu::PollType::Wait);
294    }
295
296    /// Writes the contents of the source buffer to this buffer using a command buffer.
297    ///
298    /// This function is useful for when you want to write to the buffer in a command buffer context, such as during a render pass.
299    ///
300    /// [CommandBuffer::write_buffer] is a more convenient way to write a buffer in a command buffer context.
301    pub fn write_cmd(&self, src: &Buffer, encoder: &mut CommandBuffer) {
302        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
303        {
304            let inner = self.inner.wait_borrow();
305            let src_inner = src.inner.wait_borrow();
306
307            if !src_inner.usage.contains(BufferUsage::COPY_SRC) {
308                panic!("Source buffer is not readable");
309            }
310
311            if inner.size < src_inner.size {
312                panic!("Destination buffer is too small");
313            }
314        }
315
316        self.internal_write_cmd(src, &mut encoder.command.as_mut().unwrap().borrow_mut());
317    }
318
319    #[inline(always)]
320    pub(crate) fn internal_write_cmd_mut_ref(
321        &self,
322        src: &Buffer,
323        encoder: &mut RefMut<'_, wgpu::CommandEncoder>,
324    ) {
325        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
326        {
327            let inner = self.inner.wait_borrow();
328            let src_inner = src.inner.wait_borrow();
329
330            if !inner.usage.contains(BufferUsage::COPY_DST) {
331                panic!("Buffer is not writable");
332            }
333
334            if !src_inner.usage.contains(BufferUsage::COPY_SRC) {
335                panic!("Source buffer is not readable");
336            }
337
338            if inner.size < src_inner.size {
339                panic!("Destination buffer is too small");
340            }
341        }
342
343        let src_inner = src.inner.wait_borrow();
344        let inner = self.inner.wait_borrow();
345
346        encoder.copy_buffer_to_buffer(&src_inner.buffer, 0, &inner.buffer, 0, inner.size);
347    }
348
349    #[inline(always)]
350    pub(crate) fn internal_write_cmd(&self, src: &Buffer, encoder: &mut wgpu::CommandEncoder) {
351        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
352        {
353            let inner = self.inner.wait_borrow();
354            let src_inner = src.inner.wait_borrow();
355
356            if !inner.usage.contains(BufferUsage::COPY_DST) {
357                panic!("Buffer is not writable");
358            }
359
360            if !src_inner.usage.contains(BufferUsage::COPY_SRC) {
361                panic!("Source buffer is not readable");
362            }
363
364            if inner.size < src_inner.size {
365                panic!("Destination buffer is too small");
366            }
367        }
368
369        let src_inner = src.inner.wait_borrow();
370        let inner = self.inner.wait_borrow();
371
372        encoder.copy_buffer_to_buffer(&src_inner.buffer, 0, &inner.buffer, 0, inner.size);
373    }
374
375    /// Writes raw data to the buffer.
376    ///
377    /// By default, this will create an intermediate buffer to copy the data into, and then write that buffer to the destination buffer.
378    /// This function also will automatically pad the data to the required alignment if necessary.
379    ///
380    /// Will panic if the buffer is not writable or if the data is larger than the buffer size.
381    pub fn write_raw<T: bytemuck::Pod + bytemuck::Zeroable>(&self, data: &[T]) {
382        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
383        {
384            let inner = self.inner.wait_borrow();
385            if !inner.usage.contains(BufferUsage::COPY_DST) {
386                panic!("Buffer is not writable");
387            }
388
389            if inner.size < data.len() as u64 * std::mem::size_of::<T>() as u64 {
390                panic!("Destination buffer is too small");
391            }
392        }
393
394        let graphics_ref = self.graphics.borrow();
395
396        let mut encoder =
397            graphics_ref
398                .device()
399                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
400                    label: Some("Buffer Write Raw Command Encoder"),
401                });
402
403        self.internal_write_raw_cmd(data, &mut encoder);
404
405        graphics_ref
406            .queue()
407            .submit(std::iter::once(encoder.finish()));
408
409        _ = graphics_ref.device().poll(wgpu::PollType::Wait);
410    }
411
412    /// Writes raw data to the buffer using a command buffer, useful for writing data during a render pass.
413    ///
414    /// This function is useful for when you want to write to the buffer in a command buffer context, such as during a render pass.
415    /// This function also will automatically pad the data to the required alignment if necessary.
416    ///
417    /// [CommandBuffer::write_buffer_raw] is a more convenient way to write raw data to a buffer in a command buffer context.
418    ///
419    /// Will panic if the buffer is not writable or if the data is larger than the buffer size.
420    pub fn write_raw_cmd<T: bytemuck::Pod + bytemuck::Zeroable>(
421        &self,
422        data: &[T],
423        encoder: &mut CommandBuffer,
424    ) {
425        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
426        {
427            let inner = self.inner.wait_borrow();
428
429            if !inner.usage.contains(BufferUsage::COPY_DST) {
430                panic!("Buffer is not writable");
431            }
432
433            if inner.size < data.len() as u64 * std::mem::size_of::<T>() as u64 {
434                panic!("Destination buffer is too small");
435            }
436
437            if encoder.command.is_none() {
438                panic!("Command buffer is not writable");
439            }
440        }
441
442        let mut cmd = encoder.command.as_mut().unwrap().borrow_mut();
443
444        self.internal_write_raw_cmd(data, &mut cmd);
445    }
446
447    pub(crate) fn internal_write_raw_cmd<T: bytemuck::Pod + bytemuck::Zeroable>(
448        &self,
449        data: &[T],
450        encoder: &mut wgpu::CommandEncoder,
451    ) {
452        let inner = self.inner.wait_borrow();
453        let mut graphics_ref = self.graphics.borrow_mut();
454
455        let data_len = data.len() as u64 * std::mem::size_of::<T>() as u64;
456
457        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
458        {
459            if !inner.usage.contains(BufferUsage::COPY_DST) {
460                panic!("Buffer is not writable");
461            }
462
463            if inner.size < data_len {
464                panic!("Destination buffer is too small");
465            }
466        }
467
468        let buffer = {
469            let data: Vec<u8> = bytemuck::cast_slice(data).to_vec();
470
471            if data.len() as wgpu::BufferAddress % wgpu::COPY_BUFFER_ALIGNMENT != 0 {
472                // If the data length is not aligned, we need to pad it
473                let mut padded_data = data.to_vec();
474                padded_data.resize(
475                    ((data_len + wgpu::COPY_BUFFER_ALIGNMENT as u64 - 1)
476                        / wgpu::COPY_BUFFER_ALIGNMENT as u64
477                        * wgpu::COPY_BUFFER_ALIGNMENT as u64) as usize,
478                    0,
479                );
480
481                graphics_ref.create_buffer_with(&padded_data, wgpu::BufferUsages::COPY_SRC)
482            } else {
483                graphics_ref.create_buffer_with(&data, wgpu::BufferUsages::COPY_SRC)
484            }
485        };
486
487        encoder.copy_buffer_to_buffer(
488            &buffer,
489            0,
490            &inner.buffer,
491            0,
492            buffer.size() as wgpu::BufferAddress,
493        );
494    }
495
496    pub(crate) fn internal_write_raw_cmd_ref<T: bytemuck::Pod + bytemuck::Zeroable>(
497        &self,
498        data: &[T],
499        encoder: &mut RefMut<'_, wgpu::CommandEncoder>,
500    ) {
501        let inner = self.inner.wait_borrow();
502        let mut graphics_ref = self.graphics.borrow_mut();
503
504        let data_len = data.len() as u64 * std::mem::size_of::<T>() as u64;
505
506        #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
507        {
508            if !inner.usage.contains(BufferUsage::COPY_DST) {
509                panic!("Buffer is not writable");
510            }
511
512            if inner.size < data_len {
513                panic!("Destination buffer is too small");
514            }
515        }
516
517        let buffer = graphics_ref.create_buffer_with(data, wgpu::BufferUsages::COPY_SRC);
518
519        encoder.copy_buffer_to_buffer(
520            &buffer,
521            0,
522            &inner.buffer,
523            0,
524            buffer.size() as wgpu::BufferAddress,
525        );
526    }
527
528    /// Reads the buffer data into a vector of type T.
529    ///
530    /// Unless if the buffer was created with [BufferUsages::COPY_SRC] or [BufferUsages::MAP_READ], this will create an
531    /// intermediate buffer to copy the data into, and then read from that buffer.
532    pub fn read<T: bytemuck::Pod + bytemuck::Zeroable>(&self) -> Result<Vec<T>, BufferError> {
533        let mut graphics_ref = self.graphics.borrow_mut();
534        let inner = self.inner.wait_borrow();
535
536        if !inner.usage.contains(BufferUsage::COPY_SRC)
537            && !inner.usage.contains(BufferUsage::MAP_READ)
538        {
539            return Err(BufferError::BufferNotReadable);
540        }
541
542        if inner.mapped {
543            let data = inner.buffer.slice(..inner.size).get_mapped_range();
544            let result = bytemuck::cast_slice(&data).to_vec();
545            drop(data);
546
547            Ok(result)
548        } else {
549            let buffer = graphics_ref.create_buffer(
550                inner.size,
551                wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
552                false,
553            );
554
555            let mut encoder =
556                graphics_ref
557                    .device()
558                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
559                        label: Some("Buffer Read Command Encoder"),
560                    });
561
562            encoder.copy_buffer_to_buffer(
563                &inner.buffer,
564                0,
565                &buffer,
566                0,
567                inner.size as wgpu::BufferAddress,
568            );
569
570            graphics_ref
571                .queue()
572                .submit(std::iter::once(encoder.finish()));
573
574            _ = graphics_ref.device().poll(wgpu::PollType::Wait);
575
576            let result = {
577                let mapped_buffer = buffer.slice(..inner.size).get_mapped_range();
578                let result = bytemuck::cast_slice(&mapped_buffer).to_vec();
579
580                result
581            };
582
583            Ok(result)
584        }
585    }
586
587    pub fn map(&mut self, mode: BufferMapMode) -> Result<&mut Vec<u8>, BufferError> {
588        let mut inner = self.inner.wait_borrow_mut();
589
590        match mode {
591            BufferMapMode::Write => {
592                inner.mapped = true;
593
594                self.mapped_buffer = vec![0; inner.size as usize];
595
596                return Ok(&mut self.mapped_buffer);
597            }
598            BufferMapMode::Read => {
599                inner.mapped = true;
600
601                drop(inner);
602
603                let buffer = self.read::<u8>()?;
604                self.mapped_buffer = buffer;
605
606                return Ok(&mut self.mapped_buffer);
607            }
608        }
609    }
610
611    pub fn unmap(&mut self) {
612        if self.mapped_buffer.is_empty() {
613            #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
614            {
615                panic!("Buffer is not mapped");
616            }
617
618            #[allow(unreachable_code)]
619            return;
620        }
621
622        let inner = self.inner.wait_borrow();
623        if !inner.mapped {
624            #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
625            {
626                panic!("Buffer is not mapped");
627            }
628
629            #[allow(unreachable_code)]
630            return;
631        }
632
633        match self.mapped_type {
634            BufferMapMode::Write => {
635                inner.buffer.unmap();
636
637                drop(inner);
638
639                self.write_raw(&self.mapped_buffer);
640            }
641            BufferMapMode::Read => {
642                self.mapped_buffer = vec![];
643            }
644        }
645    }
646
    /// Maps `buffer` for CPU access and reports whether mapping succeeded.
    ///
    /// Registers the `map_async` callback, polls the device so the callback
    /// fires, then awaits the result.
    ///
    /// NOTE(review): currently unused (the file-wide `dead_code` allow masks
    /// this); it looks intended for the readback path in `read` — confirm.
    async fn map_buffer(
        device: &wgpu::Device,
        buffer: &wgpu::Buffer,
        map_mode: wgpu::MapMode,
    ) -> bool {
        let (sender, receiver) = futures::channel::oneshot::channel();

        buffer.slice(..).map_async(map_mode, |result| {
            let _ = sender.send(result);
        });

        // Blocking poll drives the GPU until outstanding work completes.
        _ = device.poll(wgpu::PollType::Wait);

        receiver.await.unwrap().is_ok()
    }
662}
663
// Hash only the shared inner state: two `Buffer` handles over the same
// `BufferInner` hash identically, consistent with `PartialEq` on `inner`.
impl std::hash::Hash for Buffer {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.inner.hash(state);
    }
}
669
/// How a buffer is mapped for CPU access; determines what
/// [`Buffer::unmap`] does with the staged bytes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BufferMapMode {
    /// Mapped for reading; staging bytes are discarded on unmap.
    Read,
    /// Mapped for writing; staging bytes are flushed to the GPU on unmap.
    Write,
}