nannou_wgpu/
bind_group_builder.rs

1use wgpu_upstream::{BufferBinding, SamplerBindingType};
2
3use crate as wgpu;
4
/// A type aimed at simplifying the creation of a bind group layout.
#[derive(Debug, Default)]
pub struct LayoutBuilder {
    // One `(visibility, type)` entry per binding, in insertion order. The index of an entry
    // within this `Vec` becomes the binding's `binding` location when `build` is called.
    bindings: Vec<(wgpu::ShaderStages, wgpu::BindingType)>,
}
10
/// Simplified creation of a bind group.
#[derive(Debug, Default)]
pub struct Builder<'a> {
    // One resource per binding, in insertion order. The index of a resource within this `Vec`
    // becomes the binding's `binding` location when `build` is called, so resources must be
    // added in the same order as the bindings of the associated layout.
    resources: Vec<wgpu::BindingResource<'a>>,
}
16
17impl LayoutBuilder {
18    /// Begin building the bind group layout.
19    pub fn new() -> Self {
20        Self::default()
21    }
22
23    /// Specify a new binding.
24    ///
25    /// The `binding` position of each binding will be inferred as the index within the order that
26    /// they are added to this builder type. If you require manually specifying the binding
27    /// location, you may be better off not using the `BindGroupLayoutBuilder` and instead
28    /// constructing the `BindGroupLayout` and `BindGroup` manually.
29    pub fn binding(mut self, visibility: wgpu::ShaderStages, ty: wgpu::BindingType) -> Self {
30        self.bindings.push((visibility, ty));
31        self
32    }
33
34    /// Add a uniform buffer binding to the layout.
35    pub fn uniform_buffer(self, visibility: wgpu::ShaderStages, has_dynamic_offset: bool) -> Self {
36        let ty = wgpu::BindingType::Buffer {
37            ty: wgpu::BufferBindingType::Uniform,
38            has_dynamic_offset,
39            // wgpu 0.5-0.6 TODO: potential perf hit, investigate this field
40            min_binding_size: None,
41        };
42        self.binding(visibility, ty)
43    }
44
45    /// Add a storage buffer binding to the layout.
46    pub fn storage_buffer(
47        self,
48        visibility: wgpu::ShaderStages,
49        has_dynamic_offset: bool,
50        read_only: bool,
51    ) -> Self {
52        let ty = wgpu::BindingType::Buffer {
53            ty: wgpu::BufferBindingType::Storage { read_only },
54            has_dynamic_offset,
55            // wgpu 0.5-0.6 TODO: potential perf hit, investigate this field
56            min_binding_size: None,
57        };
58        self.binding(visibility, ty)
59    }
60
61    /// Add a sampler binding to the layout.
62    pub fn sampler(self, visibility: wgpu::ShaderStages, filtering: bool) -> Self {
63        let ty = wgpu::BindingType::Sampler(if filtering {
64            SamplerBindingType::Filtering
65        } else {
66            SamplerBindingType::NonFiltering
67        });
68        self.binding(visibility, ty)
69    }
70
71    /// Add a sampler binding to the layout.
72    pub fn comparison_sampler(self, visibility: wgpu::ShaderStages) -> Self {
73        let ty = wgpu::BindingType::Sampler(SamplerBindingType::Comparison);
74        self.binding(visibility, ty)
75    }
76
77    /// Add a texture binding to the layout.
78    pub fn texture(
79        self,
80        visibility: wgpu::ShaderStages,
81        multisampled: bool,
82        view_dimension: wgpu::TextureViewDimension,
83        sample_type: wgpu::TextureSampleType,
84    ) -> Self {
85        // fix sample type in certain scenarios (constraint given by wgpu)
86        let sample_type = if multisampled
87            && matches!(
88                sample_type,
89                wgpu::TextureSampleType::Float { filterable: true }
90            ) {
91            wgpu::TextureSampleType::Float { filterable: false }
92        } else {
93            sample_type
94        };
95        let ty = wgpu::BindingType::Texture {
96            multisampled,
97            view_dimension,
98            sample_type,
99        };
100        self.binding(visibility, ty)
101    }
102
103    /// Short-hand for adding a texture binding for a full view of the given texture to the layout.
104    ///
105    /// The `multisampled` and `dimension` parameters are retrieved from the `Texture` itself.
106    ///
107    /// Note that if you wish to take a `Cube` or `CubeArray` view of the given texture, you will
108    /// need to manually specify the `TextureViewDimension` via the `sampled_texture` method
109    /// instead.
110    pub fn texture_from(self, visibility: wgpu::ShaderStages, texture: &wgpu::Texture) -> Self {
111        self.texture(
112            visibility,
113            texture.sample_count() > 1,
114            texture.view_dimension(),
115            texture.sample_type(),
116        )
117    }
118
119    /// Add a storage texture binding to the layout.
120    pub fn storage_texture(
121        self,
122        visibility: wgpu::ShaderStages,
123        format: wgpu::TextureFormat,
124        view_dimension: wgpu::TextureViewDimension,
125        access: wgpu::StorageTextureAccess,
126    ) -> Self {
127        let ty = wgpu::BindingType::StorageTexture {
128            view_dimension,
129            format,
130            access,
131        };
132        self.binding(visibility, ty)
133    }
134
135    /// Short-hand for adding a storage texture binding for a full view of the given texture to the
136    /// layout.
137    ///
138    /// The `format`, `dimension` and `sample_type` are inferred from the given `texture`.
139    pub fn storage_texture_from(
140        self,
141        visibility: wgpu::ShaderStages,
142        texture: &wgpu::Texture,
143        access: wgpu::StorageTextureAccess,
144    ) -> Self {
145        self.storage_texture(
146            visibility,
147            texture.format(),
148            texture.view_dimension(),
149            access,
150        )
151    }
152
153    /// Build the bind group layout from the specified parameters.
154    pub fn build(self, device: &wgpu::Device) -> wgpu::BindGroupLayout {
155        let mut entries = Vec::with_capacity(self.bindings.len());
156        for (i, (visibility, ty)) in self.bindings.into_iter().enumerate() {
157            let layout_binding = wgpu::BindGroupLayoutEntry {
158                binding: i as u32,
159                visibility,
160                ty,
161                count: None,
162            };
163            entries.push(layout_binding);
164        }
165        let descriptor = wgpu::BindGroupLayoutDescriptor {
166            label: Some("nannou bind group layout"),
167            entries: &entries,
168        };
169        device.create_bind_group_layout(&descriptor)
170    }
171}
172
173impl<'a> Builder<'a> {
174    /// Begin building the bind group.
175    pub fn new() -> Self {
176        Self::default()
177    }
178
179    /// Specify a new binding.
180    ///
181    /// The `binding` position of each binding will be inferred as the index within the order that
182    /// they are added to this builder type. If you require manually specifying the binding
183    /// location, you may be better off not using the `BindGroupBuilder` and instead constructing
184    /// the `BindGroupLayout` and `BindGroup` manually.
185    pub fn binding(mut self, resource: wgpu::BindingResource<'a>) -> Self {
186        self.resources.push(resource);
187        self
188    }
189
190    /// Specify a slice of a buffer to be bound.
191    ///
192    /// The given `range` represents the start and end point of the buffer to be bound in bytes.
193    pub fn buffer_bytes(
194        self,
195        buffer: &'a wgpu::Buffer,
196        offset: wgpu::BufferAddress,
197        size: Option<wgpu::BufferSize>,
198    ) -> Self {
199        let resource = wgpu::BindingResource::Buffer(BufferBinding {
200            buffer,
201            offset,
202            size,
203        });
204        self.binding(resource)
205    }
206
207    /// Specify a slice of a buffer of elements of type `T` to be bound.
208    ///
209    /// This method is similar to `buffer_bytes`, but expects a range of **elements** rather than a
210    /// range of **bytes**.
211    ///
212    /// Type `T` *must* be either `#[repr(C)]` or `#[repr(transparent)]`.
213    // NOTE: We might want to change this to match the wgpu API by using a NonZeroU64 for size.
214    pub fn buffer<T>(self, buffer: &'a wgpu::Buffer, range: std::ops::Range<usize>) -> Self
215    where
216        T: Copy,
217    {
218        let size_bytes = std::mem::size_of::<T>() as wgpu::BufferAddress;
219        let start = range.start as wgpu::BufferAddress * size_bytes;
220        let end = range.end as wgpu::BufferAddress * size_bytes;
221        let size = std::num::NonZeroU64::new(end - start).expect("buffer slice must not be empty");
222        self.buffer_bytes(buffer, start, Some(size))
223    }
224
225    /// Specify a sampler to be bound.
226    pub fn sampler(self, sampler: &'a wgpu::Sampler) -> Self {
227        let resource = wgpu::BindingResource::Sampler(sampler);
228        self.binding(resource)
229    }
230
231    /// Specify a texture view to be bound.
232    pub fn texture_view(self, view: &'a wgpu::TextureViewHandle) -> Self {
233        let resource = wgpu::BindingResource::TextureView(view);
234        self.binding(resource)
235    }
236
237    /// Build the bind group with the specified resources.
238    pub fn build(self, device: &wgpu::Device, layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
239        let mut entries = Vec::with_capacity(self.resources.len());
240        for (i, resource) in self.resources.into_iter().enumerate() {
241            let binding = wgpu::BindGroupEntry {
242                binding: i as u32,
243                resource,
244            };
245            entries.push(binding);
246        }
247        let descriptor = wgpu::BindGroupDescriptor {
248            label: Some("nannou bind group"),
249            layout,
250            entries: &entries,
251        };
252        device.create_bind_group(&descriptor)
253    }
254}