screen_13/graph/pass_ref.rs

//! Strongly-typed rendering commands.

use {
    super::{
        AccelerationStructureLeaseNode, AccelerationStructureNode, AnyAccelerationStructureNode,
        AnyBufferNode, AnyImageNode, Area, Attachment, Bind, Binding, BufferLeaseNode, BufferNode,
        ClearColorValue, Edge, Execution, ExecutionFunction, ExecutionPipeline, ImageLeaseNode,
        ImageNode, Information, Node, NodeIndex, Pass, RenderGraph, SampleCount,
        SwapchainImageNode,
    },
    crate::driver::{
        accel_struct::{
            AccelerationStructure, AccelerationStructureGeometry,
            AccelerationStructureGeometryInfo, DeviceOrHostAddress,
        },
        buffer::{Buffer, BufferSubresourceRange},
        compute::ComputePipeline,
        device::Device,
        graphic::{DepthStencilMode, GraphicPipeline},
        image::{
            Image, ImageViewInfo, image_subresource_range_contains,
            image_subresource_range_intersects,
        },
        ray_trace::RayTracePipeline,
        render_pass::ResolveMode,
    },
    ash::vk,
    log::trace,
    std::{
        cell::RefCell,
        marker::PhantomData,
        ops::{Index, Range},
        sync::Arc,
    },
    vk_sync::AccessType,
};

/// Alias for the index of a framebuffer attachment.
pub type AttachmentIndex = u32;

/// Alias for the binding index of a shader descriptor.
pub type BindingIndex = u32;

/// Alias for the binding offset of a shader descriptor array element.
pub type BindingOffset = u32;

/// Alias for the descriptor set index of a shader descriptor.
pub type DescriptorSetIndex = u32;

/// Recording interface for acceleration structure commands.
///
/// This structure provides a strongly-typed set of methods which allow acceleration structures to
/// be built and updated. An instance of `Acceleration` is provided to the closure parameter of
/// [`PassRef::record_acceleration`].
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use ash::vk;
/// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureInfo};
/// # use screen_13::driver::DriverError;
/// # use screen_13::driver::device::{Device, DeviceInfo};
/// # use screen_13::graph::RenderGraph;
/// # use screen_13::driver::shader::Shader;
/// # fn main() -> Result<(), DriverError> {
/// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
/// # let mut my_graph = RenderGraph::new();
/// # let info = AccelerationStructureInfo::blas(1);
/// my_graph.begin_pass("my acceleration pass")
///         .record_acceleration(move |acceleration, bindings| {
///             // During this closure we have access to the acceleration methods!
///         });
/// # Ok(()) }
/// ```
pub struct Acceleration<'a> {
    bindings: Bindings<'a>,
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,
}

impl Acceleration<'_> {
    /// Build an acceleration structure.
    ///
    /// Requires a scratch buffer which was created with the following requirements:
    ///
    /// - Flags must include [`vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS`]
    /// - Size must be equal to or greater than the `build_size` value returned by
    ///   [`AccelerationStructure::size_of`] aligned to `min_accel_struct_scratch_offset_alignment`
    ///   of
    ///   [`PhysicalDevice::accel_struct_properties`](crate::driver::physical_device::PhysicalDevice::accel_struct_properties).
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryData, AccelerationStructureGeometryInfo, AccelerationStructureInfo, DeviceOrHostAddress};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # use screen_13::driver::shader::Shader;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let info = AccelerationStructureInfo::blas(1);
    /// # let blas_accel_struct = AccelerationStructure::create(&device, info)?;
    /// # let blas_node = my_graph.bind_node(blas_accel_struct);
    /// # let scratch_buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS);
    /// # let scratch_buf = Buffer::create(&device, scratch_buf_info)?;
    /// # let scratch_buf = my_graph.bind_node(scratch_buf);
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::INDEX_BUFFER);
    /// # let my_idx_buf = Buffer::create(&device, buf_info)?;
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
    /// # let my_vtx_buf = Buffer::create(&device, buf_info)?;
    /// # let index_node = my_graph.bind_node(my_idx_buf);
    /// # let vertex_node = my_graph.bind_node(my_vtx_buf);
    /// my_graph.begin_pass("my acceleration pass")
    ///         .read_node(index_node)
    ///         .read_node(vertex_node)
    ///         .write_node(blas_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             let geom = AccelerationStructureGeometry {
    ///                 max_primitive_count: 64,
    ///                 flags: vk::GeometryFlagsKHR::OPAQUE,
    ///                 geometry: AccelerationStructureGeometryData::Triangles {
    ///                     index_addr: DeviceOrHostAddress::DeviceAddress(
    ///                         Buffer::device_address(&bindings[index_node])
    ///                     ),
    ///                     index_type: vk::IndexType::UINT32,
    ///                     max_vertex: 42,
    ///                     transform_addr: None,
    ///                     vertex_addr: DeviceOrHostAddress::DeviceAddress(Buffer::device_address(
    ///                         &bindings[vertex_node],
    ///                     )),
    ///                     vertex_format: vk::Format::R32G32B32_SFLOAT,
    ///                     vertex_stride: 12,
    ///                 },
    ///             };
    ///             let build_range = vk::AccelerationStructureBuildRangeInfoKHR {
    ///                 first_vertex: 0,
    ///                 primitive_count: 1,
    ///                 primitive_offset: 0,
    ///                 transform_offset: 0,
    ///             };
    ///             let info = AccelerationStructureGeometryInfo::blas([(geom, build_range)]);
    ///
    ///             acceleration.build_structure(&info, blas_node, Buffer::device_address(&bindings[scratch_buf]));
    ///         });
    /// # Ok(()) }
    /// ```
    pub fn build_structure(
        &self,
        info: &AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let accel_struct = accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.ranges.clear();

            for (geometry, range) in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.ranges.push(*range);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                        .ty(info.ty)
                        .flags(info.flags)
                        .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                        .dst_acceleration_structure(*self.bindings[accel_struct])
                        .geometries(&tls.geometries)
                        .scratch_data(scratch_addr)],
                    &[&tls.ranges],
                );
            }
        });

        self
    }

    /// Build an acceleration structure with some parameters provided on the device.
    ///
    /// `range_base` is a buffer device address which points to `info.geometries.len()`
    /// [`vk::AccelerationStructureBuildRangeInfoKHR`] structures defining dynamic offsets to the
    /// addresses where geometry data is stored, as defined by `info`, and `range_stride` is the
    /// byte stride between those structures.
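    ///
    /// # Examples
    ///
    /// A minimal sketch: geometry setup mirrors [`Acceleration::build_structure`] (elided here
    /// with placeholders), and the build range structures are read from a device-local buffer.
    /// This assumes [`AccelerationStructureGeometryInfo::blas`] accepts bare geometry values.
    ///
    /// ```no_run
    /// # use std::{mem::size_of, sync::Arc};
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let blas = AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?;
    /// # let blas_node = my_graph.bind_node(blas);
    /// # let scratch_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let range_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom: AccelerationStructureGeometry = todo!();
    /// my_graph.begin_pass("my indirect build pass")
    ///         .read_node(range_buf)
    ///         .write_node(blas_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             let info = AccelerationStructureGeometryInfo::blas([geom]);
    ///             acceleration.build_structure_indirect(
    ///                 &info,
    ///                 blas_node,
    ///                 Buffer::device_address(&bindings[scratch_buf]),
    ///                 // range_buf holds one build range structure per geometry
    ///                 Buffer::device_address(&bindings[range_buf]),
    ///                 size_of::<vk::AccelerationStructureBuildRangeInfoKHR>() as u32,
    ///             );
    ///         });
    /// # Ok(()) }
    /// ```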
    pub fn build_structure_indirect(
        &self,
        info: &AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let accel_struct = accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.max_primitive_counts.clear();

            for geometry in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.max_primitive_counts.push(geometry.max_primitive_count);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.ty)
                            .flags(info.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                            .dst_acceleration_structure(*self.bindings[accel_struct])
                            .geometries(&tls.geometries)
                            .scratch_data(scratch_addr)],
                        &[range_base],
                        &[range_stride],
                        &[&tls.max_primitive_counts],
                    );
            }
        });

        self
    }

    /// Build acceleration structures.
    ///
    /// There is no ordering or synchronization implied between any of the individual acceleration
    /// structure builds.
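    ///
    /// # Examples
    ///
    /// A minimal sketch batching two builds; per-structure setup mirrors
    /// [`Acceleration::build_structure`] and the geometry values are placeholders. This assumes
    /// [`AccelerationStructureBuildInfo`] is importable from `screen_13::graph::pass_ref`.
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # use screen_13::graph::pass_ref::AccelerationStructureBuildInfo;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let blas_a = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let blas_b = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let scratch_a = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let scratch_b = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom_a: (AccelerationStructureGeometry, vk::AccelerationStructureBuildRangeInfoKHR) = todo!();
    /// # let geom_b: (AccelerationStructureGeometry, vk::AccelerationStructureBuildRangeInfoKHR) = todo!();
    /// my_graph.begin_pass("my batched build pass")
    ///         .write_node(blas_a)
    ///         .write_node(blas_b)
    ///         .write_node(scratch_a)
    ///         .write_node(scratch_b)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             // Both builds are recorded by one command; no ordering is implied
    ///             acceleration.build_structures(&[
    ///                 AccelerationStructureBuildInfo::new(
    ///                     blas_a,
    ///                     AccelerationStructureGeometryInfo::blas([geom_a]),
    ///                     Buffer::device_address(&bindings[scratch_a]),
    ///                 ),
    ///                 AccelerationStructureBuildInfo::new(
    ///                     blas_b,
    ///                     AccelerationStructureGeometryInfo::blas([geom_b]),
    ///                     Buffer::device_address(&bindings[scratch_b]),
    ///                 ),
    ///             ]);
    ///         });
    /// # Ok(()) }
    /// ```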
    pub fn build_structures(&self, infos: &[AccelerationStructureBuildInfo]) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.build_data.geometries.iter().map(|(geometry, _)| {
                    <&AccelerationStructureGeometry as Into<
                            vk::AccelerationStructureGeometryKHR,
                        >>::into(geometry)
                })
            }));

            tls.ranges.clear();
            tls.ranges.extend(
                infos
                    .iter()
                    .flat_map(|info| info.build_data.geometries.iter().map(|(_, range)| *range)),
            );

            let vk_ranges = {
                let mut start = 0;
                let mut vk_ranges = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.build_data.geometries.len();
                    vk_ranges.push(&tls.ranges[start..end]);
                    start = end;
                }

                vk_ranges
            };

            let vk_infos = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.build_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.build_data.ty)
                            .flags(info.build_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                            .dst_acceleration_structure(*self.bindings[info.accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_addr.into()),
                    );
                    start = end;
                }

                vk_infos
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &vk_infos,
                    &vk_ranges,
                );
            }
        });

        self
    }

    /// Build acceleration structures with some parameters provided on the device.
    ///
    /// There is no ordering or synchronization implied between any of the individual acceleration
    /// structure builds.
    ///
    /// See [`Self::build_structure_indirect`].
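    ///
    /// # Examples
    ///
    /// A minimal sketch with a single entry; the geometry value is a placeholder and
    /// [`AccelerationStructureIndirectBuildInfo`] is assumed importable from
    /// `screen_13::graph::pass_ref`:
    ///
    /// ```no_run
    /// # use std::{mem::size_of, sync::Arc};
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # use screen_13::graph::pass_ref::AccelerationStructureIndirectBuildInfo;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let blas_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let scratch_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let range_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom: AccelerationStructureGeometry = todo!();
    /// my_graph.begin_pass("my indirect batched build pass")
    ///         .read_node(range_buf)
    ///         .write_node(blas_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             acceleration.build_structures_indirect(&[
    ///                 AccelerationStructureIndirectBuildInfo::new(
    ///                     blas_node,
    ///                     AccelerationStructureGeometryInfo::blas([geom]),
    ///                     Buffer::device_address(&bindings[range_buf]),
    ///                     size_of::<vk::AccelerationStructureBuildRangeInfoKHR>() as u32,
    ///                     Buffer::device_address(&bindings[scratch_buf]),
    ///                 ),
    ///             ]);
    ///         });
    /// # Ok(()) }
    /// ```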
    pub fn build_structures_indirect(
        &self,
        infos: &[AccelerationStructureIndirectBuildInfo],
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
            range_bases: Vec<vk::DeviceAddress>,
            range_strides: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.build_data.geometries.iter().map(
                    <&AccelerationStructureGeometry as Into<
                        vk::AccelerationStructureGeometryKHR,
                    >>::into,
                )
            }));

            tls.max_primitive_counts.clear();
            tls.max_primitive_counts
                .extend(infos.iter().flat_map(|info| {
                    info.build_data
                        .geometries
                        .iter()
                        .map(|geometry| geometry.max_primitive_count)
                }));

            tls.range_bases.clear();
            tls.range_strides.clear();
            let (vk_infos, vk_max_primitive_counts) = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                let mut vk_max_primitive_counts = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.build_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.build_data.ty)
                            .flags(info.build_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                            .dst_acceleration_structure(*self.bindings[info.accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_data.into()),
                    );
                    vk_max_primitive_counts.push(&tls.max_primitive_counts[start..end]);
                    start = end;

                    tls.range_bases.push(info.range_base);
                    tls.range_strides.push(info.range_stride);
                }

                (vk_infos, vk_max_primitive_counts)
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &vk_infos,
                        &tls.range_bases,
                        &tls.range_strides,
                        &vk_max_primitive_counts,
                    );
            }
        });

        self
    }

    /// Update an acceleration structure.
    ///
    /// Requires a scratch buffer which was created with the following requirements:
    ///
    /// - Flags must include [`vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS`]
    /// - Size must be equal to or greater than the `update_size` value returned by
    ///   [`AccelerationStructure::size_of`] aligned to `min_accel_struct_scratch_offset_alignment`
    ///   of
    ///   [`PhysicalDevice::accel_struct_properties`](crate::driver::physical_device::PhysicalDevice::accel_struct_properties).
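    ///
    /// # Examples
    ///
    /// A minimal sketch: geometry setup mirrors [`Acceleration::build_structure`] (placeholders
    /// here), and the source structure is assumed to have been built previously with
    /// `vk::BuildAccelerationStructureFlagsKHR::ALLOW_UPDATE`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let src_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let dst_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let scratch_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom: (AccelerationStructureGeometry, vk::AccelerationStructureBuildRangeInfoKHR) = todo!();
    /// my_graph.begin_pass("my update pass")
    ///         .read_node(src_node)
    ///         .write_node(dst_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             let info = AccelerationStructureGeometryInfo::blas([geom]);
    ///             acceleration.update_structure(
    ///                 &info,
    ///                 src_node,
    ///                 dst_node,
    ///                 Buffer::device_address(&bindings[scratch_buf]),
    ///             );
    ///         });
    /// # Ok(()) }
    /// ```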
    pub fn update_structure(
        &self,
        info: &AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let src_accel_struct = src_accel_struct.into();
        let dst_accel_struct = dst_accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.ranges.clear();

            for (geometry, range) in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.ranges.push(*range);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                        .ty(info.ty)
                        .flags(info.flags)
                        .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                        .dst_acceleration_structure(*self.bindings[dst_accel_struct])
                        .src_acceleration_structure(*self.bindings[src_accel_struct])
                        .geometries(&tls.geometries)
                        .scratch_data(scratch_addr)],
                    &[&tls.ranges],
                );
            }
        });

        self
    }

    /// Update an acceleration structure with some parameters provided on the device.
    ///
    /// `range_base` is a buffer device address which points to `info.geometries.len()`
    /// [`vk::AccelerationStructureBuildRangeInfoKHR`] structures defining dynamic offsets to the
    /// addresses where geometry data is stored, as defined by `info`, and `range_stride` is the
    /// byte stride between those structures.
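    ///
    /// # Examples
    ///
    /// A minimal sketch (placeholder geometry; range parameters behave as in
    /// [`Acceleration::build_structure_indirect`]):
    ///
    /// ```no_run
    /// # use std::{mem::size_of, sync::Arc};
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let src_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let dst_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let scratch_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let range_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom: AccelerationStructureGeometry = todo!();
    /// my_graph.begin_pass("my indirect update pass")
    ///         .read_node(src_node)
    ///         .read_node(range_buf)
    ///         .write_node(dst_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             let info = AccelerationStructureGeometryInfo::blas([geom]);
    ///             acceleration.update_structure_indirect(
    ///                 &info,
    ///                 src_node,
    ///                 dst_node,
    ///                 Buffer::device_address(&bindings[scratch_buf]),
    ///                 Buffer::device_address(&bindings[range_buf]),
    ///                 size_of::<vk::AccelerationStructureBuildRangeInfoKHR>() as u32,
    ///             );
    ///         });
    /// # Ok(()) }
    /// ```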
    pub fn update_structure_indirect(
        &self,
        info: &AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let src_accel_struct = src_accel_struct.into();
        let dst_accel_struct = dst_accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.max_primitive_counts.clear();

            for geometry in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.max_primitive_counts.push(geometry.max_primitive_count);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.ty)
                            .flags(info.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                            .src_acceleration_structure(*self.bindings[src_accel_struct])
                            .dst_acceleration_structure(*self.bindings[dst_accel_struct])
                            .geometries(&tls.geometries)
                            .scratch_data(scratch_addr)],
                        &[range_base],
                        &[range_stride],
                        &[&tls.max_primitive_counts],
                    );
            }
        });

        self
    }

    /// Update acceleration structures.
    ///
    /// There is no ordering or synchronization implied between any of the individual acceleration
    /// structure updates.
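    ///
    /// # Examples
    ///
    /// A minimal sketch with a single entry (placeholder geometry;
    /// [`AccelerationStructureUpdateInfo`] is assumed importable from
    /// `screen_13::graph::pass_ref`):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # use screen_13::graph::pass_ref::AccelerationStructureUpdateInfo;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let src_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let dst_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let scratch_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom: (AccelerationStructureGeometry, vk::AccelerationStructureBuildRangeInfoKHR) = todo!();
    /// my_graph.begin_pass("my batched update pass")
    ///         .read_node(src_node)
    ///         .write_node(dst_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             acceleration.update_structures(&[AccelerationStructureUpdateInfo::new(
    ///                 src_node,
    ///                 dst_node,
    ///                 AccelerationStructureGeometryInfo::blas([geom]),
    ///                 Buffer::device_address(&bindings[scratch_buf]),
    ///             )]);
    ///         });
    /// # Ok(()) }
    /// ```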
    pub fn update_structures(&self, infos: &[AccelerationStructureUpdateInfo]) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.update_data.geometries.iter().map(|(geometry, _)| {
                    <&AccelerationStructureGeometry as Into<
                            vk::AccelerationStructureGeometryKHR,
                        >>::into(geometry)
                })
            }));

            tls.ranges.clear();
            tls.ranges.extend(
                infos
                    .iter()
                    .flat_map(|info| info.update_data.geometries.iter().map(|(_, range)| *range)),
            );

            let vk_ranges = {
                let mut start = 0;
                let mut vk_ranges = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.update_data.geometries.len();
                    vk_ranges.push(&tls.ranges[start..end]);
                    start = end;
                }

                vk_ranges
            };

            let vk_infos = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.update_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.update_data.ty)
                            .flags(info.update_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                            .dst_acceleration_structure(*self.bindings[info.dst_accel_struct])
                            .src_acceleration_structure(*self.bindings[info.src_accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_addr.into()),
                    );
                    start = end;
                }

                vk_infos
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &vk_infos,
                    &vk_ranges,
                );
            }
        });

        self
    }

    /// Update acceleration structures with some parameters provided on the device.
    ///
    /// There is no ordering or synchronization implied between any of the individual acceleration
    /// structure updates.
    ///
    /// See [`Self::update_structure_indirect`].
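    ///
    /// # Examples
    ///
    /// A minimal sketch with a single entry (placeholder geometry;
    /// [`AccelerationStructureIndirectUpdateInfo`] is assumed importable from
    /// `screen_13::graph::pass_ref`):
    ///
    /// ```no_run
    /// # use std::{mem::size_of, sync::Arc};
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::accel_struct::{AccelerationStructure, AccelerationStructureGeometry, AccelerationStructureGeometryInfo, AccelerationStructureInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # use screen_13::graph::pass_ref::AccelerationStructureIndirectUpdateInfo;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let src_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let dst_node = my_graph.bind_node(AccelerationStructure::create(&device, AccelerationStructureInfo::blas(1))?);
    /// # let scratch_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let range_buf = my_graph.bind_node(Buffer::create(&device,
    /// #     BufferInfo::device_mem(8, vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS))?);
    /// # let geom: AccelerationStructureGeometry = todo!();
    /// my_graph.begin_pass("my indirect batched update pass")
    ///         .read_node(src_node)
    ///         .read_node(range_buf)
    ///         .write_node(dst_node)
    ///         .write_node(scratch_buf)
    ///         .record_acceleration(move |acceleration, bindings| {
    ///             acceleration.update_structures_indirect(&[
    ///                 AccelerationStructureIndirectUpdateInfo::new(
    ///                     src_node,
    ///                     dst_node,
    ///                     AccelerationStructureGeometryInfo::blas([geom]),
    ///                     Buffer::device_address(&bindings[range_buf]),
    ///                     size_of::<vk::AccelerationStructureBuildRangeInfoKHR>() as u32,
    ///                     Buffer::device_address(&bindings[scratch_buf]),
    ///                 ),
    ///             ]);
    ///         });
    /// # Ok(()) }
    /// ```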
    pub fn update_structures_indirect(
        &self,
        infos: &[AccelerationStructureIndirectUpdateInfo],
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
            range_bases: Vec<vk::DeviceAddress>,
            range_strides: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.update_data.geometries.iter().map(
                    <&AccelerationStructureGeometry as Into<
                        vk::AccelerationStructureGeometryKHR,
                    >>::into,
                )
            }));

            tls.max_primitive_counts.clear();
            tls.max_primitive_counts
                .extend(infos.iter().flat_map(|info| {
                    info.update_data
                        .geometries
                        .iter()
                        .map(|geometry| geometry.max_primitive_count)
                }));

            tls.range_bases.clear();
            tls.range_strides.clear();
            let (vk_infos, vk_max_primitive_counts) = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                let mut vk_max_primitive_counts = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.update_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.update_data.ty)
                            .flags(info.update_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                            .src_acceleration_structure(*self.bindings[info.src_accel_struct])
                            .dst_acceleration_structure(*self.bindings[info.dst_accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_addr.into()),
                    );
                    vk_max_primitive_counts.push(&tls.max_primitive_counts[start..end]);
                    start = end;

                    tls.range_bases.push(info.range_base);
                    tls.range_strides.push(info.range_stride);
                }

                (vk_infos, vk_max_primitive_counts)
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &vk_infos,
                        &tls.range_bases,
                        &tls.range_strides,
                        &vk_max_primitive_counts,
                    );
            }
        });

        self
    }
}

/// Specifies the information and data used to build an acceleration structure.
///
/// See
/// [VkAccelerationStructureBuildGeometryInfoKHR](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkAccelerationStructureBuildGeometryInfoKHR.html)
/// for more information.
#[derive(Clone, Debug)]
pub struct AccelerationStructureBuildInfo {
    /// The acceleration structure to be written.
    pub accel_struct: AnyAccelerationStructureNode,

    /// Specifies the geometry data to use when building the acceleration structure.
    pub build_data: AccelerationStructureGeometryInfo<(
        AccelerationStructureGeometry,
        vk::AccelerationStructureBuildRangeInfoKHR,
    )>,

    /// The temporary buffer or host address (with enough capacity per
    /// [AccelerationStructure::size_of]).
    pub scratch_addr: DeviceOrHostAddress,
}

impl AccelerationStructureBuildInfo {
    /// Constructs new acceleration structure build information.
    pub fn new(
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        build_data: AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        let accel_struct = accel_struct.into();
        let scratch_addr = scratch_addr.into();

        Self {
            accel_struct,
            build_data,
            scratch_addr,
        }
    }
}

/// Specifies the information and data used to build an acceleration structure with some parameters
/// sourced on the device.
///
/// See
/// [VkAccelerationStructureBuildGeometryInfoKHR](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkAccelerationStructureBuildGeometryInfoKHR.html)
/// for more information.
#[derive(Clone, Debug)]
pub struct AccelerationStructureIndirectBuildInfo {
    /// The acceleration structure to be written.
    pub accel_struct: AnyAccelerationStructureNode,

    /// Specifies the geometry data to use when building the acceleration structure.
    pub build_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,

    /// A buffer device address which points to `build_data.geometries.len()`
    /// [`vk::AccelerationStructureBuildRangeInfoKHR`] structures defining dynamic offsets to the
    /// addresses where geometry data is stored.
    pub range_base: vk::DeviceAddress,

    /// Byte stride between the structures pointed to by `range_base`.
    pub range_stride: u32,

    /// The temporary buffer or host address (with enough capacity per
    /// [AccelerationStructure::size_of]).
    pub scratch_data: DeviceOrHostAddress,
}

impl AccelerationStructureIndirectBuildInfo {
    /// Constructs new acceleration structure indirect build information.
    pub fn new(
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        build_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
        scratch_data: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        let accel_struct = accel_struct.into();
        let scratch_data = scratch_data.into();

        Self {
            accel_struct,
            build_data,
            range_base,
            range_stride,
            scratch_data,
        }
    }
}

/// Specifies the information and data used to update an acceleration structure with some parameters
/// sourced on the device.
///
/// See
/// [VkAccelerationStructureBuildGeometryInfoKHR](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkAccelerationStructureBuildGeometryInfoKHR.html)
/// for more information.
#[derive(Clone, Debug)]
pub struct AccelerationStructureIndirectUpdateInfo {
    /// The acceleration structure to be written.
    pub dst_accel_struct: AnyAccelerationStructureNode,

    /// A buffer device address which points to `update_data.geometries.len()`
    /// [`vk::AccelerationStructureBuildRangeInfoKHR`] structures defining dynamic offsets to the
    /// addresses where geometry data is stored.
    pub range_base: vk::DeviceAddress,

    /// Byte stride between the structures pointed to by `range_base`.
    pub range_stride: u32,

    /// The temporary buffer or host address (with enough capacity per
    /// [AccelerationStructure::size_of]).
    pub scratch_addr: DeviceOrHostAddress,

    /// The source acceleration structure to be read.
    pub src_accel_struct: AnyAccelerationStructureNode,

    /// Specifies the geometry data to use when updating the acceleration structure.
    pub update_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
}

impl AccelerationStructureIndirectUpdateInfo {
    /// Constructs new acceleration structure indirect update information.
    pub fn new(
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        update_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        let src_accel_struct = src_accel_struct.into();
        let dst_accel_struct = dst_accel_struct.into();
        let scratch_addr = scratch_addr.into();

        Self {
            dst_accel_struct,
            range_base,
            range_stride,
            scratch_addr,
            src_accel_struct,
            update_data,
        }
    }
}

/// Specifies the information and data used to update an acceleration structure.
///
/// See
/// [VkAccelerationStructureBuildGeometryInfoKHR](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkAccelerationStructureBuildGeometryInfoKHR.html)
/// for more information.
#[derive(Clone, Debug)]
pub struct AccelerationStructureUpdateInfo {
    /// The acceleration structure to be written.
    pub dst_accel_struct: AnyAccelerationStructureNode,

    /// The temporary buffer or host address (with enough capacity per
    /// [AccelerationStructure::size_of]).
    pub scratch_addr: DeviceOrHostAddress,

    /// The source acceleration structure to be read.
    pub src_accel_struct: AnyAccelerationStructureNode,

    /// Specifies the geometry data to use when updating the acceleration structure.
    pub update_data: AccelerationStructureGeometryInfo<(
        AccelerationStructureGeometry,
        vk::AccelerationStructureBuildRangeInfoKHR,
    )>,
}

impl AccelerationStructureUpdateInfo {
    /// Constructs new acceleration structure update information.
    pub fn new(
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        update_data: AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        let src_accel_struct = src_accel_struct.into();
        let dst_accel_struct = dst_accel_struct.into();
        let scratch_addr = scratch_addr.into();

        Self {
            dst_accel_struct,
            scratch_addr,
            src_accel_struct,
            update_data,
        }
    }
}

/// Trait for associated constants which provide the default access types used by read and write
/// methods.
pub trait Access {
    /// The default `AccessType` for read operations, if not specified explicitly.
    const DEFAULT_READ: AccessType;

    /// The default `AccessType` for write operations, if not specified explicitly.
    const DEFAULT_WRITE: AccessType;
}

impl Access for ComputePipeline {
    const DEFAULT_READ: AccessType = AccessType::ComputeShaderReadOther;
    const DEFAULT_WRITE: AccessType = AccessType::ComputeShaderWrite;
}

impl Access for GraphicPipeline {
    const DEFAULT_READ: AccessType = AccessType::AnyShaderReadSampledImageOrUniformTexelBuffer;
    const DEFAULT_WRITE: AccessType = AccessType::AnyShaderWrite;
}

impl Access for RayTracePipeline {
    const DEFAULT_READ: AccessType =
        AccessType::RayTracingShaderReadSampledImageOrUniformTexelBuffer;
    const DEFAULT_WRITE: AccessType = AccessType::AnyShaderWrite;
}

macro_rules! bind {
    ($name:ident) => {
        paste::paste! {
            impl<'a> Bind<PassRef<'a>, PipelinePassRef<'a, [<$name Pipeline>]>> for &'a Arc<[<$name Pipeline>]> {
                // TODO: Allow binding as explicit secondary command buffers? like with compute/raytrace stuff
                fn bind(self, mut pass: PassRef<'a>) -> PipelinePassRef<'a, [<$name Pipeline>]> {
                    let pass_ref = pass.as_mut();
                    if pass_ref.execs.last().unwrap().pipeline.is_some() {
                        // Binding from PipelinePass -> PipelinePass (changing shaders)
                        pass_ref.execs.push(Default::default());
                    }

                    pass_ref.execs.last_mut().unwrap().pipeline = Some(ExecutionPipeline::$name(Arc::clone(self)));

                    PipelinePassRef {
                        __: PhantomData,
                        pass,
                    }
                }
            }

            impl<'a> Bind<PassRef<'a>, PipelinePassRef<'a, [<$name Pipeline>]>> for Arc<[<$name Pipeline>]> {
                // TODO: Allow binding as explicit secondary command buffers? like with compute/raytrace stuff
                fn bind(self, mut pass: PassRef<'a>) -> PipelinePassRef<'a, [<$name Pipeline>]> {
                    let pass_ref = pass.as_mut();
                    if pass_ref.execs.last().unwrap().pipeline.is_some() {
                        // Binding from PipelinePass -> PipelinePass (changing shaders)
                        pass_ref.execs.push(Default::default());
                    }

                    pass_ref.execs.last_mut().unwrap().pipeline = Some(ExecutionPipeline::$name(self));

                    PipelinePassRef {
                        __: PhantomData,
                        pass,
                    }
                }
            }

            impl<'a> Bind<PassRef<'a>, PipelinePassRef<'a, [<$name Pipeline>]>> for [<$name Pipeline>] {
                // TODO: Allow binding as explicit secondary command buffers? like with compute/raytrace stuff
                fn bind(self, mut pass: PassRef<'a>) -> PipelinePassRef<'a, [<$name Pipeline>]> {
                    let pass_ref = pass.as_mut();
                    if pass_ref.execs.last().unwrap().pipeline.is_some() {
                        // Binding from PipelinePass -> PipelinePass (changing shaders)
                        pass_ref.execs.push(Default::default());
                    }

                    pass_ref.execs.last_mut().unwrap().pipeline = Some(ExecutionPipeline::$name(Arc::new(self)));

                    PipelinePassRef {
                        __: PhantomData,
                        pass,
                    }
                }
            }

            impl ExecutionPipeline {
                #[allow(unused)]
                pub(super) fn [<is_ $name:snake>](&self) -> bool {
                    matches!(self, Self::$name(_))
                }

                #[allow(unused)]
                pub(super) fn [<unwrap_ $name:snake>](&self) -> &Arc<[<$name Pipeline>]> {
                    if let Self::$name(binding) = self {
                        &binding
                    } else {
                        panic!();
                    }
                }
            }
        }
    };
}

// Pipelines you can bind to a pass
bind!(Compute);
bind!(Graphic);
bind!(RayTrace);

/// An indexable structure which provides access to Vulkan smart-pointer resources inside a record
/// closure.
///
/// This type is available while recording commands in the following closures:
///
/// - [`PassRef::record_acceleration`] for building and updating acceleration structures
/// - [`PassRef::record_cmd_buf`] for general command streams
/// - [`PipelinePassRef::record_compute`] for dispatched compute operations
/// - [`PipelinePassRef::record_subpass`] for raster drawing operations, such as triangle streams
/// - [`PipelinePassRef::record_ray_trace`] for ray-traced operations
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use ash::vk;
/// # use screen_13::driver::DriverError;
/// # use screen_13::driver::device::{Device, DeviceInfo};
/// # use screen_13::driver::image::{Image, ImageInfo};
/// # use screen_13::graph::RenderGraph;
/// # use screen_13::graph::node::ImageNode;
/// # fn main() -> Result<(), DriverError> {
/// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
/// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
/// # let image = Image::create(&device, info)?;
/// # let mut my_graph = RenderGraph::new();
/// # let my_image_node = my_graph.bind_node(image);
/// my_graph.begin_pass("custom vulkan commands")
///         .record_cmd_buf(move |device, cmd_buf, bindings| {
///             let my_image = &bindings[my_image_node];
///
///             assert_ne!(**my_image, vk::Image::null());
///             assert_eq!(my_image.info.width, 32);
///         });
/// # Ok(()) }
/// ```
#[derive(Clone, Copy, Debug)]
pub struct Bindings<'a> {
    bindings: &'a [Binding],
    exec: &'a Execution,
}

impl<'a> Bindings<'a> {
    pub(super) fn new(bindings: &'a [Binding], exec: &'a Execution) -> Self {
        Self { bindings, exec }
    }

    fn binding_ref(&self, node_idx: usize) -> &Binding {
        // You must have called read or write for this node on this execution before indexing
        // into the bindings data!
        debug_assert!(
            self.exec.accesses.contains_key(&node_idx),
            "unexpected node access: call access, read, or write first"
        );

        &self.bindings[node_idx]
    }
}

macro_rules! index {
    ($name:ident, $handle:ident) => {
        paste::paste! {
            impl<'a> Index<[<$name Node>]> for Bindings<'a>
            {
                type Output = $handle;

                fn index(&self, node: [<$name Node>]) -> &Self::Output {
                    &*self.binding_ref(node.idx).[<as_ $name:snake>]().unwrap()
                }
            }
        }
    };
}

// Allow indexing the Bindings data during command execution:
// (This gets you access to the driver images or other resources)
index!(AccelerationStructure, AccelerationStructure);
index!(AccelerationStructureLease, AccelerationStructure);
index!(Buffer, Buffer);
index!(BufferLease, Buffer);
index!(Image, Image);
index!(ImageLease, Image);
index!(SwapchainImage, Image);

impl Index<AnyAccelerationStructureNode> for Bindings<'_> {
    type Output = AccelerationStructure;

    fn index(&self, node: AnyAccelerationStructureNode) -> &Self::Output {
        let node_idx = match node {
            AnyAccelerationStructureNode::AccelerationStructure(node) => node.idx,
            AnyAccelerationStructureNode::AccelerationStructureLease(node) => node.idx,
        };
        let binding = self.binding_ref(node_idx);

        match node {
            AnyAccelerationStructureNode::AccelerationStructure(_) => {
                binding.as_acceleration_structure().unwrap()
            }
            AnyAccelerationStructureNode::AccelerationStructureLease(_) => {
                binding.as_acceleration_structure_lease().unwrap()
            }
        }
    }
}

impl Index<AnyBufferNode> for Bindings<'_> {
    type Output = Buffer;

    fn index(&self, node: AnyBufferNode) -> &Self::Output {
        let node_idx = match node {
            AnyBufferNode::Buffer(node) => node.idx,
            AnyBufferNode::BufferLease(node) => node.idx,
        };
        let binding = self.binding_ref(node_idx);

        match node {
            AnyBufferNode::Buffer(_) => binding.as_buffer().unwrap(),
            AnyBufferNode::BufferLease(_) => binding.as_buffer_lease().unwrap(),
        }
    }
}

impl Index<AnyImageNode> for Bindings<'_> {
    type Output = Image;

    fn index(&self, node: AnyImageNode) -> &Self::Output {
        let node_idx = match node {
            AnyImageNode::Image(node) => node.idx,
            AnyImageNode::ImageLease(node) => node.idx,
            AnyImageNode::SwapchainImage(node) => node.idx,
        };
        let binding = self.binding_ref(node_idx);

        match node {
            AnyImageNode::Image(_) => binding.as_image().unwrap(),
            AnyImageNode::ImageLease(_) => binding.as_image_lease().unwrap(),
            AnyImageNode::SwapchainImage(_) => binding.as_swapchain_image().unwrap(),
        }
    }
}

/// Recording interface for compute commands.
///
/// This structure provides a strongly-typed set of methods which allow compute shader code to be
/// executed. An instance of `Compute` is provided to the closure parameter of
/// [`PipelinePassRef::record_compute`], which is available after binding a [`ComputePipeline`] to
/// a render pass.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use ash::vk;
/// # use screen_13::driver::DriverError;
/// # use screen_13::driver::device::{Device, DeviceInfo};
/// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
/// # use screen_13::driver::shader::{Shader};
/// # use screen_13::graph::RenderGraph;
/// # fn main() -> Result<(), DriverError> {
/// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
/// # let info = ComputePipelineInfo::default();
/// # let shader = Shader::new_compute([0u8; 1].as_slice());
/// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device, info, shader)?);
/// # let mut my_graph = RenderGraph::new();
/// my_graph.begin_pass("my compute pass")
///         .bind_pipeline(&my_compute_pipeline)
///         .record_compute(move |compute, bindings| {
///             // During this closure we have access to the compute methods!
///         });
/// # Ok(()) }
/// ```
pub struct Compute<'a> {
    bindings: Bindings<'a>,
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,
    pipeline: Arc<ComputePipeline>,
}

impl Compute<'_> {
    /// [Dispatch] compute work items.
    ///
    /// When the command is executed, a global workgroup consisting of
    /// `group_count_x × group_count_y × group_count_z` local workgroups is assembled.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # inline_spirv::inline_spirv!(r#"
    /// #version 450
    ///
    /// layout(set = 0, binding = 0, std430) restrict writeonly buffer MyBuffer {
    ///     uint my_buf[];
    /// };
    ///
    /// void main()
    /// {
    ///     // TODO
    /// }
    /// # "#, comp);
    /// ```
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
    /// # use screen_13::driver::shader::{Shader};
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::STORAGE_BUFFER);
    /// # let my_buf = Buffer::create(&device, buf_info)?;
    /// # let info = ComputePipelineInfo::default();
    /// # let shader = Shader::new_compute([0u8; 1].as_slice());
    /// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device, info, shader)?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let my_buf_node = my_graph.bind_node(my_buf);
    /// my_graph.begin_pass("fill my_buf_node with data")
    ///         .bind_pipeline(&my_compute_pipeline)
    ///         .write_descriptor(0, my_buf_node)
    ///         .record_compute(move |compute, bindings| {
    ///             compute.dispatch(128, 64, 32);
    ///         });
    /// # Ok(()) }
    /// ```
    ///
    /// [Dispatch]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkCmdDispatch.html
    #[profiling::function]
    pub fn dispatch(&self, group_count_x: u32, group_count_y: u32, group_count_z: u32) -> &Self {
        unsafe {
            self.device
                .cmd_dispatch(self.cmd_buf, group_count_x, group_count_y, group_count_z);
        }

        self
    }

    /// [Dispatch] compute work items with non-zero base values for the workgroup IDs.
    ///
    /// When the command is executed, a global workgroup consisting of
    /// `group_count_x × group_count_y × group_count_z` local workgroups is assembled, with
    /// WorkgroupId values ranging from `[base_group*, base_group* + group_count*)` in each
    /// component.
    ///
    /// [`Compute::dispatch`] is equivalent to
    /// `dispatch_base(0, 0, 0, group_count_x, group_count_y, group_count_z)`.
    ///
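    /// # Examples
    ///
    /// A minimal sketch (setup mirrors [`Compute::dispatch`]):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let shader = Shader::new_compute([0u8; 1].as_slice());
    /// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device,
    /// #     ComputePipelineInfo::default(), shader)?);
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("my compute pass")
    ///         .bind_pipeline(&my_compute_pipeline)
    ///         .record_compute(move |compute, bindings| {
    ///             // Same workgroup counts as dispatch(128, 64, 32), but workgroup IDs
    ///             // start at (16, 0, 0)
    ///             compute.dispatch_base(16, 0, 0, 128, 64, 32);
    ///         });
    /// # Ok(()) }
    /// ```
    ///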
    /// [Dispatch]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkCmdDispatchBase.html
    #[profiling::function]
    pub fn dispatch_base(
        &self,
        base_group_x: u32,
        base_group_y: u32,
        base_group_z: u32,
        group_count_x: u32,
        group_count_y: u32,
        group_count_z: u32,
    ) -> &Self {
        unsafe {
            self.device.cmd_dispatch_base(
                self.cmd_buf,
                base_group_x,
                base_group_y,
                base_group_z,
                group_count_x,
                group_count_y,
                group_count_z,
            );
        }

        self
    }

    /// [Dispatch] compute work items with indirect parameters.
    ///
    /// `dispatch_indirect` behaves similarly to [`Compute::dispatch`] except that the parameters
    /// are read by the device from `args_buf` during execution. The parameters of the dispatch are
    /// encoded in a [`vk::DispatchIndirectCommand`] structure taken from `args_buf` starting at
    /// `args_offset`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use std::mem::size_of;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
    /// # use screen_13::driver::shader::{Shader};
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::STORAGE_BUFFER);
    /// # let my_buf = Buffer::create(&device, buf_info)?;
    /// # let info = ComputePipelineInfo::default();
    /// # let shader = Shader::new_compute([0u8; 1].as_slice());
    /// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device, info, shader)?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let my_buf_node = my_graph.bind_node(my_buf);
    /// const CMD_SIZE: usize = size_of::<vk::DispatchIndirectCommand>();
    ///
    /// let cmd = vk::DispatchIndirectCommand {
    ///     x: 1,
    ///     y: 2,
    ///     z: 3,
    /// };
    /// let cmd_data = unsafe {
    ///     std::slice::from_raw_parts(&cmd as *const _ as *const _, CMD_SIZE)
    /// };
    ///
    /// let args_buf_flags = vk::BufferUsageFlags::INDIRECT_BUFFER;
    /// let args_buf = Buffer::create_from_slice(&device, args_buf_flags, cmd_data)?;
    /// let args_buf_node = my_graph.bind_node(args_buf);
    ///
    /// my_graph.begin_pass("fill my_buf_node with data")
    ///         .bind_pipeline(&my_compute_pipeline)
    ///         .read_node(args_buf_node)
    ///         .write_descriptor(0, my_buf_node)
    ///         .record_compute(move |compute, bindings| {
    ///             compute.dispatch_indirect(args_buf_node, 0);
    ///         });
    /// # Ok(()) }
    /// ```
    ///
    /// [Dispatch]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkCmdDispatchIndirect.html
    /// [VkDispatchIndirectCommand]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDispatchIndirectCommand.html
    #[profiling::function]
    pub fn dispatch_indirect(
        &self,
        args_buf: impl Into<AnyBufferNode>,
        args_offset: vk::DeviceSize,
    ) -> &Self {
        let args_buf = args_buf.into();

        unsafe {
            self.device
                .cmd_dispatch_indirect(self.cmd_buf, *self.bindings[args_buf], args_offset);
        }

        self
    }
1359
1360    /// Updates push constants.
1361    ///
1362    /// Push constants represent a high speed path to modify constant data in pipelines that is
1363    /// expected to outperform memory-backed resource updates.
1364    ///
1365    /// Push constant values can be updated incrementally, causing shader stages to read the new
1366    /// data for push constants modified by this command, while still reading the previous data for
1367    /// push constants not modified by this command.
1368    ///
1369    /// # Device limitations
1370    ///
1371    /// See
1372    /// [`device.physical_device.props.limits.max_push_constants_size`](vk::PhysicalDeviceLimits)
1373    /// for the limits of the current device. You may also check [gpuinfo.org] for a listing of
1374    /// reported limits on other devices.
1375    ///
1376    /// # Examples
1377    ///
1378    /// Basic usage:
1379    ///
1380    /// ```
1381    /// # inline_spirv::inline_spirv!(r#"
1382    /// #version 450
1383    ///
1384    /// layout(push_constant) uniform PushConstants {
1385    ///     layout(offset = 0) uint the_answer;
1386    /// } push_constants;
1387    ///
1388    /// void main()
1389    /// {
1390    ///     // TODO: Add bindings to read/write things!
1391    /// }
1392    /// # "#, comp);
1393    /// ```
1394    ///
1395    /// ```no_run
1396    /// # use std::sync::Arc;
1397    /// # use ash::vk;
1398    /// # use screen_13::driver::DriverError;
1399    /// # use screen_13::driver::device::{Device, DeviceInfo};
1400    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
1401    /// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
1402    /// # use screen_13::driver::shader::{Shader};
1403    /// # use screen_13::graph::RenderGraph;
1404    /// # fn main() -> Result<(), DriverError> {
1405    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
1406    /// # let info = ComputePipelineInfo::default();
1407    /// # let shader = Shader::new_compute([0u8; 1].as_slice());
1408    /// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device, info, shader)?);
1409    /// # let mut my_graph = RenderGraph::new();
1410    /// my_graph.begin_pass("compute the ultimate question")
1411    ///         .bind_pipeline(&my_compute_pipeline)
1412    ///         .record_compute(move |compute, bindings| {
1413    ///             compute.push_constants(&42u32.to_ne_bytes())
1414    ///                    .dispatch(1, 1, 1);
1415    ///         });
1416    /// # Ok(()) }
1417    /// ```
1418    ///
1419    /// [gpuinfo.org]: https://vulkan.gpuinfo.org/displaydevicelimit.php?name=maxPushConstantsSize&platform=all
1420    pub fn push_constants(&self, data: &[u8]) -> &Self {
1421        self.push_constants_offset(0, data)
1422    }
1423
1424    /// Updates push constants starting at the given `offset`.
1425    ///
1426    /// Behaves similarly to [`Compute::push_constants`] except that `offset` describes the position
1427    /// at which `data` updates the push constants of the currently bound pipeline. This may be used
1428    /// to update a subset or single field of previously set push constant data.
1429    ///
1430    /// # Device limitations
1431    ///
1432    /// See
1433    /// [`device.physical_device.props.limits.max_push_constants_size`](vk::PhysicalDeviceLimits)
1434    /// for the limits of the current device. You may also check [gpuinfo.org] for a listing of
1435    /// reported limits on other devices.
1436    ///
1437    /// # Examples
1438    ///
1439    /// Basic usage:
1440    ///
1441    /// ```
1442    /// # inline_spirv::inline_spirv!(r#"
1443    /// #version 450
1444    ///
1445    /// layout(push_constant) uniform PushConstants {
1446    ///     layout(offset = 0) uint some_val1;
1447    ///     layout(offset = 4) uint some_val2;
1448    /// } push_constants;
1449    ///
1450    /// void main()
1451    /// {
1452    ///     // TODO: Add bindings to read/write things!
1453    /// }
1454    /// # "#, comp);
1455    /// ```
1456    ///
1457    /// ```no_run
1458    /// # use std::sync::Arc;
1459    /// # use ash::vk;
1460    /// # use screen_13::driver::DriverError;
1461    /// # use screen_13::driver::device::{Device, DeviceInfo};
1462    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
1463    /// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
1464    /// # use screen_13::driver::shader::{Shader};
1465    /// # use screen_13::graph::RenderGraph;
1466    /// # fn main() -> Result<(), DriverError> {
1467    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
1468    /// # let info = ComputePipelineInfo::default();
1469    /// # let shader = Shader::new_compute([0u8; 1].as_slice());
1470    /// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device, info, shader)?);
1471    /// # let mut my_graph = RenderGraph::new();
1472    /// my_graph.begin_pass("calculate the wow factor")
1473    ///         .bind_pipeline(&my_compute_pipeline)
1474    ///         .record_compute(move |compute, bindings| {
1475    ///             compute.push_constants(&[0u8; 8])
1476    ///                    .dispatch(1, 1, 1)
1477    ///                    .push_constants_offset(4, &0xffu32.to_ne_bytes())
1478    ///                    .dispatch(1, 1, 1);
1479    ///         });
1480    /// # Ok(()) }
1481    /// ```
1482    ///
1483    /// [gpuinfo.org]: https://vulkan.gpuinfo.org/displaydevicelimit.php?name=maxPushConstantsSize&platform=all
1484    #[profiling::function]
1485    pub fn push_constants_offset(&self, offset: u32, data: &[u8]) -> &Self {
1486        if let Some(push_const) = self.pipeline.push_constants {
1487            // Determine the range of the overall pipeline push constants which overlaps with `data`
1488            let push_const_end = push_const.offset + push_const.size;
1489            let data_end = offset + data.len() as u32;
1490            let end = data_end.min(push_const_end);
1491            let start = offset.max(push_const.offset);
1492
1493            if end > start {
1494                trace!(
1495                    "      push constants {:?} {}..{}",
1496                    push_const.stage_flags, start, end
1497                );
1498
1499                unsafe {
1500                    self.device.cmd_push_constants(
1501                        self.cmd_buf,
1502                        self.pipeline.layout,
1503                        vk::ShaderStageFlags::COMPUTE,
1504                        start,
1505                        &data[(start - offset) as usize..(end - offset) as usize],
1506                    );
1507                }
1508            }
1509        }
1510
1511        self
1512    }
1513}
1514
1515/// Describes the SPIR-V binding index, and optionally a specific descriptor set
1516/// and array index.
1517///
1518/// Generally you might pass a descriptor to a function as a simple integer:
1519///
1520/// ```rust
1521/// # fn my_func(_: usize, _: ()) {}
1522/// # let image = ();
1523/// let descriptor = 42;
1524/// my_func(descriptor, image);
1525/// ```
1526///
1527/// But you may also pass:
1528///
1529/// - `(0, 42)` for descriptor set `0` and binding index `42`
1530/// - `(42, [8])` for binding index `42` in descriptor set `0`, array element `8`
1531/// - `(0, 42, [8])` the same binding and element, with the descriptor set given explicitly
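///
/// For example, each of the forms above converts into a `Descriptor`. This is a minimal
/// sketch, not from the original docs, assuming `Descriptor` is reachable at
/// `screen_13::graph::pass_ref`:
///
/// ```rust
/// # use screen_13::graph::pass_ref::Descriptor;
/// let a: Descriptor = 42.into();           // set 0, binding 42
/// let b: Descriptor = (0, 42).into();      // set 0, binding 42
/// let c: Descriptor = (42, [8]).into();    // set 0, binding 42, element 8
/// let d: Descriptor = (0, 42, [8]).into(); // same as `c`
/// assert_eq!(a, b);
/// assert_eq!(c, d);
/// ```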
1532#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
1533pub enum Descriptor {
1534    /// An array binding which includes an `offset` argument for the bound element.
1535    ArrayBinding(DescriptorSetIndex, BindingIndex, BindingOffset),
1536
1537    /// A single binding.
1538    Binding(DescriptorSetIndex, BindingIndex),
1539}
1540
1541impl Descriptor {
1542    pub(super) fn into_tuple(self) -> (DescriptorSetIndex, BindingIndex, BindingOffset) {
1543        match self {
1544            Self::ArrayBinding(descriptor_set_idx, binding_idx, binding_offset) => {
1545                (descriptor_set_idx, binding_idx, binding_offset)
1546            }
1547            Self::Binding(descriptor_set_idx, binding_idx) => (descriptor_set_idx, binding_idx, 0),
1548        }
1549    }
1550
1551    pub(super) fn set(self) -> DescriptorSetIndex {
1552        let (res, _, _) = self.into_tuple();
1553        res
1554    }
1555}
1556
1557impl From<BindingIndex> for Descriptor {
1558    fn from(val: BindingIndex) -> Self {
1559        Self::Binding(0, val)
1560    }
1561}
1562
1563impl From<(DescriptorSetIndex, BindingIndex)> for Descriptor {
1564    fn from(tuple: (DescriptorSetIndex, BindingIndex)) -> Self {
1565        Self::Binding(tuple.0, tuple.1)
1566    }
1567}
1568
1569impl From<(BindingIndex, [BindingOffset; 1])> for Descriptor {
1570    fn from(tuple: (BindingIndex, [BindingOffset; 1])) -> Self {
1571        Self::ArrayBinding(0, tuple.0, tuple.1[0])
1572    }
1573}
1574
1575impl From<(DescriptorSetIndex, BindingIndex, [BindingOffset; 1])> for Descriptor {
1576    fn from(tuple: (DescriptorSetIndex, BindingIndex, [BindingOffset; 1])) -> Self {
1577        Self::ArrayBinding(tuple.0, tuple.1, tuple.2[0])
1578    }
1579}
1580
1581/// Recording interface for drawing commands.
1582///
1583/// This structure provides a strongly-typed set of methods which allow rasterization shader code to
1584/// be executed. An instance of `Draw` is provided to the closure parameter of
1585/// [`PipelinePassRef::record_subpass`], which becomes available once a [`GraphicPipeline`] has
1586/// been bound to the render pass.
1587///
1588/// # Examples
1589///
1590/// Basic usage:
1591///
1592/// ```no_run
1593/// # use std::sync::Arc;
1594/// # use ash::vk;
1595/// # use screen_13::driver::DriverError;
1596/// # use screen_13::driver::device::{Device, DeviceInfo};
1597/// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
1598/// # use screen_13::driver::image::{Image, ImageInfo};
1599/// # use screen_13::graph::RenderGraph;
1600/// # use screen_13::driver::shader::Shader;
1601/// # fn main() -> Result<(), DriverError> {
1602/// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
1603/// # let my_frag_code = [0u8; 1];
1604/// # let my_vert_code = [0u8; 1];
1605/// # let vert = Shader::new_vertex(my_vert_code.as_slice());
1606/// # let frag = Shader::new_fragment(my_frag_code.as_slice());
1607/// # let info = GraphicPipelineInfo::default();
1608/// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
1609/// # let mut my_graph = RenderGraph::new();
1610/// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
1611/// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
1612/// my_graph.begin_pass("my draw pass")
1613///         .bind_pipeline(&my_graphic_pipeline)
1614///         .store_color(0, swapchain_image)
1615///         .record_subpass(move |subpass, bindings| {
1616///             // During this closure we have access to the draw methods!
1617///         });
1618/// # Ok(()) }
1619/// ```
1620pub struct Draw<'a> {
1621    bindings: Bindings<'a>,
1622    cmd_buf: vk::CommandBuffer,
1623    device: &'a Device,
1624    pipeline: Arc<GraphicPipeline>,
1625}
1626
1627impl Draw<'_> {
1628    /// Bind an index buffer to the current pass.
1629    ///
1630    /// # Examples
1631    ///
1632    /// Basic usage:
1633    ///
1634    /// ```no_run
1635    /// # use std::sync::Arc;
1636    /// # use ash::vk;
1637    /// # use screen_13::driver::DriverError;
1638    /// # use screen_13::driver::device::{Device, DeviceInfo};
1639    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
1640    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
1641    /// # use screen_13::driver::image::{Image, ImageInfo};
1642    /// # use screen_13::driver::shader::Shader;
1643    /// # use screen_13::graph::RenderGraph;
1644    /// # fn main() -> Result<(), DriverError> {
1645    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
1646    /// # let my_frag_code = [0u8; 1];
1647    /// # let my_vert_code = [0u8; 1];
1648    /// # let vert = Shader::new_vertex(my_vert_code.as_slice());
1649    /// # let frag = Shader::new_fragment(my_frag_code.as_slice());
1650    /// # let info = GraphicPipelineInfo::default();
1651    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
1652    /// # let mut my_graph = RenderGraph::new();
1653    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
1654    /// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
1655    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::INDEX_BUFFER);
1656    /// # let my_idx_buf = Buffer::create(&device, buf_info)?;
1657    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
1658    /// # let my_vtx_buf = Buffer::create(&device, buf_info)?;
1659    /// # let my_idx_buf = my_graph.bind_node(my_idx_buf);
1660    /// # let my_vtx_buf = my_graph.bind_node(my_vtx_buf);
1661    /// my_graph.begin_pass("my indexed geometry draw pass")
1662    ///         .bind_pipeline(&my_graphic_pipeline)
1663    ///         .store_color(0, swapchain_image)
1664    ///         .read_node(my_idx_buf)
1665    ///         .read_node(my_vtx_buf)
1666    ///         .record_subpass(move |subpass, bindings| {
1667    ///             subpass.bind_index_buffer(my_idx_buf, vk::IndexType::UINT16)
1668    ///                    .bind_vertex_buffer(my_vtx_buf)
1669    ///                    .draw_indexed(42, 1, 0, 0, 0);
1670    ///         });
1671    /// # Ok(()) }
1672    /// ```
1673    pub fn bind_index_buffer(
1674        &self,
1675        buffer: impl Into<AnyBufferNode>,
1676        index_ty: vk::IndexType,
1677    ) -> &Self {
1678        self.bind_index_buffer_offset(buffer, index_ty, 0)
1679    }
1680
1681    /// Bind an index buffer to the current pass.
1682    ///
1683    /// Behaves similarly to `bind_index_buffer` except that `offset` is the starting offset in
1684    /// bytes within `buffer` used in index buffer address calculations.
1685    #[profiling::function]
1686    pub fn bind_index_buffer_offset(
1687        &self,
1688        buffer: impl Into<AnyBufferNode>,
1689        index_ty: vk::IndexType,
1690        offset: vk::DeviceSize,
1691    ) -> &Self {
1692        let buffer = buffer.into();
1693
1694        unsafe {
1695            self.device.cmd_bind_index_buffer(
1696                self.cmd_buf,
1697                *self.bindings[buffer],
1698                offset,
1699                index_ty,
1700            );
1701        }
1702
1703        self
1704    }
1705
1706    /// Bind a vertex buffer to the current pass.
1707    ///
1708    /// # Examples
1709    ///
1710    /// Basic usage:
1711    ///
1712    /// ```no_run
1713    /// # use std::sync::Arc;
1714    /// # use ash::vk;
1715    /// # use screen_13::driver::DriverError;
1716    /// # use screen_13::driver::device::{Device, DeviceInfo};
1717    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
1718    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
1719    /// # use screen_13::driver::image::{Image, ImageInfo};
1720    /// # use screen_13::driver::shader::Shader;
1721    /// # use screen_13::graph::RenderGraph;
1722    /// # fn main() -> Result<(), DriverError> {
1723    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
1724    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
1725    /// # let my_vtx_buf = Buffer::create(&device, buf_info)?;
1726    /// # let my_frag_code = [0u8; 1];
1727    /// # let my_vert_code = [0u8; 1];
1728    /// # let vert = Shader::new_vertex(my_vert_code.as_slice());
1729    /// # let frag = Shader::new_fragment(my_frag_code.as_slice());
1730    /// # let info = GraphicPipelineInfo::default();
1731    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
1732    /// # let mut my_graph = RenderGraph::new();
1733    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
1734    /// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
1735    /// # let my_vtx_buf = my_graph.bind_node(my_vtx_buf);
1736    /// my_graph.begin_pass("my unindexed geometry draw pass")
1737    ///         .bind_pipeline(&my_graphic_pipeline)
1738    ///         .store_color(0, swapchain_image)
1739    ///         .read_node(my_vtx_buf)
1740    ///         .record_subpass(move |subpass, bindings| {
1741    ///             subpass.bind_vertex_buffer(my_vtx_buf)
1742    ///                    .draw(42, 1, 0, 0);
1743    ///         });
1744    /// # Ok(()) }
1745    /// ```
1746    pub fn bind_vertex_buffer(&self, buffer: impl Into<AnyBufferNode>) -> &Self {
1747        self.bind_vertex_buffer_offset(buffer, 0)
1748    }
1749
1750    /// Bind a vertex buffer to the current pass.
1751    ///
1752    /// Behaves similarly to `bind_vertex_buffer` except that the vertex input binding is updated to
1753    /// start at `offset` from the start of `buffer`.
1754    #[profiling::function]
1755    pub fn bind_vertex_buffer_offset(
1756        &self,
1757        buffer: impl Into<AnyBufferNode>,
1758        offset: vk::DeviceSize,
1759    ) -> &Self {
1760        use std::slice::from_ref;
1761
1762        let buffer = buffer.into();
1763
1764        unsafe {
1765            self.device.cmd_bind_vertex_buffers(
1766                self.cmd_buf,
1767                0,
1768                from_ref(&self.bindings[buffer]),
1769                from_ref(&offset),
1770            );
1771        }
1772
1773        self
1774    }
1775
1776    /// Binds multiple vertex buffers to the current pass, starting at the given `first_binding`.
1777    ///
1778    /// Each vertex input binding in `buffers` specifies an offset from the start of the
1779    /// corresponding buffer.
1780    ///
1781    /// The vertex input attributes that use each of these bindings will use these updated addresses
1782    /// in their address calculations for subsequent drawing commands.
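    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs) which binds two hypothetical vertex
    /// streams, assuming the bound pipeline declares vertex buffer bindings `0` and `1`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::image::{Image, ImageInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let vert = Shader::new_vertex([0u8; 1].as_slice());
    /// # let frag = Shader::new_fragment([0u8; 1].as_slice());
    /// # let info = GraphicPipelineInfo::default();
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
    /// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
    /// # let my_pos_buf = my_graph.bind_node(Buffer::create(&device, buf_info)?);
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
    /// # let my_norm_buf = my_graph.bind_node(Buffer::create(&device, buf_info)?);
    /// my_graph.begin_pass("draw with two vertex streams")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .store_color(0, swapchain_image)
    ///         .read_node(my_pos_buf)
    ///         .read_node(my_norm_buf)
    ///         .record_subpass(move |subpass, bindings| {
    ///             subpass.bind_vertex_buffers(0, [(my_pos_buf, 0), (my_norm_buf, 0)])
    ///                    .draw(42, 1, 0, 0);
    ///         });
    /// # Ok(()) }
    /// ```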
1783    #[profiling::function]
1784    pub fn bind_vertex_buffers<B>(
1785        &self,
1786        first_binding: u32,
1787        buffer_offsets: impl IntoIterator<Item = (B, vk::DeviceSize)>,
1788    ) -> &Self
1789    where
1790        B: Into<AnyBufferNode>,
1791    {
1792        thread_local! {
1793            static BUFFERS_OFFSETS: RefCell<(Vec<vk::Buffer>, Vec<vk::DeviceSize>)> = Default::default();
1794        }
1795
1796        BUFFERS_OFFSETS.with_borrow_mut(|(buffers, offsets)| {
1797            buffers.clear();
1798            offsets.clear();
1799
1800            for (buffer, offset) in buffer_offsets {
1801                let buffer = buffer.into();
1802
1803                buffers.push(*self.bindings[buffer]);
1804                offsets.push(offset);
1805            }
1806
1807            unsafe {
1808                self.device.cmd_bind_vertex_buffers(
1809                    self.cmd_buf,
1810                    first_binding,
1811                    buffers.as_slice(),
1812                    offsets.as_slice(),
1813                );
1814            }
1815        });
1816
1817        self
1818    }
1819
1820    /// Draw unindexed primitives.
1821    ///
1822    /// When the command is executed, primitives are assembled using the current primitive topology
1823    /// and `vertex_count` consecutive vertex indices with the first `vertex_index` value equal to
1824    /// `first_vertex`. The primitives are drawn `instance_count` times with `instance_index`
1825    /// starting with `first_instance` and increasing sequentially for each instance.
1826    #[profiling::function]
1827    pub fn draw(
1828        &self,
1829        vertex_count: u32,
1830        instance_count: u32,
1831        first_vertex: u32,
1832        first_instance: u32,
1833    ) -> &Self {
1834        unsafe {
1835            self.device.cmd_draw(
1836                self.cmd_buf,
1837                vertex_count,
1838                instance_count,
1839                first_vertex,
1840                first_instance,
1841            );
1842        }
1843
1844        self
1845    }
1846
1847    /// Draw indexed primitives.
1848    ///
1849    /// When the command is executed, primitives are assembled using the current primitive topology
1850    /// and `index_count` vertices whose indices are retrieved from the index buffer. The index
1851    /// buffer is treated as an array of tightly packed unsigned integers of size defined by the
1852    /// `index_ty` parameter with which the buffer was bound.
1853    #[profiling::function]
1854    pub fn draw_indexed(
1855        &self,
1856        index_count: u32,
1857        instance_count: u32,
1858        first_index: u32,
1859        vertex_offset: i32,
1860        first_instance: u32,
1861    ) -> &Self {
1862        unsafe {
1863            self.device.cmd_draw_indexed(
1864                self.cmd_buf,
1865                index_count,
1866                instance_count,
1867                first_index,
1868                vertex_offset,
1869                first_instance,
1870            );
1871        }
1872
1873        self
1874    }
1875
1876    /// Draw primitives with indirect parameters and indexed vertices.
1877    ///
1878    /// `draw_indexed_indirect` behaves similarly to `draw_indexed` except that the parameters are
1879    /// read by the device from `buffer` during execution. `draw_count` draws are executed by the
1880    /// command, with parameters taken from `buffer` starting at `offset` and increasing by `stride`
1881    /// bytes for each successive draw. The parameters of each draw are encoded in an array of
1882    /// [`vk::DrawIndexedIndirectCommand`] structures.
1883    ///
1884    /// If `draw_count` is less than or equal to one, `stride` is ignored.
1885    ///
1886    /// # Examples
1887    ///
1888    /// Basic usage:
1889    ///
1890    /// ```no_run
1891    /// # use std::sync::Arc;
1892    /// # use std::mem::size_of;
1893    /// # use ash::vk;
1894    /// # use screen_13::driver::DriverError;
1895    /// # use screen_13::driver::device::{Device, DeviceInfo};
1896    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
1897    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
1898    /// # use screen_13::driver::image::{Image, ImageInfo};
1899    /// # use screen_13::driver::shader::Shader;
1900    /// # use screen_13::graph::RenderGraph;
1901    /// # fn main() -> Result<(), DriverError> {
1902    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
1903    /// # let my_frag_code = [0u8; 1];
1904    /// # let my_vert_code = [0u8; 1];
1905    /// # let vert = Shader::new_vertex(my_vert_code.as_slice());
1906    /// # let frag = Shader::new_fragment(my_frag_code.as_slice());
1907    /// # let info = GraphicPipelineInfo::default();
1908    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
1909    /// # let mut my_graph = RenderGraph::new();
1910    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::INDEX_BUFFER);
1911    /// # let my_idx_buf = Buffer::create(&device, buf_info)?;
1912    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
1913    /// # let my_vtx_buf = Buffer::create(&device, buf_info)?;
1914    /// # let my_idx_buf = my_graph.bind_node(my_idx_buf);
1915    /// # let my_vtx_buf = my_graph.bind_node(my_vtx_buf);
1916    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
1917    /// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
1918    /// const CMD_SIZE: usize = size_of::<vk::DrawIndexedIndirectCommand>();
1919    ///
1920    /// let cmd = vk::DrawIndexedIndirectCommand {
1921    ///     index_count: 3,
1922    ///     instance_count: 1,
1923    ///     first_index: 0,
1924    ///     vertex_offset: 0,
1925    ///     first_instance: 0,
1926    /// };
1927    /// let cmd_data = unsafe {
1928    ///     std::slice::from_raw_parts(&cmd as *const _ as *const _, CMD_SIZE)
1929    /// };
1930    ///
1931    /// let buf_flags = vk::BufferUsageFlags::INDIRECT_BUFFER;
1932    /// let buf = Buffer::create_from_slice(&device, buf_flags, cmd_data)?;
1933    /// let buf_node = my_graph.bind_node(buf);
1934    ///
1935    /// my_graph.begin_pass("draw a single triangle")
1936    ///         .bind_pipeline(&my_graphic_pipeline)
1937    ///         .store_color(0, swapchain_image)
1938    ///         .read_node(my_idx_buf)
1939    ///         .read_node(my_vtx_buf)
1940    ///         .read_node(buf_node)
1941    ///         .record_subpass(move |subpass, bindings| {
1942    ///             subpass.bind_index_buffer(my_idx_buf, vk::IndexType::UINT16)
1943    ///                    .bind_vertex_buffer(my_vtx_buf)
1944    ///                    .draw_indexed_indirect(buf_node, 0, 1, 0);
1945    ///         });
1946    /// # Ok(()) }
1947    /// ```
1948    #[profiling::function]
1949    pub fn draw_indexed_indirect(
1950        &self,
1951        buffer: impl Into<AnyBufferNode>,
1952        offset: vk::DeviceSize,
1953        draw_count: u32,
1954        stride: u32,
1955    ) -> &Self {
1956        let buffer = buffer.into();
1957
1958        unsafe {
1959            self.device.cmd_draw_indexed_indirect(
1960                self.cmd_buf,
1961                *self.bindings[buffer],
1962                offset,
1963                draw_count,
1964                stride,
1965            );
1966        }
1967
1968        self
1969    }
1970
1971    /// Draw primitives with indirect parameters, indexed vertices, and draw count.
1972    ///
1973    /// `draw_indexed_indirect_count` behaves similarly to `draw_indexed_indirect` except that the
1974    /// draw count is read by the device from `count_buf` during execution. The command will read an
1975    /// unsigned 32-bit integer from `count_buf` located at `count_buf_offset` and use this as the
1976    /// draw count.
1977    ///
1978    /// `max_draw_count` specifies the maximum number of draws that will be executed. The actual
1979    /// number of executed draw calls is the minimum of the count specified in `count_buf` and
1980    /// `max_draw_count`.
1981    ///
1982    /// `stride` is the byte stride between successive sets of draw parameters.
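    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs), assuming the device supports the
    /// `drawIndirectCount` feature required by this command:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use std::mem::size_of;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::image::{Image, ImageInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let vert = Shader::new_vertex([0u8; 1].as_slice());
    /// # let frag = Shader::new_fragment([0u8; 1].as_slice());
    /// # let info = GraphicPipelineInfo::default();
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::INDEX_BUFFER);
    /// # let my_idx_buf = my_graph.bind_node(Buffer::create(&device, buf_info)?);
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::VERTEX_BUFFER);
    /// # let my_vtx_buf = my_graph.bind_node(Buffer::create(&device, buf_info)?);
    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
    /// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
    /// const CMD_SIZE: usize = size_of::<vk::DrawIndexedIndirectCommand>();
    ///
    /// let cmd = vk::DrawIndexedIndirectCommand {
    ///     index_count: 3,
    ///     instance_count: 1,
    ///     first_index: 0,
    ///     vertex_offset: 0,
    ///     first_instance: 0,
    /// };
    /// let cmd_data = unsafe {
    ///     std::slice::from_raw_parts(&cmd as *const _ as *const _, CMD_SIZE)
    /// };
    ///
    /// // Both the parameter and count buffers require INDIRECT_BUFFER usage
    /// let buf_flags = vk::BufferUsageFlags::INDIRECT_BUFFER;
    /// let args_buf = my_graph.bind_node(Buffer::create_from_slice(&device, buf_flags, cmd_data)?);
    /// let count_data = 1u32.to_ne_bytes();
    /// let count_buf = my_graph.bind_node(Buffer::create_from_slice(&device, buf_flags, count_data.as_slice())?);
    ///
    /// my_graph.begin_pass("draw at most one triangle")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .store_color(0, swapchain_image)
    ///         .read_node(my_idx_buf)
    ///         .read_node(my_vtx_buf)
    ///         .read_node(args_buf)
    ///         .read_node(count_buf)
    ///         .record_subpass(move |subpass, bindings| {
    ///             subpass.bind_index_buffer(my_idx_buf, vk::IndexType::UINT16)
    ///                    .bind_vertex_buffer(my_vtx_buf)
    ///                    .draw_indexed_indirect_count(args_buf, 0, count_buf, 0, 1, CMD_SIZE as u32);
    ///         });
    /// # Ok(()) }
    /// ```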
1983    #[profiling::function]
1984    pub fn draw_indexed_indirect_count(
1985        &self,
1986        buffer: impl Into<AnyBufferNode>,
1987        offset: vk::DeviceSize,
1988        count_buf: impl Into<AnyBufferNode>,
1989        count_buf_offset: vk::DeviceSize,
1990        max_draw_count: u32,
1991        stride: u32,
1992    ) -> &Self {
1993        let buffer = buffer.into();
1994        let count_buf = count_buf.into();
1995
1996        unsafe {
1997            self.device.cmd_draw_indexed_indirect_count(
1998                self.cmd_buf,
1999                *self.bindings[buffer],
2000                offset,
2001                *self.bindings[count_buf],
2002                count_buf_offset,
2003                max_draw_count,
2004                stride,
2005            );
2006        }
2007
2008        self
2009    }
2010
2011    /// Draw primitives with indirect parameters and unindexed vertices.
2012    ///
2013    /// Behaves otherwise similarly to [`Draw::draw_indexed_indirect`].
2014    #[profiling::function]
2015    pub fn draw_indirect(
2016        &self,
2017        buffer: impl Into<AnyBufferNode>,
2018        offset: vk::DeviceSize,
2019        draw_count: u32,
2020        stride: u32,
2021    ) -> &Self {
2022        let buffer = buffer.into();
2023
2024        unsafe {
2025            self.device.cmd_draw_indirect(
2026                self.cmd_buf,
2027                *self.bindings[buffer],
2028                offset,
2029                draw_count,
2030                stride,
2031            );
2032        }
2033
2034        self
2035    }
2036
2037    /// Draw primitives with indirect parameters, unindexed vertices, and draw count.
2038    ///
2039    /// Behaves otherwise similarly to [`Draw::draw_indexed_indirect_count`].
2040    #[profiling::function]
2041    pub fn draw_indirect_count(
2042        &self,
2043        buffer: impl Into<AnyBufferNode>,
2044        offset: vk::DeviceSize,
2045        count_buf: impl Into<AnyBufferNode>,
2046        count_buf_offset: vk::DeviceSize,
2047        max_draw_count: u32,
2048        stride: u32,
2049    ) -> &Self {
2050        let buffer = buffer.into();
2051        let count_buf = count_buf.into();
2052
2053        unsafe {
2054            self.device.cmd_draw_indirect_count(
2055                self.cmd_buf,
2056                *self.bindings[buffer],
2057                offset,
2058                *self.bindings[count_buf],
2059                count_buf_offset,
2060                max_draw_count,
2061                stride,
2062            );
2063        }
2064
2065        self
2066    }
2067
2068    /// Updates push constants.
2069    ///
2070    /// Push constants represent a high speed path to modify constant data in pipelines that is
2071    /// expected to outperform memory-backed resource updates.
2072    ///
2073    /// Push constant values can be updated incrementally, causing shader stages to read the new
2074    /// data for push constants modified by this command, while still reading the previous data for
2075    /// push constants not modified by this command.
2076    ///
2077    /// # Device limitations
2078    ///
2079    /// See
2080    /// [`device.physical_device.props.limits.max_push_constants_size`](vk::PhysicalDeviceLimits)
2081    /// for the limits of the current device. You may also check [gpuinfo.org] for a listing of
2082    /// reported limits on other devices.
2083    ///
2084    /// # Examples
2085    ///
2086    /// Basic usage:
2087    ///
2088    /// ```
2089    /// # inline_spirv::inline_spirv!(r#"
2090    /// #version 450
2091    ///
2092    /// layout(push_constant) uniform PushConstants {
2093    ///     layout(offset = 0) uint the_answer;
2094    /// } push_constants;
2095    ///
2096    /// void main()
2097    /// {
2098    ///     // TODO: Add code!
2099    /// }
2100    /// # "#, vert);
2101    /// ```
2102    ///
2103    /// ```no_run
2104    /// # use std::sync::Arc;
2105    /// # use ash::vk;
2106    /// # use screen_13::driver::DriverError;
2107    /// # use screen_13::driver::device::{Device, DeviceInfo};
2108    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
2109    /// # use screen_13::driver::image::{Image, ImageInfo};
2110    /// # use screen_13::graph::RenderGraph;
2111    /// # use screen_13::driver::shader::Shader;
2112    /// # fn main() -> Result<(), DriverError> {
2113    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
2114    /// # let my_frag_code = [0u8; 1];
2115    /// # let my_vert_code = [0u8; 1];
2116    /// # let vert = Shader::new_vertex(my_vert_code.as_slice());
2117    /// # let frag = Shader::new_fragment(my_frag_code.as_slice());
2118    /// # let info = GraphicPipelineInfo::default();
2119    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
2120    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
2121    /// # let swapchain_image = Image::create(&device, info)?;
2122    /// # let mut my_graph = RenderGraph::new();
2123    /// # let swapchain_image = my_graph.bind_node(swapchain_image);
2124    /// my_graph.begin_pass("draw a quad")
2125    ///         .bind_pipeline(&my_graphic_pipeline)
2126    ///         .store_color(0, swapchain_image)
2127    ///         .record_subpass(move |subpass, bindings| {
2128    ///             subpass.push_constants(&42u32.to_ne_bytes())
2129    ///                    .draw(6, 1, 0, 0);
2130    ///         });
2131    /// # Ok(()) }
2132    /// ```
2133    ///
2134    /// [gpuinfo.org]: https://vulkan.gpuinfo.org/displaydevicelimit.php?name=maxPushConstantsSize&platform=all
2135    pub fn push_constants(&self, data: &[u8]) -> &Self {
2136        self.push_constants_offset(0, data)
2137    }
2138
2139    /// Updates push constants starting at the given `offset`.
2140    ///
2141    /// Behaves similarly to [`Draw::push_constants`] except that `offset` describes the position at
2142    /// which `data` updates the push constants of the currently bound pipeline. This may be used to
2143    /// update a subset or single field of previously set push constant data.
2144    ///
2145    /// # Device limitations
2146    ///
2147    /// See
2148    /// [`device.physical_device.props.limits.max_push_constants_size`](vk::PhysicalDeviceLimits)
2149    /// for the limits of the current device. You may also check [gpuinfo.org] for a listing of
2150    /// reported limits on other devices.
2151    ///
2152    /// # Examples
2153    ///
2154    /// Basic usage:
2155    ///
2156    /// ```
2157    /// # inline_spirv::inline_spirv!(r#"
2158    /// #version 450
2159    ///
2160    /// layout(push_constant) uniform PushConstants {
2161    ///     layout(offset = 0) uint some_val1;
2162    ///     layout(offset = 4) uint some_val2;
2163    /// } push_constants;
2164    ///
2165    /// void main()
2166    /// {
2167    ///     // TODO: Add code!
2168    /// }
2169    /// # "#, vert);
2170    /// ```
2171    ///
2172    /// ```no_run
2173    /// # use std::sync::Arc;
2174    /// # use ash::vk;
2175    /// # use screen_13::driver::DriverError;
2176    /// # use screen_13::driver::device::{Device, DeviceInfo};
2177    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
2178    /// # use screen_13::driver::image::{Image, ImageInfo};
2179    /// # use screen_13::graph::RenderGraph;
2180    /// # use screen_13::driver::shader::Shader;
2181    /// # fn main() -> Result<(), DriverError> {
2182    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
2183    /// # let my_frag_code = [0u8; 1];
2184    /// # let my_vert_code = [0u8; 1];
2185    /// # let vert = Shader::new_vertex(my_vert_code.as_slice());
2186    /// # let frag = Shader::new_fragment(my_frag_code.as_slice());
2187    /// # let info = GraphicPipelineInfo::default();
2188    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
2189    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
2190    /// # let swapchain_image = Image::create(&device, info)?;
2191    /// # let mut my_graph = RenderGraph::new();
2192    /// # let swapchain_image = my_graph.bind_node(swapchain_image);
2193    /// my_graph.begin_pass("draw a quad")
2194    ///         .bind_pipeline(&my_graphic_pipeline)
2195    ///         .store_color(0, swapchain_image)
2196    ///         .record_subpass(move |subpass, bindings| {
2197    ///             subpass.push_constants(&[0u8; 8])
2198    ///                    .draw(6, 1, 0, 0)
2199    ///                    .push_constants_offset(4, &0xffu32.to_ne_bytes())
2200    ///                    .draw(6, 1, 0, 0);
2201    ///         });
2202    /// # Ok(()) }
2203    /// ```
2204    ///
2205    /// [gpuinfo.org]: https://vulkan.gpuinfo.org/displaydevicelimit.php?name=maxPushConstantsSize&platform=all
2206    #[profiling::function]
2207    pub fn push_constants_offset(&self, offset: u32, data: &[u8]) -> &Self {
2208        for push_const in self.pipeline.push_constants.iter() {
2209            // Determine the range of the overall pipeline push constants which overlaps with `data`
2210            let push_const_end = push_const.offset + push_const.size;
2211            let data_end = offset + data.len() as u32;
2212            let end = data_end.min(push_const_end);
2213            let start = offset.max(push_const.offset);
2214
2215            if end > start {
2216                trace!(
2217                    "      push constants {:?} {}..{}",
2218                    push_const.stage_flags, start, end
2219                );
2220
2221                unsafe {
2222                    self.device.cmd_push_constants(
2223                        self.cmd_buf,
2224                        self.pipeline.layout,
2225                        push_const.stage_flags,
2226                        start,
2227                        &data[(start - offset) as usize..(end - offset) as usize],
2228                    );
2229                }
2230            }
2231        }
2232
2233        self
2234    }
2235
2236    /// Set scissor rectangle dynamically for a pass.
2237    #[profiling::function]
2238    pub fn set_scissor(&self, x: i32, y: i32, width: u32, height: u32) -> &Self {
2239        unsafe {
2240            self.device.cmd_set_scissor(
2241                self.cmd_buf,
2242                0,
2243                &[vk::Rect2D {
2244                    extent: vk::Extent2D { width, height },
2245                    offset: vk::Offset2D { x, y },
2246                }],
2247            );
2248        }
2249
2250        self
2251    }
2252
2253    /// Set scissor rectangles dynamically for a pass.
2254    #[profiling::function]
2255    pub fn set_scissors<S>(
2256        &self,
2257        first_scissor: u32,
2258        scissors: impl IntoIterator<Item = S>,
2259    ) -> &Self
2260    where
2261        S: Into<vk::Rect2D>,
2262    {
2263        thread_local! {
2264            static SCISSORS: RefCell<Vec<vk::Rect2D>> = Default::default();
2265        }
2266
2267        SCISSORS.with_borrow_mut(|scissors_vec| {
2268            scissors_vec.clear();
2269
2270            for scissor in scissors {
2271                scissors_vec.push(scissor.into());
2272            }
2273
2274            unsafe {
2275                self.device
2276                    .cmd_set_scissor(self.cmd_buf, first_scissor, scissors_vec.as_slice());
2277            }
2278        });
2279
2280        self
2281    }
2282
2283    /// Set the viewport dynamically for a pass.
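    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs) which renders to a 32x32 image with an
    /// explicit viewport and the standard `0.0..1.0` depth range:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::image::{Image, ImageInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let vert = Shader::new_vertex([0u8; 1].as_slice());
    /// # let frag = Shader::new_fragment([0u8; 1].as_slice());
    /// # let info = GraphicPipelineInfo::default();
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device, info, [vert, frag])?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::SAMPLED);
    /// # let swapchain_image = my_graph.bind_node(Image::create(&device, info)?);
    /// my_graph.begin_pass("draw with an explicit viewport")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .store_color(0, swapchain_image)
    ///         .record_subpass(move |subpass, bindings| {
    ///             subpass.set_viewport(0.0, 0.0, 32.0, 32.0, 0.0..1.0)
    ///                    .set_scissor(0, 0, 32, 32)
    ///                    .draw(3, 1, 0, 0);
    ///         });
    /// # Ok(()) }
    /// ```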
2284    #[profiling::function]
2285    pub fn set_viewport(
2286        &self,
2287        x: f32,
2288        y: f32,
2289        width: f32,
2290        height: f32,
2291        depth: Range<f32>,
2292    ) -> &Self {
2293        unsafe {
2294            self.device.cmd_set_viewport(
2295                self.cmd_buf,
2296                0,
2297                &[vk::Viewport {
2298                    x,
2299                    y,
2300                    width,
2301                    height,
2302                    min_depth: depth.start,
2303                    max_depth: depth.end,
2304                }],
2305            );
2306        }
2307
2308        self
2309    }
2310
2311    /// Set the viewports dynamically for a pass.
2312    #[profiling::function]
2313    pub fn set_viewports<V>(
2314        &self,
2315        first_viewport: u32,
2316        viewports: impl IntoIterator<Item = V>,
2317    ) -> &Self
2318    where
2319        V: Into<vk::Viewport>,
2320    {
2321        thread_local! {
2322            static VIEWPORTS: RefCell<Vec<vk::Viewport>> = Default::default();
2323        }
2324
2325        VIEWPORTS.with_borrow_mut(|viewports_vec| {
2326            viewports_vec.clear();
2327
2328            for viewport in viewports {
2329                viewports_vec.push(viewport.into());
2330            }
2331
2332            unsafe {
2333                self.device.cmd_set_viewport(
2334                    self.cmd_buf,
2335                    first_viewport,
2336                    viewports_vec.as_slice(),
2337                );
2338            }
2339        });
2340
2341        self
2342    }
2343}
2344
2345/// A general render pass which may record acceleration structure commands or general commands,
2346/// or may have a pipeline bound to it in order to record commands specific to that pipeline type.
2347pub struct PassRef<'a> {
2348    pub(super) exec_idx: usize,
2349    pub(super) graph: &'a mut RenderGraph,
2350    pub(super) pass_idx: usize,
2351}
2352
2353impl<'a> PassRef<'a> {
2354    pub(super) fn new(graph: &'a mut RenderGraph, name: String) -> PassRef<'a> {
2355        let pass_idx = graph.passes.len();
2356        graph.passes.push(Pass {
2357            execs: vec![Default::default()], // We start off with a default execution!
2358            name,
2359        });
2360
2361        Self {
2362            exec_idx: 0,
2363            graph,
2364            pass_idx,
2365        }
2366    }
2367
2368    /// Informs the pass that the next recorded command buffer will read or write the given `node`
2369    /// using `access`.
2370    ///
2371    /// This function must be called for `node` before it is read or written within a `record`
2372    /// function. For general purpose access, see [`PassRef::read_node`] or [`PassRef::write_node`].
2373    pub fn access_node(mut self, node: impl Node + Information, access: AccessType) -> Self {
2374        self.access_node_mut(node, access);
2375
2376        self
2377    }
2378
2379    /// Informs the pass that the next recorded command buffer will read or write the given `node`
2380    /// using `access`.
2381    ///
2382    /// This function must be called for `node` before it is read or written within a `record`
2383    /// function. For general purpose access, see [`PassRef::read_node_mut`] or
2384    /// [`PassRef::write_node_mut`].
2385    pub fn access_node_mut(&mut self, node: impl Node + Information, access: AccessType) {
2386        self.assert_bound_graph_node(node);
2387
2388        let idx = node.index();
2389        let binding = &self.graph.bindings[idx];
2390
2391        let node_access_range = if let Some(buf) = binding.as_driver_buffer() {
2392            Subresource::Buffer((0..buf.info.size).into())
2393        } else if let Some(image) = binding.as_driver_image() {
2394            Subresource::Image(image.info.default_view_info().into())
2395        } else {
2396            Subresource::AccelerationStructure
2397        };
2398
2399        self.push_node_access(node, access, node_access_range);
2400    }
2401
2402    /// Informs the pass that the next recorded command buffer will read or write the `subresource`
2403    /// of `node` using `access`.
2404    ///
2405    /// This function must be called for `node` before it is read or written within a `record`
2406    /// function. For general purpose access, see [`PassRef::read_node`] or [`PassRef::write_node`].
2407    pub fn access_node_subrange<N>(
2408        mut self,
2409        node: N,
2410        access: AccessType,
2411        subresource: impl Into<N::Subresource>,
2412    ) -> Self
2413    where
2414        N: View,
2415    {
2416        self.access_node_subrange_mut(node, access, subresource);
2417
2418        self
2419    }
2420
2421    /// Informs the pass that the next recorded command buffer will read or write the `subresource`
2422    /// of `node` using `access`.
2423    ///
2424    /// This function must be called for `node` before it is read or written within a `record`
2425    /// function. For general purpose access, see [`PassRef::read_node`] or [`PassRef::write_node`].
2426    pub fn access_node_subrange_mut<N>(
2427        &mut self,
2428        node: N,
2429        access: AccessType,
2430        subresource: impl Into<N::Subresource>,
2431    ) where
2432        N: View,
2433    {
2434        self.push_node_access(node, access, subresource.into().into());
2435    }
2436
2437    fn as_mut(&mut self) -> &mut Pass {
2438        &mut self.graph.passes[self.pass_idx]
2439    }
2440
2441    fn as_ref(&self) -> &Pass {
2442        &self.graph.passes[self.pass_idx]
2443    }
2444
2445    fn assert_bound_graph_node(&self, node: impl Node) {
2446        let idx = node.index();
2447
2448        assert!(self.graph.bindings[idx].is_bound());
2449    }
2450
2451    /// Binds a Vulkan acceleration structure, buffer, or image to the graph associated with this
2452    /// pass.
2453    ///
2454    /// Bound nodes may be used in passes for pipeline and shader operations.
2455    pub fn bind_node<'b, B>(&'b mut self, binding: B) -> <B as Edge<RenderGraph>>::Result
2456    where
2457        B: Edge<RenderGraph>,
2458        B: Bind<&'b mut RenderGraph, <B as Edge<RenderGraph>>::Result>,
2459    {
2460        self.graph.bind_node(binding)
2461    }
2462
2463    /// Binds a [`ComputePipeline`], [`GraphicPipeline`], or [`RayTracePipeline`] to the current
2464    /// pass, allowing for strongly typed access to the related functions.
2465    pub fn bind_pipeline<B>(self, binding: B) -> <B as Edge<Self>>::Result
2466    where
2467        B: Edge<Self>,
2468        B: Bind<Self, <B as Edge<Self>>::Result>,
2469    {
2470        binding.bind(self)
2471    }
2472
2473    /// Returns information used to create a node.
2474    pub fn node_info<N>(&self, node: N) -> <N as Information>::Info
2475    where
2476        N: Information,
2477    {
2478        node.get(self.graph)
2479    }
2480
2481    fn push_execute(
2482        &mut self,
2483        func: impl FnOnce(&Device, vk::CommandBuffer, Bindings<'_>) + Send + 'static,
2484    ) {
2485        let pass = self.as_mut();
2486        let exec = {
2487            let last_exec = pass.execs.last_mut().unwrap();
2488            last_exec.func = Some(ExecutionFunction(Box::new(func)));
2489
2490            Execution {
2491                pipeline: last_exec.pipeline.clone(),
2492                ..Default::default()
2493            }
2494        };
2495
2496        pass.execs.push(exec);
2497        self.exec_idx += 1;
2498    }
2499
2500    fn push_node_access(&mut self, node: impl Node, access: AccessType, subresource: Subresource) {
2501        let node_idx = node.index();
2502        self.assert_bound_graph_node(node);
2503
2504        let access = SubresourceAccess {
2505            access,
2506            subresource,
2507        };
2508        self.as_mut()
2509            .execs
2510            .last_mut()
2511            .unwrap()
2512            .accesses
2513            .entry(node_idx)
2514            .and_modify(|accesses| accesses.push(access))
2515            .or_insert(vec![access]);
2516    }
2517
2518    /// Informs the pass that the next recorded command buffer will read the given `node` using
2519    /// [`AccessType::AnyShaderReadSampledImageOrUniformTexelBuffer`].
2520    ///
2521    /// This function must be called for `node` before it is read within a `record` function. For
2522    /// more specific access, see [`PassRef::access_node`].
2523    pub fn read_node(mut self, node: impl Node + Information) -> Self {
2524        self.read_node_mut(node);
2525
2526        self
2527    }
2528
2529    /// Informs the pass that the next recorded command buffer will read the given `node` using
2530    /// [`AccessType::AnyShaderReadSampledImageOrUniformTexelBuffer`].
2531    ///
2532    /// This function must be called for `node` before it is read within a `record` function. For
2533    /// more specific access, see [`PassRef::access_node`].
2534    pub fn read_node_mut(&mut self, node: impl Node + Information) {
2535        self.access_node_mut(
2536            node,
2537            AccessType::AnyShaderReadSampledImageOrUniformTexelBuffer,
2538        );
2539    }
2540
2541    /// Begin recording an acceleration structure command buffer.
2542    ///
2543    /// This is the entry point for building and updating an [`AccelerationStructure`] instance.
2544    pub fn record_acceleration(
2545        mut self,
2546        func: impl FnOnce(Acceleration<'_>, Bindings<'_>) + Send + 'static,
2547    ) -> Self {
2548        self.push_execute(move |device, cmd_buf, bindings| {
2549            func(
2550                Acceleration {
2551                    bindings,
2552                    cmd_buf,
2553                    device,
2554                },
2555                bindings,
2556            );
2557        });
2558
2559        self
2560    }
2561
2562    /// Begin recording a general command buffer.
2563    ///
2564    /// The provided closure allows you to run any Vulkan code, or interoperate with other Vulkan
2565    /// code and interfaces.
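    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs); `AccessType` is assumed to come from the
    /// `vk_sync` crate, which this module uses for all access declarations:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::graph::RenderGraph;
    /// # use vk_sync::AccessType;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let buf_info = BufferInfo::device_mem(8, vk::BufferUsageFlags::TRANSFER_DST);
    /// # let my_buf = my_graph.bind_node(Buffer::create(&device, buf_info)?);
    /// my_graph.begin_pass("zero a buffer")
    ///         .access_node(my_buf, AccessType::TransferWrite)
    ///         .record_cmd_buf(move |device, cmd_buf, bindings| unsafe {
    ///             // `device` derefs to `ash::Device`, so any raw command may be recorded
    ///             device.cmd_fill_buffer(cmd_buf, *bindings[my_buf], 0, vk::WHOLE_SIZE, 0);
    ///         });
    /// # Ok(()) }
    /// ```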
2566    pub fn record_cmd_buf(
2567        mut self,
2568        func: impl FnOnce(&Device, vk::CommandBuffer, Bindings<'_>) + Send + 'static,
2569    ) -> Self {
2570        self.push_execute(func);
2571
2572        self
2573    }
2574
2575    /// Finalize the recording of this pass and return to the `RenderGraph` where you may record
2576    /// additional passes.
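    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs):
    ///
    /// ```no_run
    /// # use screen_13::graph::RenderGraph;
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("first pass")
    ///         .record_cmd_buf(|_, _, _| ())
    ///         .submit_pass()
    ///         .begin_pass("second pass")
    ///         .record_cmd_buf(|_, _, _| ());
    /// ```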
2577    pub fn submit_pass(self) -> &'a mut RenderGraph {
2578        // If nothing was done in this pass we can just ignore it
2579        if self.exec_idx == 0 {
2580            self.graph.passes.pop();
2581        }
2582
2583        self.graph
2584    }
2585
2586    /// Informs the pass that the next recorded command buffer will write the given `node` using
2587    /// [`AccessType::AnyShaderWrite`].
2588    ///
2589    /// This function must be called for `node` before it is written within a `record` function. For
2590    /// more specific access, see [`PassRef::access_node`].
2591    pub fn write_node(mut self, node: impl Node + Information) -> Self {
2592        self.write_node_mut(node);
2593
2594        self
2595    }
2596
2597    /// Informs the pass that the next recorded command buffer will write the given `node` using
2598    /// [`AccessType::AnyShaderWrite`].
2599    ///
2600    /// This function must be called for `node` before it is written within a `record` function. For
2601    /// more specific access, see [`PassRef::access_node`].
2602    pub fn write_node_mut(&mut self, node: impl Node + Information) {
2603        self.access_node_mut(node, AccessType::AnyShaderWrite);
2604    }
2605}
2606
2607/// A render pass which has been bound to a particular compute, graphic, or ray-trace pipeline.
2608pub struct PipelinePassRef<'a, T>
2609where
2610    T: Access,
2611{
2612    __: PhantomData<T>,
2613    pass: PassRef<'a>,
2614}
2615
2616impl<'a, T> PipelinePassRef<'a, T>
2617where
2618    T: Access,
2619{
2620    /// Informs the pass that the next recorded command buffer will read or write the given `node`
2621    /// at the specified shader descriptor using `access`.
2622    ///
2623    /// This function must be called for `node` before it is read or written within a `record`
2624    /// function. For general purpose access, see [`PipelinePassRef::read_descriptor`] or
2625    /// [`PipelinePassRef::write_descriptor`].
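    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs) which binds a storage image for compute
    /// shader writes at descriptor `0`; `AccessType` is assumed to come from the `vk_sync`
    /// crate:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::compute::{ComputePipeline, ComputePipelineInfo};
    /// # use screen_13::driver::image::{Image, ImageInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # use vk_sync::AccessType;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let info = ComputePipelineInfo::default();
    /// # let shader = Shader::new_compute([0u8; 1].as_slice());
    /// # let my_compute_pipeline = Arc::new(ComputePipeline::create(&device, info, shader)?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let info = ImageInfo::image_2d(32, 32, vk::Format::R8G8B8A8_UNORM, vk::ImageUsageFlags::STORAGE);
    /// # let my_image = my_graph.bind_node(Image::create(&device, info)?);
    /// my_graph.begin_pass("write an image from a compute shader")
    ///         .bind_pipeline(&my_compute_pipeline)
    ///         .access_descriptor(0, my_image, AccessType::ComputeShaderWrite)
    ///         .record_compute(move |compute, bindings| {
    ///             compute.dispatch(32, 32, 1);
    ///         });
    /// # Ok(()) }
    /// ```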
2626    pub fn access_descriptor<N>(
2627        self,
2628        descriptor: impl Into<Descriptor>,
2629        node: N,
2630        access: AccessType,
2631    ) -> Self
2632    where
2633        N: Information,
2634        N: View,
2635        ViewType: From<<N as View>::Information>,
2636        <N as View>::Information: From<<N as Information>::Info>,
2637        <N as View>::Subresource: From<<N as View>::Information>,
2638    {
2639        let view_info = node.get(self.pass.graph);
2640        self.access_descriptor_as(descriptor, node, access, view_info)
2641    }
2642
2643    /// Informs the pass that the next recorded command buffer will read or write the given `node`
2644    /// at the specified shader descriptor using `access`. The node will be interpreted using
2645    /// `view_info`.
2646    ///
2647    /// This function must be called for `node` before it is read or written within a `record`
2648    /// function. For general purpose access, see [`PipelinePassRef::read_descriptor_as`] or
2649    /// [`PipelinePassRef::write_descriptor_as`].
2650    pub fn access_descriptor_as<N>(
2651        self,
2652        descriptor: impl Into<Descriptor>,
2653        node: N,
2654        access: AccessType,
2655        view_info: impl Into<N::Information>,
2656    ) -> Self
2657    where
2658        N: View,
2659        <N as View>::Information: Into<ViewType>,
2660        <N as View>::Subresource: From<<N as View>::Information>,
2661    {
2662        let view_info = view_info.into();
2663        let subresource = <N as View>::Subresource::from(view_info);
2664
2665        self.access_descriptor_subrange(descriptor, node, access, view_info, subresource)
2666    }
2667
2668    /// Informs the pass that the next recorded command buffer will read or write the `subresource`
2669    /// of `node` at the specified shader descriptor using `access`. The node will be interpreted
2670    /// using `view_info`.
2671    ///
2672    /// This function must be called for `node` before it is read or written within a `record`
2673    /// function. For general purpose access, see [`PipelinePassRef::read_descriptor_subrange`] or
2674    /// [`PipelinePassRef::write_descriptor_subrange`].
2675    pub fn access_descriptor_subrange<N>(
2676        mut self,
2677        descriptor: impl Into<Descriptor>,
2678        node: N,
2679        access: AccessType,
2680        view_info: impl Into<N::Information>,
2681        subresource: impl Into<N::Subresource>,
2682    ) -> Self
2683    where
2684        N: View,
2685        <N as View>::Information: Into<ViewType>,
2686    {
2687        self.pass
2688            .push_node_access(node, access, subresource.into().into());
2689        self.push_node_view_bind(node, view_info.into(), descriptor.into());
2690
2691        self
2692    }
2693
2694    /// Informs the pass that the next recorded command buffer will read or write the given `node`
2695    /// using `access`.
2696    ///
2697    /// This function must be called for `node` before it is read or written within a `record`
2698    /// function. For general purpose access, see [`PipelinePassRef::read_node`] or
2699    /// [`PipelinePassRef::write_node`].
2700    pub fn access_node(mut self, node: impl Node + Information, access: AccessType) -> Self {
2701        self.access_node_mut(node, access);
2702
2703        self
2704    }
2705
2706    /// Informs the pass that the next recorded command buffer will read or write the given `node`
2707    /// using `access`.
2708    ///
2709    /// This function must be called for `node` before it is read or written within a `record`
2710    /// function. For general purpose access, see [`PipelinePassRef::read_node_mut`] or
2711    /// [`PipelinePassRef::write_node_mut`].
2712    pub fn access_node_mut(&mut self, node: impl Node + Information, access: AccessType) {
2713        self.pass.assert_bound_graph_node(node);
2714
2715        let idx = node.index();
2716        let binding = &self.pass.graph.bindings[idx];
2717
2718        let node_access_range = if let Some(buf) = binding.as_driver_buffer() {
2719            Subresource::Buffer((0..buf.info.size).into())
2720        } else if let Some(image) = binding.as_driver_image() {
2721            Subresource::Image(image.info.default_view_info().into())
2722        } else {
2723            Subresource::AccelerationStructure
2724        };
2725
2726        self.pass.push_node_access(node, access, node_access_range);
2727    }
2728
2729    /// Informs the pass that the next recorded command buffer will read or write the `subresource`
2730    /// of `node` using `access`.
2731    ///
2732    /// This function must be called for `node` before it is read or written within a `record`
2733    /// function. For general purpose access, see [`PipelinePassRef::read_node_subrange`] or
2734    /// [`PipelinePassRef::write_node_subrange`].
2735    pub fn access_node_subrange<N>(
2736        mut self,
2737        node: N,
2738        access: AccessType,
2739        subresource: impl Into<N::Subresource>,
2740    ) -> Self
2741    where
2742        N: View,
2743    {
2744        self.access_node_subrange_mut(node, access, subresource);
2745
2746        self
2747    }
2748
2749    /// Informs the pass that the next recorded command buffer will read or write the `subresource`
2750    /// of `node` using `access`.
2751    ///
2752    /// This function must be called for `node` before it is read or written within a `record`
2753    /// function. For general purpose access, see [`PipelinePassRef::read_node_subrange_mut`] or
2754    /// [`PipelinePassRef::write_node_subrange_mut`].
2755    pub fn access_node_subrange_mut<N>(
2756        &mut self,
2757        node: N,
2758        access: AccessType,
2759        subresource: impl Into<N::Subresource>,
2760    ) where
2761        N: View,
2762    {
2763        self.pass
2764            .push_node_access(node, access, subresource.into().into());
2765    }
2766
2767    /// Binds a Vulkan acceleration structure, buffer, or image to the graph associated with this
2768    /// pass.
2769    ///
2770    /// Bound nodes may be used in passes for pipeline and shader operations.
2771    pub fn bind_node<'b, B>(&'b mut self, binding: B) -> <B as Edge<RenderGraph>>::Result
2772    where
2773        B: Edge<RenderGraph>,
2774        B: Bind<&'b mut RenderGraph, <B as Edge<RenderGraph>>::Result>,
2775    {
2776        self.pass.graph.bind_node(binding)
2777    }
2778
2779    /// Returns information used to create a node.
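    ///
    /// # Examples
    ///
    /// A minimal sketch that reads back the size a bound buffer node was created with:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<ComputePipeline> = todo!();
    /// # let my_buf: BufferNode = todo!();
    /// let pass = my_graph.begin_pass("my pass").bind_pipeline(&my_pipeline);
    /// let size = pass.node_info(my_buf).size;
    /// ```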
2780    pub fn node_info<N>(&self, node: N) -> <N as Information>::Info
2781    where
2782        N: Information,
2783    {
2784        node.get(self.pass.graph)
2785    }
2786
2787    fn push_node_view_bind(
2788        &mut self,
2789        node: impl Node,
2790        view_info: impl Into<ViewType>,
2791        binding: Descriptor,
2792    ) {
2793        let node_idx = node.index();
2794        self.pass.assert_bound_graph_node(node);
2795
2796        assert!(
2797            self.pass
2798                .as_mut()
2799                .execs
2800                .last_mut()
2801                .unwrap()
2802                .bindings
2803                .insert(binding, (node_idx, Some(view_info.into())))
2804                .is_none(),
2805            "descriptor {binding:?} has already been bound"
2806        );
2807    }
2808
2809    /// Informs the pass that the next recorded command buffer will read the given `node` at the
2810    /// specified shader descriptor.
2811    ///
2812    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2813    ///
2814    /// This function must be called for `node` before it is read within a `record` function. For
2815    /// more specific access, see [`PipelinePassRef::access_descriptor`].
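    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming descriptor `0` of `my_pipeline` is a sampled image:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<ComputePipeline> = todo!();
    /// # let my_image: ImageNode = todo!();
    /// my_graph.begin_pass("my pass")
    ///         .bind_pipeline(&my_pipeline)
    ///         .read_descriptor(0, my_image)
    ///         .record_compute(move |compute, _| {
    ///             compute.dispatch(8, 8, 1);
    ///         });
    /// ```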
2816    pub fn read_descriptor<N>(self, descriptor: impl Into<Descriptor>, node: N) -> Self
2817    where
2818        N: Information,
2819        N: View,
2820        <N as View>::Information: Into<ViewType>,
2821        <N as View>::Information: From<<N as Information>::Info>,
2822        <N as View>::Subresource: From<<N as View>::Information>,
2823    {
2824        let view_info = node.get(self.pass.graph);
2825        self.read_descriptor_as(descriptor, node, view_info)
2826    }
2827
2828    /// Informs the pass that the next recorded command buffer will read the given `node` at the
2829    /// specified shader descriptor. The node will be interpreted using `view_info`.
2830    ///
2831    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2832    ///
2833    /// This function must be called for `node` before it is read within a `record` function. For
2834    /// more specific access, see [`PipelinePassRef::access_descriptor_as`].
2835    pub fn read_descriptor_as<N>(
2836        self,
2837        descriptor: impl Into<Descriptor>,
2838        node: N,
2839        view_info: impl Into<N::Information>,
2840    ) -> Self
2841    where
2842        N: View,
2843        <N as View>::Information: Into<ViewType>,
2844        <N as View>::Subresource: From<<N as View>::Information>,
2845    {
2846        let view_info = view_info.into();
2847        let subresource = <N as View>::Subresource::from(view_info);
2848
2849        self.read_descriptor_subrange(descriptor, node, view_info, subresource)
2850    }
2851
2852    /// Informs the pass that the next recorded command buffer will read the `subresource` of `node`
2853    /// at the specified shader descriptor. The node will be interpreted using `view_info`.
2854    ///
2855    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2856    ///
2857    /// This function must be called for `node` before it is read within a `record` function. For
2858    /// more specific access, see [`PipelinePassRef::access_descriptor_subrange`].
2859    pub fn read_descriptor_subrange<N>(
2860        self,
2861        descriptor: impl Into<Descriptor>,
2862        node: N,
2863        view_info: impl Into<N::Information>,
2864        subresource: impl Into<N::Subresource>,
2865    ) -> Self
2866    where
2867        N: View,
2868        <N as View>::Information: Into<ViewType>,
2869    {
2870        let access = <T as Access>::DEFAULT_READ;
2871        self.access_descriptor_subrange(descriptor, node, access, view_info, subresource)
2872    }
2873
2874    /// Informs the pass that the next recorded command buffer will read the given `node`.
2875    ///
2876    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2877    ///
2878    /// This function must be called for `node` before it is read within a `record` function. For
2879    /// more specific access, see [`PipelinePassRef::access_node`].
2880    pub fn read_node(mut self, node: impl Node + Information) -> Self {
2881        self.read_node_mut(node);
2882
2883        self
2884    }
2885
2886    /// Informs the pass that the next recorded command buffer will read the given `node`.
2887    ///
2888    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2889    ///
2890    /// This function must be called for `node` before it is read within a `record` function. For
2891    /// more specific access, see [`PipelinePassRef::access_node_mut`].
2892    pub fn read_node_mut(&mut self, node: impl Node + Information) {
2893        let access = <T as Access>::DEFAULT_READ;
2894        self.access_node_mut(node, access);
2895    }
2896
2897    /// Informs the pass that the next recorded command buffer will read the `subresource` of
2898    /// `node`.
2899    ///
2900    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2901    ///
2902    /// This function must be called for `node` before it is read within a `record` function. For
2903    /// more specific access, see [`PipelinePassRef::access_node_subrange`].
2904    pub fn read_node_subrange<N>(mut self, node: N, subresource: impl Into<N::Subresource>) -> Self
2905    where
2906        N: View,
2907    {
2908        self.read_node_subrange_mut(node, subresource);
2909
2910        self
2911    }
2912
2913    /// Informs the pass that the next recorded command buffer will read the `subresource` of
2914    /// `node`.
2915    ///
2916    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2917    ///
2918    /// This function must be called for `node` before it is read within a `record` function. For
2919    /// more specific access, see [`PipelinePassRef::access_node_subrange_mut`].
2920    pub fn read_node_subrange_mut<N>(&mut self, node: N, subresource: impl Into<N::Subresource>)
2921    where
2922        N: View,
2923    {
2924        let access = <T as Access>::DEFAULT_READ;
2925        self.access_node_subrange_mut(node, access, subresource);
2926    }
2927
2928    /// Finalizes a pass and returns the render graph so that additional passes may be added.
2929    pub fn submit_pass(self) -> &'a mut RenderGraph {
2930        self.pass.submit_pass()
2931    }
2932
2933    /// Informs the pass that the next recorded command buffer will write the given `node` at the
2934    /// specified shader descriptor.
2935    ///
2936    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2937    ///
2938    /// This function must be called for `node` before it is written within a `record` function. For
2939    /// more specific access, see [`PipelinePassRef::access_descriptor`].
2940    pub fn write_descriptor<N>(self, descriptor: impl Into<Descriptor>, node: N) -> Self
2941    where
2942        N: Information,
2943        N: View,
2944        <N as View>::Information: Into<ViewType>,
2945        <N as View>::Information: From<<N as Information>::Info>,
2946        <N as View>::Subresource: From<<N as View>::Information>,
2947    {
2948        let view_info = node.get(self.pass.graph);
2949        self.write_descriptor_as(descriptor, node, view_info)
2950    }
2951
2952    /// Informs the pass that the next recorded command buffer will write the given `node` at the
2953    /// specified shader descriptor. The node will be interpreted using `view_info`.
2954    ///
2955    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2956    ///
2957    /// This function must be called for `node` before it is written within a `record` function. For
2958    /// more specific access, see [`PipelinePassRef::access_descriptor_as`].
2959    pub fn write_descriptor_as<N>(
2960        self,
2961        descriptor: impl Into<Descriptor>,
2962        node: N,
2963        view_info: impl Into<N::Information>,
2964    ) -> Self
2965    where
2966        N: View,
2967        <N as View>::Information: Into<ViewType>,
2968        <N as View>::Subresource: From<<N as View>::Information>,
2969    {
2970        let view_info = view_info.into();
2971        let subresource = <N as View>::Subresource::from(view_info);
2972
2973        self.write_descriptor_subrange(descriptor, node, view_info, subresource)
2974    }
2975
2976    /// Informs the pass that the next recorded command buffer will write the `subresource` of
2977    /// `node` at the specified shader descriptor. The node will be interpreted using `view_info`.
2978    ///
2979    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
2980    ///
2981    /// This function must be called for `node` before it is written within a `record` function. For
2982    /// more specific access, see [`PipelinePassRef::access_descriptor_subrange`].
2983    pub fn write_descriptor_subrange<N>(
2984        self,
2985        descriptor: impl Into<Descriptor>,
2986        node: N,
2987        view_info: impl Into<N::Information>,
2988        subresource: impl Into<N::Subresource>,
2989    ) -> Self
2990    where
2991        N: View,
2992        <N as View>::Information: Into<ViewType>,
2993    {
2994        let access = <T as Access>::DEFAULT_WRITE;
2995        self.access_descriptor_subrange(descriptor, node, access, view_info, subresource)
2996    }
2997
2998    /// Informs the pass that the next recorded command buffer will write the given `node`.
2999    ///
3000    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
3001    ///
3002    /// This function must be called for `node` before it is written within a `record` function. For
3003    /// more specific access, see [`PipelinePassRef::access_node`].
3004    pub fn write_node(mut self, node: impl Node + Information) -> Self {
3005        self.write_node_mut(node);
3006
3007        self
3008    }
3009
3010    /// Informs the pass that the next recorded command buffer will write the given `node`.
3011    ///
3012    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
3013    ///
3014    /// This function must be called for `node` before it is written within a `record` function. For
3015    /// more specific access, see [`PipelinePassRef::access_node_mut`].
3016    pub fn write_node_mut(&mut self, node: impl Node + Information) {
3017        let access = <T as Access>::DEFAULT_WRITE;
3018        self.access_node_mut(node, access);
3019    }
3020
3021    /// Informs the pass that the next recorded command buffer will write the `subresource` of
3022    /// `node`.
3023    ///
3024    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
3025    ///
3026    /// This function must be called for `node` before it is written within a `record` function. For
3027    /// more specific access, see [`PipelinePassRef::access_node_subrange`].
3028    pub fn write_node_subrange<N>(mut self, node: N, subresource: impl Into<N::Subresource>) -> Self
3029    where
3030        N: View,
3031    {
3032        self.write_node_subrange_mut(node, subresource);
3033
3034        self
3035    }
3036
3037    /// Informs the pass that the next recorded command buffer will write the `subresource` of
3038    /// `node`.
3039    ///
3040    /// The [`AccessType`] is inferred by the currently bound pipeline. See [`Access`] for details.
3041    ///
3042    /// This function must be called for `node` before it is written within a `record` function. For
3043    /// more specific access, see [`PipelinePassRef::access_node_subrange_mut`].
3044    pub fn write_node_subrange_mut<N>(&mut self, node: N, subresource: impl Into<N::Subresource>)
3045    where
3046        N: View,
3047    {
3048        let access = <T as Access>::DEFAULT_WRITE;
3049        self.access_node_subrange_mut(node, access, subresource);
3050    }
3051}
3052
3053impl PipelinePassRef<'_, ComputePipeline> {
3054    /// Begin recording a compute command buffer.
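    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `my_pipeline` is a compute pipeline created elsewhere:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<ComputePipeline> = todo!();
    /// my_graph.begin_pass("my compute pass")
    ///         .bind_pipeline(&my_pipeline)
    ///         .record_compute(move |compute, _| {
    ///             // During this closure we have access to the compute methods!
    ///             compute.dispatch(128, 1, 1);
    ///         });
    /// ```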
3055    pub fn record_compute(
3056        mut self,
3057        func: impl FnOnce(Compute<'_>, Bindings<'_>) + Send + 'static,
3058    ) -> Self {
3059        let pipeline = Arc::clone(
3060            self.pass
3061                .as_ref()
3062                .execs
3063                .last()
3064                .unwrap()
3065                .pipeline
3066                .as_ref()
3067                .unwrap()
3068                .unwrap_compute(),
3069        );
3070
3071        self.pass.push_execute(move |device, cmd_buf, bindings| {
3072            func(
3073                Compute {
3074                    bindings,
3075                    cmd_buf,
3076                    device,
3077                    pipeline,
3078                },
3079                bindings,
3080            );
3081        });
3082
3083        self
3084    }
3085}
3086
3087impl PipelinePassRef<'_, GraphicPipeline> {
3088    /// Specifies `VK_ATTACHMENT_LOAD_OP_DONT_CARE` for the render pass attachment, and attaches
3089    /// an image to the framebuffer without loading its existing contents.
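    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `my_image` will be fully overwritten by the subpass:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<GraphicPipeline> = todo!();
    /// # let my_image: ImageNode = todo!();
    /// my_graph.begin_pass("fullscreen draw")
    ///         .bind_pipeline(&my_pipeline)
    ///         .attach_color(0, my_image)
    ///         .record_subpass(move |subpass, _| {
    ///             // A single fullscreen triangle covers every pixel
    ///             subpass.draw(3, 1, 0, 0);
    ///         });
    /// ```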
3090    pub fn attach_color(
3091        self,
3092        attachment_idx: AttachmentIndex,
3093        image: impl Into<AnyImageNode>,
3094    ) -> Self {
3095        let image: AnyImageNode = image.into();
3096        let image_info = image.get(self.pass.graph);
3097        let image_view_info: ImageViewInfo = image_info.into();
3098
3099        self.attach_color_as(attachment_idx, image, image_view_info)
3100    }
3101
3102    /// Specifies `VK_ATTACHMENT_LOAD_OP_DONT_CARE` for the render pass attachment, and attaches
3103    /// an image to the framebuffer without loading its existing contents.
3104    pub fn attach_color_as(
3105        mut self,
3106        attachment_idx: AttachmentIndex,
3107        image: impl Into<AnyImageNode>,
3108        image_view_info: impl Into<ImageViewInfo>,
3109    ) -> Self {
3110        let image = image.into();
3111        let image_view_info = image_view_info.into();
3112        let node_idx = image.index();
3113        let (_, sample_count) = self.image_info(node_idx);
3114
3115        debug_assert!(
3116            !self
3117                .pass
3118                .as_ref()
3119                .execs
3120                .last()
3121                .unwrap()
3122                .color_clears
3123                .contains_key(&attachment_idx),
3124            "color attachment {attachment_idx} already attached via clear"
3125        );
3126        debug_assert!(
3127            !self
3128                .pass
3129                .as_ref()
3130                .execs
3131                .last()
3132                .unwrap()
3133                .color_loads
3134                .contains_key(&attachment_idx),
3135            "color attachment {attachment_idx} already attached via load"
3136        );
3137
3138        self.pass
3139            .as_mut()
3140            .execs
3141            .last_mut()
3142            .unwrap()
3143            .color_attachments
3144            .insert(
3145                attachment_idx,
3146                Attachment::new(image_view_info, sample_count, node_idx),
3147            );
3148
3149        debug_assert!(
3150            Attachment::are_compatible(
3151                self.pass
3152                    .as_ref()
3153                    .execs
3154                    .last()
3155                    .unwrap()
3156                    .color_resolves
3157                    .get(&attachment_idx)
3158                    .map(|(attachment, _)| *attachment),
3159                self.pass
3160                    .as_ref()
3161                    .execs
3162                    .last()
3163                    .unwrap()
3164                    .color_attachments
3165                    .get(&attachment_idx)
3166                    .copied()
3167            ),
3168            "color attachment {attachment_idx} incompatible with existing resolve"
3169        );
3170        debug_assert!(
3171            Attachment::are_compatible(
3172                self.pass
3173                    .as_ref()
3174                    .execs
3175                    .last()
3176                    .unwrap()
3177                    .color_stores
3178                    .get(&attachment_idx)
3179                    .copied(),
3180                self.pass
3181                    .as_ref()
3182                    .execs
3183                    .last()
3184                    .unwrap()
3185                    .color_attachments
3186                    .get(&attachment_idx)
3187                    .copied()
3188            ),
3189            "color attachment {attachment_idx} incompatible with existing store"
3190        );
3191
3192        self.pass.push_node_access(
3193            image,
3194            AccessType::ColorAttachmentWrite,
3195            Subresource::Image(image_view_info.into()),
3196        );
3197
3198        self
3199    }
3200
3201    /// Specifies `VK_ATTACHMENT_LOAD_OP_DONT_CARE` for the render pass attachment, and attaches
3202    /// a depth/stencil image to the framebuffer without loading its existing contents.
3203    pub fn attach_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
3204        let image: AnyImageNode = image.into();
3205        let image_info = image.get(self.pass.graph);
3206        let image_view_info: ImageViewInfo = image_info.into();
3207
3208        self.attach_depth_stencil_as(image, image_view_info)
3209    }
3210
3211    /// Specifies `VK_ATTACHMENT_LOAD_OP_DONT_CARE` for the render pass attachment, and attaches
3212    /// a depth/stencil image to the framebuffer without loading its existing contents.
3213    pub fn attach_depth_stencil_as(
3214        mut self,
3215        image: impl Into<AnyImageNode>,
3216        image_view_info: impl Into<ImageViewInfo>,
3217    ) -> Self {
3218        let image = image.into();
3219        let image_view_info = image_view_info.into();
3220        let node_idx = image.index();
3221        let (_, sample_count) = self.image_info(node_idx);
3222
3223        debug_assert!(
3224            self.pass
3225                .as_ref()
3226                .execs
3227                .last()
3228                .unwrap()
3229                .depth_stencil_clear
3230                .is_none(),
3231            "depth/stencil attachment already attached via clear"
3232        );
3233        debug_assert!(
3234            self.pass
3235                .as_ref()
3236                .execs
3237                .last()
3238                .unwrap()
3239                .depth_stencil_load
3240                .is_none(),
3241            "depth/stencil attachment already attached via load"
3242        );
3243
3244        self.pass
3245            .as_mut()
3246            .execs
3247            .last_mut()
3248            .unwrap()
3249            .depth_stencil_attachment =
3250            Some(Attachment::new(image_view_info, sample_count, node_idx));
3251
3252        debug_assert!(
3253            Attachment::are_compatible(
3254                self.pass
3255                    .as_ref()
3256                    .execs
3257                    .last()
3258                    .unwrap()
3259                    .depth_stencil_resolve
3260                    .map(|(attachment, ..)| attachment),
3261                self.pass
3262                    .as_ref()
3263                    .execs
3264                    .last()
3265                    .unwrap()
3266                    .depth_stencil_attachment
3267            ),
3268            "depth/stencil attachment incompatible with existing resolve"
3269        );
3270        debug_assert!(
3271            Attachment::are_compatible(
3272                self.pass.as_ref().execs.last().unwrap().depth_stencil_store,
3273                self.pass
3274                    .as_ref()
3275                    .execs
3276                    .last()
3277                    .unwrap()
3278                    .depth_stencil_attachment
3279            ),
3280            "depth/stencil attachment incompatible with existing store"
3281        );
3282
3283        self.pass.push_node_access(
3284            image,
3285            if image_view_info
3286                .aspect_mask
3287                .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
3288            {
3289                AccessType::DepthStencilAttachmentWrite
3290            } else if image_view_info
3291                .aspect_mask
3292                .contains(vk::ImageAspectFlags::DEPTH)
3293            {
3294                AccessType::DepthAttachmentWriteStencilReadOnly
3295            } else {
3296                AccessType::StencilAttachmentWriteDepthReadOnly
3297            },
3298            Subresource::Image(image_view_info.into()),
3299        );
3300
3301        self
3302    }
3303
3304    /// Clears the render pass attachment of any existing data.
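    ///
    /// # Examples
    ///
    /// A minimal sketch that clears to transparent black, draws, and stores the result; `my_image`
    /// and `my_pipeline` are assumed:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<GraphicPipeline> = todo!();
    /// # let my_image: ImageNode = todo!();
    /// my_graph.begin_pass("clear and draw")
    ///         .bind_pipeline(&my_pipeline)
    ///         .clear_color(0, my_image)
    ///         .store_color(0, my_image)
    ///         .record_subpass(move |subpass, _| {
    ///             subpass.draw(3, 1, 0, 0);
    ///         });
    /// ```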
3305    pub fn clear_color(
3306        self,
3307        attachment_idx: AttachmentIndex,
3308        image: impl Into<AnyImageNode>,
3309    ) -> Self {
3310        self.clear_color_value(attachment_idx, image, [0.0, 0.0, 0.0, 0.0])
3311    }
3312
3313    /// Clears the render pass attachment of any existing data.
3314    pub fn clear_color_value(
3315        self,
3316        attachment_idx: AttachmentIndex,
3317        image: impl Into<AnyImageNode>,
3318        color: impl Into<ClearColorValue>,
3319    ) -> Self {
3320        let image: AnyImageNode = image.into();
3321        let image_info = image.get(self.pass.graph);
3322        let image_view_info: ImageViewInfo = image_info.into();
3323
3324        self.clear_color_value_as(attachment_idx, image, color, image_view_info)
3325    }
3326
3327    /// Clears the render pass attachment of any existing data.
3328    pub fn clear_color_value_as(
3329        mut self,
3330        attachment_idx: AttachmentIndex,
3331        image: impl Into<AnyImageNode>,
3332        color: impl Into<ClearColorValue>,
3333        image_view_info: impl Into<ImageViewInfo>,
3334    ) -> Self {
3335        let image = image.into();
3336        let image_view_info = image_view_info.into();
3337        let node_idx = image.index();
3338        let (_, sample_count) = self.image_info(node_idx);
3339
3340        let color = color.into();
3341
3342        debug_assert!(
3343            !self
3344                .pass
3345                .as_ref()
3346                .execs
3347                .last()
3348                .unwrap()
3349                .color_attachments
3350                .contains_key(&attachment_idx),
3351            "color attachment {attachment_idx} already attached"
3352        );
3353        debug_assert!(
3354            !self
3355                .pass
3356                .as_ref()
3357                .execs
3358                .last()
3359                .unwrap()
3360                .color_loads
3361                .contains_key(&attachment_idx),
3362            "color attachment {attachment_idx} already attached via load"
3363        );
3364
3365        self.pass
3366            .as_mut()
3367            .execs
3368            .last_mut()
3369            .unwrap()
3370            .color_clears
3371            .insert(
3372                attachment_idx,
3373                (
3374                    Attachment::new(image_view_info, sample_count, node_idx),
3375                    color,
3376                ),
3377            );
3378
3379        debug_assert!(
3380            Attachment::are_compatible(
3381                self.pass
3382                    .as_ref()
3383                    .execs
3384                    .last()
3385                    .unwrap()
3386                    .color_resolves
3387                    .get(&attachment_idx)
3388                    .map(|(attachment, _)| *attachment),
3389                self.pass
3390                    .as_ref()
3391                    .execs
3392                    .last()
3393                    .unwrap()
3394                    .color_clears
3395                    .get(&attachment_idx)
3396                    .map(|(attachment, _)| *attachment)
3397            ),
3398            "color attachment {attachment_idx} clear incompatible with existing resolve"
3399        );
3400        debug_assert!(
3401            Attachment::are_compatible(
3402                self.pass
3403                    .as_ref()
3404                    .execs
3405                    .last()
3406                    .unwrap()
3407                    .color_stores
3408                    .get(&attachment_idx)
3409                    .copied(),
3410                self.pass
3411                    .as_ref()
3412                    .execs
3413                    .last()
3414                    .unwrap()
3415                    .color_clears
3416                    .get(&attachment_idx)
3417                    .map(|(attachment, _)| *attachment)
3418            ),
3419            "color attachment {attachment_idx} clear incompatible with existing store"
3420        );
3421
3422        let mut image_access = AccessType::ColorAttachmentWrite;
3423        let image_range = image_view_info.into();
3424
3425        // Upgrade existing read access to read-write
3426        if let Some(accesses) = self
3427            .pass
3428            .as_mut()
3429            .execs
3430            .last_mut()
3431            .unwrap()
3432            .accesses
3433            .get_mut(&node_idx)
3434        {
3435            for SubresourceAccess {
3436                access,
3437                subresource,
3438            } in accesses
3439            {
3440                let access_image_range = *subresource.as_image().unwrap();
3441                if !image_subresource_range_intersects(access_image_range, image_range) {
3442                    continue;
3443                }
3444
3445                image_access = match *access {
3446                    AccessType::ColorAttachmentRead | AccessType::ColorAttachmentReadWrite => {
3447                        AccessType::ColorAttachmentReadWrite
3448                    }
3449                    AccessType::ColorAttachmentWrite => AccessType::ColorAttachmentWrite,
3450                    _ => continue,
3451                };
3452
3453                *access = image_access;
3454
3455                // If the clear access is a subset of the existing access range there is no need
3456                // to push a new access
3457                if image_subresource_range_contains(access_image_range, image_range) {
3458                    return self;
3459                }
3460            }
3461        }
3462
3463        self.pass
3464            .push_node_access(image, image_access, Subresource::Image(image_range));
3465
3466        self
3467    }
3468
3469    /// Clears the render pass attachment of any existing data.
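    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `my_depth_image` has a depth/stencil-capable format:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<GraphicPipeline> = todo!();
    /// # let my_image: ImageNode = todo!();
    /// # let my_depth_image: ImageNode = todo!();
    /// my_graph.begin_pass("draw with depth")
    ///         .bind_pipeline(&my_pipeline)
    ///         .clear_color(0, my_image)
    ///         .clear_depth_stencil(my_depth_image)
    ///         .store_color(0, my_image)
    ///         .record_subpass(move |subpass, _| {
    ///             subpass.draw(36, 1, 0, 0);
    ///         });
    /// ```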
3470    pub fn clear_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
3471        self.clear_depth_stencil_value(image, 1.0, 0)
3472    }
3473
3474    /// Clears the render pass attachment of any existing data.
3475    pub fn clear_depth_stencil_value(
3476        self,
3477        image: impl Into<AnyImageNode>,
3478        depth: f32,
3479        stencil: u32,
3480    ) -> Self {
3481        let image: AnyImageNode = image.into();
3482        let image_info = image.get(self.pass.graph);
3483        let image_view_info: ImageViewInfo = image_info.into();
3484
3485        self.clear_depth_stencil_value_as(image, depth, stencil, image_view_info)
3486    }
3487
3488    /// Clears the render pass attachment of any existing data.
3489    pub fn clear_depth_stencil_value_as(
3490        mut self,
3491        image: impl Into<AnyImageNode>,
3492        depth: f32,
3493        stencil: u32,
3494        image_view_info: impl Into<ImageViewInfo>,
3495    ) -> Self {
3496        let image = image.into();
3497        let image_view_info = image_view_info.into();
3498        let node_idx = image.index();
3499        let (_, sample_count) = self.image_info(node_idx);
3500
3501        debug_assert!(
3502            self.pass
3503                .as_ref()
3504                .execs
3505                .last()
3506                .unwrap()
3507                .depth_stencil_attachment
3508                .is_none(),
3509            "depth/stencil attachment already attached"
3510        );
3511        debug_assert!(
3512            self.pass
3513                .as_ref()
3514                .execs
3515                .last()
3516                .unwrap()
3517                .depth_stencil_load
3518                .is_none(),
3519            "depth/stencil attachment already attached via load"
3520        );
3521
3522        self.pass
3523            .as_mut()
3524            .execs
3525            .last_mut()
3526            .unwrap()
3527            .depth_stencil_clear = Some((
3528            Attachment::new(image_view_info, sample_count, node_idx),
3529            vk::ClearDepthStencilValue { depth, stencil },
3530        ));
3531
3532        debug_assert!(
3533            Attachment::are_compatible(
3534                self.pass
3535                    .as_ref()
3536                    .execs
3537                    .last()
3538                    .unwrap()
3539                    .depth_stencil_resolve
3540                    .map(|(attachment, ..)| attachment),
3541                self.pass
3542                    .as_ref()
3543                    .execs
3544                    .last()
3545                    .unwrap()
3546                    .depth_stencil_clear
3547                    .map(|(attachment, _)| attachment)
3548            ),
3549            "depth/stencil attachment clear incompatible with existing resolve"
3550        );
3551        debug_assert!(
3552            Attachment::are_compatible(
3553                self.pass.as_ref().execs.last().unwrap().depth_stencil_store,
3554                self.pass
3555                    .as_ref()
3556                    .execs
3557                    .last()
3558                    .unwrap()
3559                    .depth_stencil_clear
3560                    .map(|(attachment, _)| attachment)
3561            ),
3562            "depth/stencil attachment clear incompatible with existing store"
3563        );
3564
3565        let mut image_access = if image_view_info
3566            .aspect_mask
3567            .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
3568        {
3569            AccessType::DepthStencilAttachmentWrite
3570        } else if image_view_info
3571            .aspect_mask
3572            .contains(vk::ImageAspectFlags::DEPTH)
3573        {
3574            AccessType::DepthAttachmentWriteStencilReadOnly
3575        } else {
3576            debug_assert!(
3577                image_view_info
3578                    .aspect_mask
3579                    .contains(vk::ImageAspectFlags::STENCIL)
3580            );
3581
3582            AccessType::StencilAttachmentWriteDepthReadOnly
3583        };
3584        let image_range = image_view_info.into();
3585
3586        // Upgrade existing read access to read-write
3587        if let Some(accesses) = self
3588            .pass
3589            .as_mut()
3590            .execs
3591            .last_mut()
3592            .unwrap()
3593            .accesses
3594            .get_mut(&node_idx)
3595        {
3596            for SubresourceAccess {
3597                access,
3598                subresource,
3599            } in accesses
3600            {
3601                let access_image_range = *subresource.as_image().unwrap();
3602                if !image_subresource_range_intersects(access_image_range, image_range) {
3603                    continue;
3604                }
3605
3606                image_access = match *access {
3607                    AccessType::DepthAttachmentWriteStencilReadOnly => {
3608                        if image_view_info
3609                            .aspect_mask
3610                            .contains(vk::ImageAspectFlags::STENCIL)
3611                        {
3612                            AccessType::DepthStencilAttachmentReadWrite
3613                        } else {
3614                            AccessType::DepthAttachmentWriteStencilReadOnly
3615                        }
3616                    }
3617                    AccessType::DepthStencilAttachmentRead => {
3618                        if !image_view_info
3619                            .aspect_mask
3620                            .contains(vk::ImageAspectFlags::DEPTH)
3621                        {
3622                            AccessType::StencilAttachmentWriteDepthReadOnly
3623                        } else {
3624                            AccessType::DepthAttachmentWriteStencilReadOnly
3625                        }
3626                    }
3627                    AccessType::DepthStencilAttachmentWrite => {
3628                        AccessType::DepthStencilAttachmentWrite
3629                    }
3630                    AccessType::StencilAttachmentWriteDepthReadOnly => {
3631                        if image_view_info
3632                            .aspect_mask
3633                            .contains(vk::ImageAspectFlags::DEPTH)
3634                        {
3635                            AccessType::DepthStencilAttachmentReadWrite
3636                        } else {
3637                            AccessType::StencilAttachmentWriteDepthReadOnly
3638                        }
3639                    }
3640                    _ => continue,
3641                };
3642
3643                *access = image_access;
3644
3645                // If the clear access is a subset of the existing access range there is no need
3646                // to push a new access
3647                if image_subresource_range_contains(access_image_range, image_range) {
3648                    return self;
3649                }
3650            }
3651        }
3652
3653        self.pass
3654            .push_node_access(image, image_access, Subresource::Image(image_range));
3655
3656        self
3657    }
3658
3659    fn image_info(&self, node_idx: NodeIndex) -> (vk::Format, SampleCount) {
3660        let image_info = self.pass.graph.bindings[node_idx]
3661            .as_driver_image()
3662            .unwrap()
3663            .info;
3664
3665        (image_info.fmt, image_info.sample_count)
3666    }
3667
3668    /// Specifies `VK_ATTACHMENT_LOAD_OP_LOAD` for the render pass attachment, and loads an image
3669    /// into the framebuffer.
3670    pub fn load_color(
3671        self,
3672        attachment_idx: AttachmentIndex,
3673        image: impl Into<AnyImageNode>,
3674    ) -> Self {
3675        let image: AnyImageNode = image.into();
3676        let image_info = image.get(self.pass.graph);
3677        let image_view_info: ImageViewInfo = image_info.into();
3678
3679        self.load_color_as(attachment_idx, image, image_view_info)
3680    }
3681
3682    /// Specifies `VK_ATTACHMENT_LOAD_OP_LOAD` for the render pass attachment, and loads an image
3683    /// into the framebuffer.
3684    pub fn load_color_as(
3685        mut self,
3686        attachment_idx: AttachmentIndex,
3687        image: impl Into<AnyImageNode>,
3688        image_view_info: impl Into<ImageViewInfo>,
3689    ) -> Self {
3690        let image = image.into();
3691        let image_view_info = image_view_info.into();
3692        let node_idx = image.index();
3693        let (_, sample_count) = self.image_info(node_idx);
3694
3695        debug_assert!(
3696            !self
3697                .pass
3698                .as_ref()
3699                .execs
3700                .last()
3701                .unwrap()
3702                .color_attachments
3703                .contains_key(&attachment_idx),
3704            "color attachment {attachment_idx} already attached"
3705        );
3706        debug_assert!(
3707            !self
3708                .pass
3709                .as_ref()
3710                .execs
3711                .last()
3712                .unwrap()
3713                .color_clears
3714                .contains_key(&attachment_idx),
3715            "color attachment {attachment_idx} already attached via clear"
3716        );
3717
3718        self.pass
3719            .as_mut()
3720            .execs
3721            .last_mut()
3722            .unwrap()
3723            .color_loads
3724            .insert(
3725                attachment_idx,
3726                Attachment::new(image_view_info, sample_count, node_idx),
3727            );
3728
3729        debug_assert!(
3730            Attachment::are_compatible(
3731                self.pass
3732                    .as_ref()
3733                    .execs
3734                    .last()
3735                    .unwrap()
3736                    .color_resolves
3737                    .get(&attachment_idx)
3738                    .map(|(attachment, _)| *attachment),
3739                self.pass
3740                    .as_ref()
3741                    .execs
3742                    .last()
3743                    .unwrap()
3744                    .color_loads
3745                    .get(&attachment_idx)
3746                    .copied()
3747            ),
3748            "color attachment {attachment_idx} load incompatible with existing resolve"
3749        );
3750        debug_assert!(
3751            Attachment::are_compatible(
3752                self.pass
3753                    .as_ref()
3754                    .execs
3755                    .last()
3756                    .unwrap()
3757                    .color_stores
3758                    .get(&attachment_idx)
3759                    .copied(),
3760                self.pass
3761                    .as_ref()
3762                    .execs
3763                    .last()
3764                    .unwrap()
3765                    .color_loads
3766                    .get(&attachment_idx)
3767                    .copied()
3768            ),
3769            "color attachment {attachment_idx} load incompatible with existing store"
3770        );
3771
3772        let mut image_access = AccessType::ColorAttachmentRead;
3773        let image_range = image_view_info.into();
3774
3775        // Upgrade existing write access to read-write
3776        if let Some(accesses) = self
3777            .pass
3778            .as_mut()
3779            .execs
3780            .last_mut()
3781            .unwrap()
3782            .accesses
3783            .get_mut(&node_idx)
3784        {
3785            for SubresourceAccess {
3786                access,
3787                subresource,
3788            } in accesses
3789            {
3790                let access_image_range = *subresource.as_image().unwrap();
3791                if !image_subresource_range_intersects(access_image_range, image_range) {
3792                    continue;
3793                }
3794
3795                image_access = match *access {
3796                    AccessType::ColorAttachmentRead => AccessType::ColorAttachmentRead,
3797                    AccessType::ColorAttachmentReadWrite | AccessType::ColorAttachmentWrite => {
3798                        AccessType::ColorAttachmentReadWrite
3799                    }
3800                    _ => continue,
3801                };
3802
3803                *access = image_access;
3804
3805                // If the load access is a subset of the existing access range there is no need
3806                // to push a new access
3807                if image_subresource_range_contains(access_image_range, image_range) {
3808                    return self;
3809                }
3810            }
3811        }
3812
3813        self.pass
3814            .push_node_access(image, image_access, Subresource::Image(image_range));
3815
3816        self
3817    }
3818
3819    /// Specifies `VK_ATTACHMENT_LOAD_OP_LOAD` for the render pass attachment, and loads an image
3820    /// into the framebuffer.
3821    pub fn load_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
3822        let image: AnyImageNode = image.into();
3823        let image_info = image.get(self.pass.graph);
3824        let image_view_info: ImageViewInfo = image_info.into();
3825
3826        self.load_depth_stencil_as(image, image_view_info)
3827    }
3828
3829    /// Specifies `VK_ATTACHMENT_LOAD_OP_LOAD` for the render pass attachment, and loads an image
3830    /// into the framebuffer.
3831    pub fn load_depth_stencil_as(
3832        mut self,
3833        image: impl Into<AnyImageNode>,
3834        image_view_info: impl Into<ImageViewInfo>,
3835    ) -> Self {
3836        let image = image.into();
3837        let image_view_info = image_view_info.into();
3838        let node_idx = image.index();
3839        let (_, sample_count) = self.image_info(node_idx);
3840
3841        debug_assert!(
3842            self.pass
3843                .as_ref()
3844                .execs
3845                .last()
3846                .unwrap()
3847                .depth_stencil_attachment
3848                .is_none(),
3849            "depth/stencil attachment already attached"
3850        );
3851        debug_assert!(
3852            self.pass
3853                .as_ref()
3854                .execs
3855                .last()
3856                .unwrap()
3857                .depth_stencil_clear
3858                .is_none(),
3859            "depth/stencil attachment already attached via clear"
3860        );
3861
3862        self.pass
3863            .as_mut()
3864            .execs
3865            .last_mut()
3866            .unwrap()
3867            .depth_stencil_load = Some(Attachment::new(image_view_info, sample_count, node_idx));
3868
3869        debug_assert!(
3870            Attachment::are_compatible(
3871                self.pass
3872                    .as_ref()
3873                    .execs
3874                    .last()
3875                    .unwrap()
3876                    .depth_stencil_resolve
3877                    .map(|(attachment, ..)| attachment),
3878                self.pass.as_ref().execs.last().unwrap().depth_stencil_load
3879            ),
3880            "depth/stencil attachment load incompatible with existing resolve"
3881        );
3882        debug_assert!(
3883            Attachment::are_compatible(
3884                self.pass.as_ref().execs.last().unwrap().depth_stencil_store,
3885                self.pass.as_ref().execs.last().unwrap().depth_stencil_load
3886            ),
3887            "depth/stencil attachment load incompatible with existing store"
3888        );
3889
3890        let mut image_access = AccessType::DepthStencilAttachmentRead;
3891        let image_range = image_view_info.into();
3892
3893        // Upgrade existing write access to read-write
3894        if let Some(accesses) = self
3895            .pass
3896            .as_mut()
3897            .execs
3898            .last_mut()
3899            .unwrap()
3900            .accesses
3901            .get_mut(&node_idx)
3902        {
3903            for SubresourceAccess {
3904                access,
3905                subresource,
3906            } in accesses
3907            {
3908                let access_image_range = *subresource.as_image().unwrap();
3909                if !image_subresource_range_intersects(access_image_range, image_range) {
3910                    continue;
3911                }
3912
3913                image_access = match *access {
3914                    AccessType::DepthAttachmentWriteStencilReadOnly => {
3915                        AccessType::DepthAttachmentWriteStencilReadOnly
3916                    }
3917                    AccessType::DepthStencilAttachmentRead => {
3918                        AccessType::DepthStencilAttachmentRead
3919                    }
3920                    AccessType::DepthStencilAttachmentWrite => {
3921                        AccessType::DepthStencilAttachmentReadWrite
3922                    }
3923                    AccessType::StencilAttachmentWriteDepthReadOnly => {
3924                        AccessType::StencilAttachmentWriteDepthReadOnly
3925                    }
3926                    _ => continue,
3927                };
3928
3929                *access = image_access;
3930
3931                // If the load access is a subset of the existing access range there is no need
3932                // to push a new access
3933                if image_subresource_range_contains(access_image_range, image_range) {
3934                    return self;
3935                }
3936            }
3937        }
3938
3939        self.pass
3940            .push_node_access(image, image_access, Subresource::Image(image_range));
3941
3942        self
3943    }
3944
3945    /// Begin recording a graphics command buffer.
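    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `my_pipeline` is a graphic pipeline created elsewhere:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<GraphicPipeline> = todo!();
    /// # let my_image: ImageNode = todo!();
    /// my_graph.begin_pass("my graphics pass")
    ///         .bind_pipeline(&my_pipeline)
    ///         .clear_color(0, my_image)
    ///         .record_subpass(move |subpass, _| {
    ///             // During this closure we have access to the draw methods!
    ///             subpass.draw(3, 1, 0, 0);
    ///         });
    /// ```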
3946    pub fn record_subpass(
3947        mut self,
3948        func: impl FnOnce(Draw<'_>, Bindings<'_>) + Send + 'static,
3949    ) -> Self {
3950        let pipeline = Arc::clone(
3951            self.pass
3952                .as_ref()
3953                .execs
3954                .last()
3955                .unwrap()
3956                .pipeline
3957                .as_ref()
3958                .unwrap()
3959                .unwrap_graphic(),
3960        );
3961
3962        self.pass.push_execute(move |device, cmd_buf, bindings| {
3963            func(
3964                Draw {
3965                    bindings,
3966                    cmd_buf,
3967                    device,
3968                    pipeline,
3969                },
3970                bindings,
3971            );
3972        });
3973
3974        self
3975    }
3976
3977    /// Resolves a multisample framebuffer to a non-multisample image for the render pass
3978    /// attachment.
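    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `msaa_image` is multisample and `resolve_image` is
    /// single-sample with the same format and extent:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::prelude::*;
    /// # // Hypothetical placeholders; a real program creates these values properly.
    /// # let mut my_graph: RenderGraph = todo!();
    /// # let my_pipeline: Arc<GraphicPipeline> = todo!();
    /// # let msaa_image: ImageNode = todo!();
    /// # let resolve_image: ImageNode = todo!();
    /// my_graph.begin_pass("msaa draw")
    ///         .bind_pipeline(&my_pipeline)
    ///         .clear_color(0, msaa_image)
    ///         .resolve_color(0, 1, resolve_image)
    ///         .record_subpass(move |subpass, _| {
    ///             subpass.draw(3, 1, 0, 0);
    ///         });
    /// ```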
3979    pub fn resolve_color(
3980        self,
3981        src_attachment_idx: AttachmentIndex,
3982        dst_attachment_idx: AttachmentIndex,
3983        image: impl Into<AnyImageNode>,
3984    ) -> Self {
3985        let image: AnyImageNode = image.into();
3986        let image_info = image.get(self.pass.graph);
3987        let image_view_info: ImageViewInfo = image_info.into();
3988
3989        self.resolve_color_as(
3990            src_attachment_idx,
3991            dst_attachment_idx,
3992            image,
3993            image_view_info,
3994        )
3995    }
3996
3997    /// Resolves a multisample framebuffer to a non-multisample image for the render pass
3998    /// attachment.
3999    pub fn resolve_color_as(
4000        mut self,
4001        src_attachment_idx: AttachmentIndex,
4002        dst_attachment_idx: AttachmentIndex,
4003        image: impl Into<AnyImageNode>,
4004        image_view_info: impl Into<ImageViewInfo>,
4005    ) -> Self {
4006        let image = image.into();
4007        let image_view_info = image_view_info.into();
4008        let node_idx = image.index();
4009        let (_, sample_count) = self.image_info(node_idx);
4010
4011        self.pass
4012            .as_mut()
4013            .execs
4014            .last_mut()
4015            .unwrap()
4016            .color_resolves
4017            .insert(
4018                dst_attachment_idx,
4019                (
4020                    Attachment::new(image_view_info, sample_count, node_idx),
4021                    src_attachment_idx,
4022                ),
4023            );
4024
4025        debug_assert!(
4026            Attachment::are_compatible(
4027                self.pass
4028                    .as_ref()
4029                    .execs
4030                    .last()
4031                    .unwrap()
4032                    .color_attachments
4033                    .get(&dst_attachment_idx)
4034                    .copied(),
4035                self.pass
4036                    .as_ref()
4037                    .execs
4038                    .last()
4039                    .unwrap()
4040                    .color_resolves
4041                    .get(&dst_attachment_idx)
4042                    .map(|(attachment, _)| *attachment)
4043            ),
4044            "color attachment {dst_attachment_idx} resolve incompatible with existing attachment"
4045        );
4046        debug_assert!(
4047            Attachment::are_compatible(
4048                self.pass
4049                    .as_ref()
4050                    .execs
4051                    .last()
4052                    .unwrap()
4053                    .color_clears
4054                    .get(&dst_attachment_idx)
4055                    .map(|(attachment, _)| *attachment),
4056                self.pass
4057                    .as_ref()
4058                    .execs
4059                    .last()
4060                    .unwrap()
4061                    .color_resolves
4062                    .get(&dst_attachment_idx)
4063                    .map(|(attachment, _)| *attachment)
4064            ),
4065            "color attachment {dst_attachment_idx} resolve incompatible with existing clear"
4066        );
4067        debug_assert!(
4068            Attachment::are_compatible(
4069                self.pass
4070                    .as_ref()
4071                    .execs
4072                    .last()
4073                    .unwrap()
4074                    .color_loads
4075                    .get(&dst_attachment_idx)
4076                    .copied(),
4077                self.pass
4078                    .as_ref()
4079                    .execs
4080                    .last()
4081                    .unwrap()
4082                    .color_resolves
4083                    .get(&dst_attachment_idx)
4084                    .map(|(attachment, _)| *attachment)
4085            ),
4086            "color attachment {dst_attachment_idx} resolve incompatible with existing load"
4087        );
4088
4089        let mut image_access = AccessType::ColorAttachmentWrite;
4090        let image_range = image_view_info.into();
4091
4092        // Upgrade existing read access to read-write
4093        if let Some(accesses) = self
4094            .pass
4095            .as_mut()
4096            .execs
4097            .last_mut()
4098            .unwrap()
4099            .accesses
4100            .get_mut(&node_idx)
4101        {
4102            for SubresourceAccess {
4103                access,
4104                subresource,
4105            } in accesses
4106            {
4107                let access_image_range = *subresource.as_image().unwrap();
4108                if !image_subresource_range_intersects(access_image_range, image_range) {
4109                    continue;
4110                }
4111
4112                image_access = match *access {
4113                    AccessType::ColorAttachmentRead | AccessType::ColorAttachmentReadWrite => {
4114                        AccessType::ColorAttachmentReadWrite
4115                    }
4116                    AccessType::ColorAttachmentWrite => AccessType::ColorAttachmentWrite,
4117                    _ => continue,
4118                };
4119
4120                *access = image_access;
4121
4122                // If the resolve access is a subset of the existing access range there is no need
4123                // to push a new access
4124                if image_subresource_range_contains(access_image_range, image_range) {
4125                    return self;
4126                }
4127            }
4128        }
4129
4130        self.pass
4131            .push_node_access(image, image_access, Subresource::Image(image_range));
4132
4133        self
4134    }
4135
4136    /// Resolves a multisample framebuffer to a non-multisample image for the render pass
4137    /// attachment.
4138    pub fn resolve_depth_stencil(
4139        self,
4140        dst_attachment_idx: AttachmentIndex,
4141        image: impl Into<AnyImageNode>,
4142        depth_mode: Option<ResolveMode>,
4143        stencil_mode: Option<ResolveMode>,
4144    ) -> Self {
4145        let image: AnyImageNode = image.into();
4146        let image_info = image.get(self.pass.graph);
4147        let image_view_info: ImageViewInfo = image_info.into();
4148
4149        self.resolve_depth_stencil_as(
4150            dst_attachment_idx,
4151            image,
4152            image_view_info,
4153            depth_mode,
4154            stencil_mode,
4155        )
4156    }
4157
4158    /// Resolves a multisample framebuffer to a non-multisample image for the render pass
4159    /// attachment.
4160    pub fn resolve_depth_stencil_as(
4161        mut self,
4162        dst_attachment_idx: AttachmentIndex,
4163        image: impl Into<AnyImageNode>,
4164        image_view_info: impl Into<ImageViewInfo>,
4165        depth_mode: Option<ResolveMode>,
4166        stencil_mode: Option<ResolveMode>,
4167    ) -> Self {
4168        let image = image.into();
4169        let image_view_info = image_view_info.into();
4170        let node_idx = image.index();
4171        let (_, sample_count) = self.image_info(node_idx);
4172
4173        self.pass
4174            .as_mut()
4175            .execs
4176            .last_mut()
4177            .unwrap()
4178            .depth_stencil_resolve = Some((
4179            Attachment::new(image_view_info, sample_count, node_idx),
4180            dst_attachment_idx,
4181            depth_mode,
4182            stencil_mode,
4183        ));
4184
4185        let mut image_access = if image_view_info
4186            .aspect_mask
4187            .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
4188        {
4189            AccessType::DepthStencilAttachmentWrite
4190        } else if image_view_info
4191            .aspect_mask
4192            .contains(vk::ImageAspectFlags::DEPTH)
4193        {
4194            AccessType::DepthAttachmentWriteStencilReadOnly
4195        } else {
4196            debug_assert!(
4197                image_view_info
4198                    .aspect_mask
4199                    .contains(vk::ImageAspectFlags::STENCIL)
4200            );
4201
4202            AccessType::StencilAttachmentWriteDepthReadOnly
4203        };
4204        let image_range = image_view_info.into();
4205
4206        // Upgrade existing read access to read-write
4207        if let Some(accesses) = self
4208            .pass
4209            .as_mut()
4210            .execs
4211            .last_mut()
4212            .unwrap()
4213            .accesses
4214            .get_mut(&node_idx)
4215        {
4216            for SubresourceAccess {
4217                access,
4218                subresource,
4219            } in accesses
4220            {
4221                let access_image_range = *subresource.as_image().unwrap();
4222                if !image_subresource_range_intersects(access_image_range, image_range) {
4223                    continue;
4224                }
4225
                image_access = match *access {
                    AccessType::DepthAttachmentWriteStencilReadOnly => {
                        if image_view_info
                            .aspect_mask
                            .contains(vk::ImageAspectFlags::STENCIL)
                        {
                            AccessType::DepthStencilAttachmentReadWrite
                        } else {
                            AccessType::DepthAttachmentWriteStencilReadOnly
                        }
                    }
                    AccessType::DepthStencilAttachmentRead => {
                        if !image_view_info
                            .aspect_mask
                            .contains(vk::ImageAspectFlags::DEPTH)
                        {
                            AccessType::StencilAttachmentWriteDepthReadOnly
                        } else {
                            AccessType::DepthStencilAttachmentReadWrite
                        }
                    }
                    AccessType::DepthStencilAttachmentWrite => {
                        AccessType::DepthStencilAttachmentWrite
                    }
                    AccessType::StencilAttachmentWriteDepthReadOnly => {
                        if image_view_info
                            .aspect_mask
                            .contains(vk::ImageAspectFlags::DEPTH)
                        {
                            AccessType::DepthStencilAttachmentReadWrite
                        } else {
                            AccessType::StencilAttachmentWriteDepthReadOnly
                        }
                    }
                    _ => continue,
                };

                *access = image_access;

                // If the resolve access is a subset of the existing access range there is no need
                // to push a new access
                if image_subresource_range_contains(access_image_range, image_range) {
                    return self;
                }
            }
        }

        self.pass
            .push_node_access(image, image_access, Subresource::Image(image_range));

        self
    }

    /// Sets a particular depth/stencil mode.
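    ///
    /// # Examples
    ///
    /// Basic usage (a minimal sketch: the hidden pipeline setup uses placeholder shader code, and
    /// `DepthStencilMode::DEPTH_WRITE` stands in for whichever mode your pass requires):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::graphic::{DepthStencilMode, GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let my_vert_code = [0u8; 1];
    /// # let my_frag_code = [0u8; 1];
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device,
    /// #     GraphicPipelineInfo::default(),
    /// #     [Shader::new_vertex(my_vert_code.as_slice()),
    /// #      Shader::new_fragment(my_frag_code.as_slice())],
    /// # )?);
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("my graphic pass")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .set_depth_stencil(DepthStencilMode::DEPTH_WRITE);
    /// # Ok(()) }
    /// ```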
    pub fn set_depth_stencil(mut self, depth_stencil: DepthStencilMode) -> Self {
        let pass = self.pass.as_mut();
        let exec = pass.execs.last_mut().unwrap();

        assert!(exec.depth_stencil.is_none());

        exec.depth_stencil = Some(depth_stencil);

        self
    }

    /// Sets multiview view and correlation masks.
    ///
    /// See [`VkRenderPassMultiviewCreateInfo`](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkRenderPassMultiviewCreateInfo.html#_description).
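    ///
    /// # Examples
    ///
    /// Basic usage (a minimal sketch: the hidden pipeline setup uses placeholder shader code;
    /// `0b11` renders to views 0 and 1 and marks both as spatially correlated):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let my_vert_code = [0u8; 1];
    /// # let my_frag_code = [0u8; 1];
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device,
    /// #     GraphicPipelineInfo::default(),
    /// #     [Shader::new_vertex(my_vert_code.as_slice()),
    /// #      Shader::new_fragment(my_frag_code.as_slice())],
    /// # )?);
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("my multiview pass")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .set_multiview(0b11, 0b11);
    /// # Ok(()) }
    /// ```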
    pub fn set_multiview(mut self, view_mask: u32, correlated_view_mask: u32) -> Self {
        let pass = self.pass.as_mut();
        let exec = pass.execs.last_mut().unwrap();

        exec.correlated_view_mask = correlated_view_mask;
        exec.view_mask = view_mask;

        self
    }

    /// Sets the [`renderArea`](https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkRenderPassBeginInfo.html#_c_specification)
    /// field used when beginning a render pass.
    ///
    /// _NOTE:_ Unlike the default behavior, setting this value leaves the viewport and scissor
    /// unset; when you set a render area you should also call `set_viewport` and `set_scissor` on
    /// the subpass.
    ///
    /// If not set, the render area defaults to the dimensions of the first loaded, resolved, or
    /// stored attachment, and the viewport and scissor are set to the same values, with a depth
    /// range of `0..1` unless otherwise specified by `set_depth_stencil`.
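    ///
    /// # Examples
    ///
    /// Basic usage (a minimal sketch; pipeline setup is hidden placeholder code as in the other
    /// examples in this module):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let my_vert_code = [0u8; 1];
    /// # let my_frag_code = [0u8; 1];
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device,
    /// #     GraphicPipelineInfo::default(),
    /// #     [Shader::new_vertex(my_vert_code.as_slice()),
    /// #      Shader::new_fragment(my_frag_code.as_slice())],
    /// # )?);
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("my graphic pass")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .set_render_area(0, 0, 320, 200);
    /// # Ok(()) }
    /// ```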
    pub fn set_render_area(mut self, x: i32, y: i32, width: u32, height: u32) -> Self {
        self.pass.as_mut().execs.last_mut().unwrap().render_area = Some(Area {
            height,
            width,
            x,
            y,
        });

        self
    }

    /// Specifies `VK_ATTACHMENT_STORE_OP_STORE` for the render pass attachment, and stores the
    /// rendered pixels into an image.
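    ///
    /// # Examples
    ///
    /// Basic usage (a minimal sketch: the color image node is elided with `todo!()` and would
    /// normally come from binding an image to the graph; pipeline setup is hidden placeholder
    /// code):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::graphic::{GraphicPipeline, GraphicPipelineInfo};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # use screen_13::graph::node::AnyImageNode;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let my_vert_code = [0u8; 1];
    /// # let my_frag_code = [0u8; 1];
    /// # let my_graphic_pipeline = Arc::new(GraphicPipeline::create(&device,
    /// #     GraphicPipelineInfo::default(),
    /// #     [Shader::new_vertex(my_vert_code.as_slice()),
    /// #      Shader::new_fragment(my_frag_code.as_slice())],
    /// # )?);
    /// # let mut my_graph = RenderGraph::new();
    /// # let color_image: AnyImageNode = todo!();
    /// my_graph.begin_pass("my graphic pass")
    ///         .bind_pipeline(&my_graphic_pipeline)
    ///         .store_color(0, color_image);
    /// # Ok(()) }
    /// ```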
    pub fn store_color(
        self,
        attachment_idx: AttachmentIndex,
        image: impl Into<AnyImageNode>,
    ) -> Self {
        let image: AnyImageNode = image.into();
        let image_info = image.get(self.pass.graph);
        let image_view_info: ImageViewInfo = image_info.into();

        self.store_color_as(attachment_idx, image, image_view_info)
    }

    /// Specifies `VK_ATTACHMENT_STORE_OP_STORE` for the render pass attachment, and stores the
    /// rendered pixels into an image.
    pub fn store_color_as(
        mut self,
        attachment_idx: AttachmentIndex,
        image: impl Into<AnyImageNode>,
        image_view_info: impl Into<ImageViewInfo>,
    ) -> Self {
        let image = image.into();
        let image_view_info = image_view_info.into();
        let node_idx = image.index();
        let (_, sample_count) = self.image_info(node_idx);

        self.pass
            .as_mut()
            .execs
            .last_mut()
            .unwrap()
            .color_stores
            .insert(
                attachment_idx,
                Attachment::new(image_view_info, sample_count, node_idx),
            );

        debug_assert!(
            Attachment::are_compatible(
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .color_attachments
                    .get(&attachment_idx)
                    .copied(),
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .color_stores
                    .get(&attachment_idx)
                    .copied()
            ),
            "color attachment {attachment_idx} store incompatible with existing attachment"
        );
        debug_assert!(
            Attachment::are_compatible(
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .color_clears
                    .get(&attachment_idx)
                    .map(|(attachment, _)| *attachment),
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .color_stores
                    .get(&attachment_idx)
                    .copied()
            ),
            "color attachment {attachment_idx} store incompatible with existing clear"
        );
        debug_assert!(
            Attachment::are_compatible(
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .color_loads
                    .get(&attachment_idx)
                    .copied(),
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .color_stores
                    .get(&attachment_idx)
                    .copied()
            ),
            "color attachment {attachment_idx} store incompatible with existing load"
        );

        let mut image_access = AccessType::ColorAttachmentWrite;
        let image_range = image_view_info.into();

        // Upgrade existing read access to read-write
        if let Some(accesses) = self
            .pass
            .as_mut()
            .execs
            .last_mut()
            .unwrap()
            .accesses
            .get_mut(&node_idx)
        {
            for SubresourceAccess {
                access,
                subresource,
            } in accesses
            {
                let access_image_range = *subresource.as_image().unwrap();
                if !image_subresource_range_intersects(access_image_range, image_range) {
                    continue;
                }

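                // Fold this store into the overlapping access: existing color reads are
                // upgraded to read-write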
                image_access = match *access {
                    AccessType::ColorAttachmentRead | AccessType::ColorAttachmentReadWrite => {
                        AccessType::ColorAttachmentReadWrite
                    }
                    AccessType::ColorAttachmentWrite => AccessType::ColorAttachmentWrite,
                    _ => continue,
                };

                *access = image_access;

                // If the store access is a subset of the existing access range there is no need
                // to push a new access
                if image_subresource_range_contains(access_image_range, image_range) {
                    return self;
                }
            }
        }

        self.pass
            .push_node_access(image, image_access, Subresource::Image(image_range));

        self
    }

    /// Specifies `VK_ATTACHMENT_STORE_OP_STORE` for the render pass attachment, and stores the
    /// rendered pixels into an image.
    pub fn store_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
        let image: AnyImageNode = image.into();
        let image_info = image.get(self.pass.graph);
        let image_view_info: ImageViewInfo = image_info.into();

        self.store_depth_stencil_as(image, image_view_info)
    }

    /// Specifies `VK_ATTACHMENT_STORE_OP_STORE` for the render pass attachment, and stores the
    /// rendered pixels into an image.
    ///
    /// _NOTE:_ Order matters: call this store method after any clear or load of the attachment.
    pub fn store_depth_stencil_as(
        mut self,
        image: impl Into<AnyImageNode>,
        image_view_info: impl Into<ImageViewInfo>,
    ) -> Self {
        let image = image.into();
        let image_view_info = image_view_info.into();
        let node_idx = image.index();
        let (_, sample_count) = self.image_info(node_idx);

        self.pass
            .as_mut()
            .execs
            .last_mut()
            .unwrap()
            .depth_stencil_store = Some(Attachment::new(image_view_info, sample_count, node_idx));

        debug_assert!(
            Attachment::are_compatible(
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .depth_stencil_attachment,
                self.pass.as_ref().execs.last().unwrap().depth_stencil_store
            ),
            "depth/stencil attachment store incompatible with existing attachment"
        );
        debug_assert!(
            Attachment::are_compatible(
                self.pass
                    .as_ref()
                    .execs
                    .last()
                    .unwrap()
                    .depth_stencil_clear
                    .map(|(attachment, _)| attachment),
                self.pass.as_ref().execs.last().unwrap().depth_stencil_store
            ),
            "depth/stencil attachment store incompatible with existing clear"
        );
        debug_assert!(
            Attachment::are_compatible(
                self.pass.as_ref().execs.last().unwrap().depth_stencil_load,
                self.pass.as_ref().execs.last().unwrap().depth_stencil_store
            ),
            "depth/stencil attachment store incompatible with existing load"
        );

        let mut image_access = if image_view_info
            .aspect_mask
            .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
        {
            AccessType::DepthStencilAttachmentWrite
        } else if image_view_info
            .aspect_mask
            .contains(vk::ImageAspectFlags::DEPTH)
        {
            AccessType::DepthAttachmentWriteStencilReadOnly
        } else {
            debug_assert!(
                image_view_info
                    .aspect_mask
                    .contains(vk::ImageAspectFlags::STENCIL)
            );

            AccessType::StencilAttachmentWriteDepthReadOnly
        };
        let image_range = image_view_info.into();

        // Upgrade existing read access to read-write
        if let Some(accesses) = self
            .pass
            .as_mut()
            .execs
            .last_mut()
            .unwrap()
            .accesses
            .get_mut(&node_idx)
        {
            for SubresourceAccess {
                access,
                subresource,
            } in accesses
            {
                let access_image_range = *subresource.as_image().unwrap();
                if !image_subresource_range_intersects(access_image_range, image_range) {
                    continue;
                }

                image_access = match *access {
                    AccessType::DepthAttachmentWriteStencilReadOnly => {
                        if image_view_info
                            .aspect_mask
                            .contains(vk::ImageAspectFlags::STENCIL)
                        {
                            AccessType::DepthStencilAttachmentReadWrite
                        } else {
                            AccessType::DepthAttachmentWriteStencilReadOnly
                        }
                    }
                    AccessType::DepthStencilAttachmentRead => {
                        if !image_view_info
                            .aspect_mask
                            .contains(vk::ImageAspectFlags::DEPTH)
                        {
                            AccessType::StencilAttachmentWriteDepthReadOnly
                        } else {
                            AccessType::DepthStencilAttachmentReadWrite
                        }
                    }
                    AccessType::DepthStencilAttachmentWrite => {
                        AccessType::DepthStencilAttachmentWrite
                    }
                    AccessType::StencilAttachmentWriteDepthReadOnly => {
                        if image_view_info
                            .aspect_mask
                            .contains(vk::ImageAspectFlags::DEPTH)
                        {
                            AccessType::DepthStencilAttachmentReadWrite
                        } else {
                            AccessType::StencilAttachmentWriteDepthReadOnly
                        }
                    }
                    _ => continue,
                };

                *access = image_access;

                // If the store access is a subset of the existing access range there is no need
                // to push a new access
                if image_subresource_range_contains(access_image_range, image_range) {
                    return self;
                }
            }
        }

        self.pass
            .push_node_access(image, image_access, Subresource::Image(image_range));

        self
    }
}

impl PipelinePassRef<'_, RayTracePipeline> {
    /// Begin recording a ray tracing command buffer.
    pub fn record_ray_trace(
        mut self,
        func: impl FnOnce(RayTrace<'_>, Bindings<'_>) + Send + 'static,
    ) -> Self {
        let pipeline = Arc::clone(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .pipeline
                .as_ref()
                .unwrap()
                .unwrap_ray_trace(),
        );

        #[cfg(debug_assertions)]
        let dynamic_stack_size = pipeline.info.dynamic_stack_size;

        self.pass.push_execute(move |device, cmd_buf, bindings| {
            func(
                RayTrace {
                    cmd_buf,
                    device,

                    #[cfg(debug_assertions)]
                    dynamic_stack_size,

                    pipeline,
                },
                bindings,
            );
        });

        self
    }
}

/// Recording interface for ray tracing commands.
///
/// This structure provides a strongly-typed set of methods which allow ray trace shader code to be
/// executed. An instance of `RayTrace` is provided to the closure parameter of
/// [`PipelinePassRef::record_ray_trace`] which may be accessed by binding a [`RayTracePipeline`] to
/// a render pass.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use ash::vk;
/// # use screen_13::driver::DriverError;
/// # use screen_13::driver::device::{Device, DeviceInfo};
/// # use screen_13::driver::ray_trace::{RayTracePipeline, RayTracePipelineInfo, RayTraceShaderGroup};
/// # use screen_13::driver::shader::Shader;
/// # use screen_13::graph::RenderGraph;
/// # fn main() -> Result<(), DriverError> {
/// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
/// # let info = RayTracePipelineInfo::default();
/// # let my_miss_code = [0u8; 1];
/// # let my_ray_trace_pipeline = Arc::new(RayTracePipeline::create(&device, info,
/// #     [Shader::new_miss(my_miss_code.as_slice())],
/// #     [RayTraceShaderGroup::new_general(0)],
/// # )?);
/// # let mut my_graph = RenderGraph::new();
/// my_graph.begin_pass("my ray trace pass")
///         .bind_pipeline(&my_ray_trace_pipeline)
///         .record_ray_trace(move |ray_trace, bindings| {
///             // During this closure we have access to the ray trace methods!
///         });
/// # Ok(()) }
/// ```
pub struct RayTrace<'a> {
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,

    #[cfg(debug_assertions)]
    dynamic_stack_size: bool,

    pipeline: Arc<RayTracePipeline>,
}

impl RayTrace<'_> {
    /// Updates push constants.
    ///
    /// Push constants provide a high-speed path for modifying constant data in pipelines, and are
    /// expected to outperform memory-backed resource updates.
    ///
    /// Push constant values can be updated incrementally, causing shader stages to read the new
    /// data for push constants modified by this command, while still reading the previous data for
    /// push constants not modified by this command.
    ///
    /// # Device limitations
    ///
    /// See
    /// [`device.physical_device.props.limits.max_push_constants_size`](vk::PhysicalDeviceLimits)
    /// for the limits of the current device. You may also check [gpuinfo.org] for a listing of
    /// reported limits on other devices.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # inline_spirv::inline_spirv!(r#"
    /// #version 460
    ///
    /// layout(push_constant) uniform PushConstants {
    ///     layout(offset = 0) uint some_val;
    /// } push_constants;
    ///
    /// void main()
    /// {
    ///     // TODO: Add bindings to write things!
    /// }
    /// # "#, rchit, vulkan1_2);
    /// ```
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::ray_trace::{RayTracePipeline, RayTracePipelineInfo, RayTraceShaderGroup};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let shader = [0u8; 1];
    /// # let info = RayTracePipelineInfo::default();
    /// # let my_miss_code = [0u8; 1];
    /// # let my_ray_trace_pipeline = Arc::new(RayTracePipeline::create(&device, info,
    /// #     [Shader::new_miss(my_miss_code.as_slice())],
    /// #     [RayTraceShaderGroup::new_general(0)],
    /// # )?);
    /// # let rgen_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let hit_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let miss_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let call_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("draw a cornell box")
    ///         .bind_pipeline(&my_ray_trace_pipeline)
    ///         .record_ray_trace(move |ray_trace, bindings| {
    ///             ray_trace.push_constants(&[0xcb])
    ///                      .trace_rays(&rgen_sbt, &miss_sbt, &hit_sbt, &call_sbt, 320, 200, 1);
    ///         });
    /// # Ok(()) }
    /// ```
    ///
    /// [gpuinfo.org]: https://vulkan.gpuinfo.org/displaydevicelimit.php?name=maxPushConstantsSize&platform=all
    pub fn push_constants(&self, data: &[u8]) -> &Self {
        self.push_constants_offset(0, data)
    }

    /// Updates push constants starting at the given `offset`.
    ///
    /// Behaves similarly to [`RayTrace::push_constants`] except that `offset` describes the position
    /// at which `data` updates the push constants of the currently bound pipeline. This may be used
    /// to update a subset or single field of previously set push constant data.
    ///
    /// # Device limitations
    ///
    /// See
    /// [`device.physical_device.props.limits.max_push_constants_size`](vk::PhysicalDeviceLimits)
    /// for the limits of the current device. You may also check [gpuinfo.org] for a listing of
    /// reported limits on other devices.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # inline_spirv::inline_spirv!(r#"
    /// #version 460
    ///
    /// layout(push_constant) uniform PushConstants {
    ///     layout(offset = 0) uint some_val1;
    ///     layout(offset = 4) uint some_val2;
    /// } push_constants;
    ///
    /// void main()
    /// {
    ///     // TODO: Add bindings to write things!
    /// }
    /// # "#, rchit, vulkan1_2);
    /// ```
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::ray_trace::{RayTracePipeline, RayTracePipelineInfo, RayTraceShaderGroup};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let shader = [0u8; 1];
    /// # let info = RayTracePipelineInfo::default();
    /// # let my_miss_code = [0u8; 1];
    /// # let my_ray_trace_pipeline = Arc::new(RayTracePipeline::create(&device, info,
    /// #     [Shader::new_miss(my_miss_code.as_slice())],
    /// #     [RayTraceShaderGroup::new_general(0)],
    /// # )?);
    /// # let rgen_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let hit_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let miss_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let call_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("draw a cornell box")
    ///         .bind_pipeline(&my_ray_trace_pipeline)
    ///         .record_ray_trace(move |ray_trace, bindings| {
    ///             ray_trace.push_constants(&[0xcb, 0xff])
    ///                      .trace_rays(&rgen_sbt, &miss_sbt, &hit_sbt, &call_sbt, 320, 200, 1)
    ///                      .push_constants_offset(4, &[0xae])
    ///                      .trace_rays(&rgen_sbt, &miss_sbt, &hit_sbt, &call_sbt, 320, 200, 1);
    ///         });
    /// # Ok(()) }
    /// ```
    ///
    /// [gpuinfo.org]: https://vulkan.gpuinfo.org/displaydevicelimit.php?name=maxPushConstantsSize&platform=all
    #[profiling::function]
    pub fn push_constants_offset(&self, offset: u32, data: &[u8]) -> &Self {
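        // Clip [offset..offset + data.len()) to each push constant range declared by the
        // pipeline layout, updating only the overlapping bytes for that range's stages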
        for push_const in self.pipeline.push_constants.iter() {
            let push_const_end = push_const.offset + push_const.size;
            let data_end = offset + data.len() as u32;
            let end = data_end.min(push_const_end);
            let start = offset.max(push_const.offset);

            if end > start {
                trace!(
                    "      push constants {:?} {}..{}",
                    push_const.stage_flags, start, end
                );

                unsafe {
                    self.device.cmd_push_constants(
                        self.cmd_buf,
                        self.pipeline.layout,
                        push_const.stage_flags,
                        start,
                        &data[(start - offset) as usize..(end - offset) as usize],
                    );
                }
            }
        }
        self
    }

    /// Sets the stack size dynamically for a ray trace pipeline.
    ///
    /// See
    /// [`RayTracePipelineInfo::dynamic_stack_size`](crate::driver::ray_trace::RayTracePipelineInfo::dynamic_stack_size)
    /// and
    /// [`vkCmdSetRayTracingPipelineStackSizeKHR`](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkCmdSetRayTracingPipelineStackSizeKHR.html).
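    ///
    /// # Examples
    ///
    /// Basic usage (a minimal sketch; it assumes the hidden pipeline setup enables
    /// `dynamic_stack_size`, and `4096` is an arbitrary stack size in bytes):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::ray_trace::{RayTracePipeline, RayTracePipelineInfo, RayTraceShaderGroup};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let info = RayTracePipelineInfo::default();
    /// # let my_miss_code = [0u8; 1];
    /// # let my_ray_trace_pipeline = Arc::new(RayTracePipeline::create(&device, info,
    /// #     [Shader::new_miss(my_miss_code.as_slice())],
    /// #     [RayTraceShaderGroup::new_general(0)],
    /// # )?);
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("my ray trace pass")
    ///         .bind_pipeline(&my_ray_trace_pipeline)
    ///         .record_ray_trace(move |ray_trace, bindings| {
    ///             ray_trace.set_stack_size(4096);
    ///         });
    /// # Ok(()) }
    /// ```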
    #[profiling::function]
    pub fn set_stack_size(&self, pipeline_stack_size: u32) -> &Self {
        #[cfg(debug_assertions)]
        assert!(self.dynamic_stack_size);

        unsafe {
            // SAFETY: `unwrap_unchecked` is safe because ray_trace_ext was checked during
            // pipeline creation
            self.device
                .ray_trace_ext
                .as_ref()
                .unwrap_unchecked()
                .cmd_set_ray_tracing_pipeline_stack_size(self.cmd_buf, pipeline_stack_size);
        }

        self
    }

    // TODO: If the rayTraversalPrimitiveCulling or rayQuery features are enabled, the SkipTrianglesKHR and SkipAABBsKHR ray flags can be specified when tracing a ray. SkipTrianglesKHR and SkipAABBsKHR are mutually exclusive.

    /// Ray traces using the currently-bound [`RayTracePipeline`] and the given shader binding
    /// tables.
    ///
    /// Shader binding tables must be constructed according to this [example].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::buffer::{Buffer, BufferInfo};
    /// # use screen_13::driver::ray_trace::{RayTracePipeline, RayTracePipelineInfo, RayTraceShaderGroup};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let shader = [0u8; 1];
    /// # let info = RayTracePipelineInfo::default();
    /// # let my_miss_code = [0u8; 1];
    /// # let my_ray_trace_pipeline = Arc::new(RayTracePipeline::create(&device, info,
    /// #     [Shader::new_miss(my_miss_code.as_slice())],
    /// #     [RayTraceShaderGroup::new_general(0)],
    /// # )?);
    /// # let rgen_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let hit_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let miss_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let call_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("draw a cornell box")
    ///         .bind_pipeline(&my_ray_trace_pipeline)
    ///         .record_ray_trace(move |ray_trace, bindings| {
    ///             ray_trace.trace_rays(&rgen_sbt, &miss_sbt, &hit_sbt, &call_sbt, 320, 200, 1);
    ///         });
    /// # Ok(()) }
    /// ```
    ///
    /// [example]: https://github.com/attackgoat/screen-13/blob/master/examples/ray_trace.rs
    #[allow(clippy::too_many_arguments)]
    #[profiling::function]
    pub fn trace_rays(
        &self,
        raygen_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        miss_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        hit_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        callable_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        width: u32,
        height: u32,
        depth: u32,
    ) -> &Self {
        unsafe {
            // SAFETY: `unwrap_unchecked` is safe because ray_trace_ext was checked during
            // pipeline creation
            self.device
                .ray_trace_ext
                .as_ref()
                .unwrap_unchecked()
                .cmd_trace_rays(
                    self.cmd_buf,
                    raygen_shader_binding_table,
                    miss_shader_binding_table,
                    hit_shader_binding_table,
                    callable_shader_binding_table,
                    width,
                    height,
                    depth,
                );
        }

        self
    }

    /// Ray traces using the currently-bound [`RayTracePipeline`] and the given shader binding
    /// tables.
    ///
    /// `indirect_device_address` is a [buffer device address] which is a pointer to a
    /// [`vk::TraceRaysIndirectCommandKHR`] structure containing the trace ray parameters.
    ///
    /// See [`vkCmdTraceRaysIndirectKHR`](https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkCmdTraceRaysIndirectKHR.html).
    ///
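    /// # Examples
    ///
    /// Basic usage (a minimal sketch; the zero `indirect_device_address` stands in for a real
    /// [buffer device address] of a [`vk::TraceRaysIndirectCommandKHR`] written elsewhere):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use ash::vk;
    /// # use screen_13::driver::DriverError;
    /// # use screen_13::driver::device::{Device, DeviceInfo};
    /// # use screen_13::driver::ray_trace::{RayTracePipeline, RayTracePipelineInfo, RayTraceShaderGroup};
    /// # use screen_13::driver::shader::Shader;
    /// # use screen_13::graph::RenderGraph;
    /// # fn main() -> Result<(), DriverError> {
    /// # let device = Arc::new(Device::create_headless(DeviceInfo::default())?);
    /// # let info = RayTracePipelineInfo::default();
    /// # let my_miss_code = [0u8; 1];
    /// # let my_ray_trace_pipeline = Arc::new(RayTracePipeline::create(&device, info,
    /// #     [Shader::new_miss(my_miss_code.as_slice())],
    /// #     [RayTraceShaderGroup::new_general(0)],
    /// # )?);
    /// # let rgen_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let hit_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let miss_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let call_sbt = vk::StridedDeviceAddressRegionKHR { device_address: 0, stride: 0, size: 0 };
    /// # let indirect_device_address: vk::DeviceAddress = 0;
    /// # let mut my_graph = RenderGraph::new();
    /// my_graph.begin_pass("indirect ray trace")
    ///         .bind_pipeline(&my_ray_trace_pipeline)
    ///         .record_ray_trace(move |ray_trace, bindings| {
    ///             ray_trace.trace_rays_indirect(
    ///                 &rgen_sbt,
    ///                 &miss_sbt,
    ///                 &hit_sbt,
    ///                 &call_sbt,
    ///                 indirect_device_address,
    ///             );
    ///         });
    /// # Ok(()) }
    /// ```
    ///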
    /// [buffer device address]: Buffer::device_address
    #[profiling::function]
    pub fn trace_rays_indirect(
        &self,
        raygen_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        miss_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        hit_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        callable_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        indirect_device_address: vk::DeviceAddress,
    ) -> &Self {
        unsafe {
            // SAFETY: `unwrap_unchecked` is safe because ray_trace_ext was checked during
            // pipeline creation
            self.device
                .ray_trace_ext
                .as_ref()
                .unwrap_unchecked()
                .cmd_trace_rays_indirect(
                    self.cmd_buf,
                    raygen_shader_binding_table,
                    miss_shader_binding_table,
                    hit_shader_binding_table,
                    callable_shader_binding_table,
                    indirect_device_address,
                )
        }

        self
    }
}

/// Describes a portion of a resource which is bound.
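///
/// # Examples
///
/// A subresource is usually produced by converting a concrete range type (a sketch, shown with
/// the public `pass_ref` module path):
///
/// ```no_run
/// # use ash::vk;
/// # use screen_13::graph::pass_ref::Subresource;
/// // The first mip level of a single-layer color image
/// let first_mip = Subresource::from(vk::ImageSubresourceRange {
///     aspect_mask: vk::ImageAspectFlags::COLOR,
///     base_mip_level: 0,
///     level_count: 1,
///     base_array_layer: 0,
///     layer_count: 1,
/// });
/// ```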
#[derive(Clone, Copy, Debug)]
pub enum Subresource {
    /// Acceleration structures are bound whole.
    AccelerationStructure,

    /// Images may be partially bound.
    Image(vk::ImageSubresourceRange),

    /// Buffers may be partially bound.
    Buffer(BufferSubresourceRange),
}

impl Subresource {
    pub(super) fn as_image(&self) -> Option<&vk::ImageSubresourceRange> {
        if let Self::Image(subresource) = self {
            Some(subresource)
        } else {
            None
        }
    }
}

impl From<()> for Subresource {
    fn from(_: ()) -> Self {
        Self::AccelerationStructure
    }
}

impl From<vk::ImageSubresourceRange> for Subresource {
    fn from(subresource: vk::ImageSubresourceRange) -> Self {
        Self::Image(subresource)
    }
}

impl From<BufferSubresourceRange> for Subresource {
    fn from(subresource: BufferSubresourceRange) -> Self {
        Self::Buffer(subresource)
    }
}

#[derive(Clone, Copy, Debug)]
pub(super) struct SubresourceAccess {
    pub access: AccessType,
    pub subresource: Subresource,
}

/// Allows for a resource to be reinterpreted as differently formatted data.
pub trait View: Node
where
    Self::Information: Copy,
    Self::Subresource: Into<Subresource>,
{
    /// The information about the resource interpretation.
    type Information;

    /// The portion of the resource which is bound.
    type Subresource;
}

impl View for AccelerationStructureNode {
    type Information = ();
    type Subresource = ();
}

impl View for AccelerationStructureLeaseNode {
    type Information = ();
    type Subresource = ();
}

impl View for AnyAccelerationStructureNode {
    type Information = ();
    type Subresource = ();
}

impl View for AnyBufferNode {
    type Information = BufferSubresourceRange;
    type Subresource = BufferSubresourceRange;
}

impl View for AnyImageNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

impl View for BufferLeaseNode {
    type Information = BufferSubresourceRange;
    type Subresource = BufferSubresourceRange;
}

impl View for BufferNode {
    type Information = BufferSubresourceRange;
    type Subresource = BufferSubresourceRange;
}

impl View for ImageLeaseNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

impl View for ImageNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

impl View for SwapchainImageNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

/// Describes the interpretation of a resource.
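///
/// # Examples
///
/// A buffer view is usually produced by converting a byte range (a sketch, shown with the public
/// `pass_ref` module path):
///
/// ```no_run
/// # use ash::vk;
/// # use screen_13::graph::pass_ref::ViewType;
/// // Interpret the first 1024 bytes of a buffer
/// let view = ViewType::from(0..1024 as vk::DeviceSize);
/// ```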
#[derive(Debug)]
pub enum ViewType {
    /// Acceleration structures are not reinterpreted.
    AccelerationStructure,

    /// Images may be interpreted as differently formatted images.
    Image(ImageViewInfo),

    /// Buffers may be interpreted as subregions of the same buffer.
    Buffer(Range<vk::DeviceSize>),
}

impl ViewType {
    pub(super) fn as_buffer(&self) -> Option<&Range<vk::DeviceSize>> {
        match self {
            Self::Buffer(view_info) => Some(view_info),
            _ => None,
        }
    }

    pub(super) fn as_image(&self) -> Option<&ImageViewInfo> {
        match self {
            Self::Image(view_info) => Some(view_info),
            _ => None,
        }
    }
}

impl From<()> for ViewType {
    fn from(_: ()) -> Self {
        Self::AccelerationStructure
    }
}

impl From<BufferSubresourceRange> for ViewType {
    fn from(subresource: BufferSubresourceRange) -> Self {
        Self::Buffer(subresource.start..subresource.end)
    }
}

impl From<ImageViewInfo> for ViewType {
    fn from(info: ImageViewInfo) -> Self {
        Self::Image(info)
    }
}

impl From<Range<vk::DeviceSize>> for ViewType {
    fn from(range: Range<vk::DeviceSize>) -> Self {
        Self::Buffer(range)
    }
}