use {
super::{
AccelerationStructureLeaseNode, AccelerationStructureNode, AnyAccelerationStructureNode,
AnyBufferNode, AnyImageNode, Area, Attachment, Bind, Binding, BufferLeaseNode, BufferNode,
ClearColorValue, Edge, Execution, ExecutionFunction, ExecutionPipeline, ImageLeaseNode,
ImageNode, Information, Node, NodeIndex, Pass, RenderGraph, SampleCount,
SwapchainImageNode,
},
crate::driver::{
accel_struct::{
AccelerationStructure, AccelerationStructureGeometry,
AccelerationStructureGeometryInfo, DeviceOrHostAddress,
},
buffer::{Buffer, BufferSubresourceRange},
compute::ComputePipeline,
device::Device,
graphic::{DepthStencilMode, GraphicPipeline},
image::{
Image, ImageViewInfo, image_subresource_range_contains,
image_subresource_range_intersects,
},
ray_trace::RayTracePipeline,
render_pass::ResolveMode,
},
ash::vk,
log::trace,
std::{
cell::RefCell,
marker::PhantomData,
ops::{Index, Range},
sync::Arc,
},
vk_sync::AccessType,
};
/// Index of a framebuffer attachment (used by render-pass attachment APIs elsewhere in this file).
pub type AttachmentIndex = u32;

/// Index of a descriptor binding within a descriptor set.
pub type BindingIndex = u32;

/// Element offset within an arrayed descriptor binding.
pub type BindingOffset = u32;

/// Index of a descriptor set within a pipeline layout.
pub type DescriptorSetIndex = u32;
/// Recording interface for acceleration-structure build and update commands.
///
/// An instance is constructed by [`PassRef::record_acceleration`] and handed to the
/// user closure together with the pass [`Bindings`].
pub struct Acceleration<'a> {
    // Resolves graph nodes to their bound Vulkan objects for this execution.
    bindings: Bindings<'a>,
    // Command buffer the build commands are recorded into.
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,
}
impl Acceleration<'_> {
    /// Builds a single acceleration structure described by `info`, writing the result into
    /// `accel_struct` and using `scratch_addr` as build scratch memory.
    ///
    /// Records `vkCmdBuildAccelerationStructuresKHR` with one build-geometry info.
    pub fn build_structure(
        &self,
        info: &AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> &Self {
        // Thread-local scratch vectors reused across calls to avoid per-call allocation.
        #[derive(Default)]
        struct Tls {
            // NOTE(review): the converted geometry structs carry a 'static lifetime
            // parameter; they only live for the duration of this call, which records the
            // command before returning.
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let accel_struct = accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.ranges.clear();

            for (geometry, range) in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.ranges.push(*range);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                        .ty(info.ty)
                        .flags(info.flags)
                        .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                        .dst_acceleration_structure(*self.bindings[accel_struct])
                        .geometries(&tls.geometries)
                        .scratch_data(scratch_addr)],
                    &[&tls.ranges],
                );
            }
        });

        self
    }

    /// Builds a single acceleration structure with the build ranges read from device memory
    /// at `range_base` (stride `range_stride` bytes per geometry).
    ///
    /// Records `vkCmdBuildAccelerationStructuresIndirectKHR`.
    pub fn build_structure_indirect(
        &self,
        info: &AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
    ) -> &Self {
        // Thread-local scratch vectors reused across calls to avoid per-call allocation.
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let accel_struct = accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.max_primitive_counts.clear();

            for geometry in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.max_primitive_counts.push(geometry.max_primitive_count);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.ty)
                            .flags(info.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                            .dst_acceleration_structure(*self.bindings[accel_struct])
                            .geometries(&tls.geometries)
                            .scratch_data(scratch_addr)],
                        &[range_base],
                        &[range_stride],
                        &[&tls.max_primitive_counts],
                    );
            }
        });

        self
    }

    /// Builds multiple acceleration structures in one command.
    ///
    /// All geometries/ranges are flattened into thread-local vectors first, then sliced
    /// back per-info so a single `vkCmdBuildAccelerationStructuresKHR` can be recorded.
    pub fn build_structures(&self, infos: &[AccelerationStructureBuildInfo]) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.build_data.geometries.iter().map(|(geometry, _)| {
                    <&AccelerationStructureGeometry as Into<
                        vk::AccelerationStructureGeometryKHR,
                    >>::into(geometry)
                })
            }));

            tls.ranges.clear();
            tls.ranges.extend(
                infos
                    .iter()
                    .flat_map(|info| info.build_data.geometries.iter().map(|(_, range)| *range)),
            );

            // Re-slice the flattened ranges so each info gets its own sub-slice.
            let vk_ranges = {
                let mut start = 0;
                let mut vk_ranges = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.build_data.geometries.len();
                    vk_ranges.push(&tls.ranges[start..end]);
                    start = end;
                }

                vk_ranges
            };

            // Same slicing walk for the geometry infos; each build info references its own
            // sub-slice of the flattened geometry vector.
            let vk_infos = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.build_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.build_data.ty)
                            .flags(info.build_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                            .dst_acceleration_structure(*self.bindings[info.accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_addr.into()),
                    );
                    start = end;
                }

                vk_infos
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &vk_infos,
                    &vk_ranges,
                );
            }
        });

        self
    }

    /// Builds multiple acceleration structures with per-info build ranges read indirectly
    /// from device memory.
    ///
    /// Records `vkCmdBuildAccelerationStructuresIndirectKHR`.
    pub fn build_structures_indirect(
        &self,
        infos: &[AccelerationStructureIndirectBuildInfo],
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
            range_bases: Vec<vk::DeviceAddress>,
            range_strides: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.build_data.geometries.iter().map(
                    <&AccelerationStructureGeometry as Into<
                        vk::AccelerationStructureGeometryKHR,
                    >>::into,
                )
            }));

            tls.max_primitive_counts.clear();
            tls.max_primitive_counts
                .extend(infos.iter().flat_map(|info| {
                    info.build_data
                        .geometries
                        .iter()
                        .map(|geometry| geometry.max_primitive_count)
                }));

            tls.range_bases.clear();
            tls.range_strides.clear();

            let (vk_infos, vk_max_primitive_counts) = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                let mut vk_max_primitive_counts = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.build_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.build_data.ty)
                            .flags(info.build_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::BUILD)
                            .dst_acceleration_structure(*self.bindings[info.accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_data.into()),
                    );
                    vk_max_primitive_counts.push(&tls.max_primitive_counts[start..end]);
                    start = end;

                    tls.range_bases.push(info.range_base);
                    tls.range_strides.push(info.range_stride);
                }

                (vk_infos, vk_max_primitive_counts)
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &vk_infos,
                        &tls.range_bases,
                        &tls.range_strides,
                        &vk_max_primitive_counts,
                    );
            }
        });

        self
    }

    /// Updates `dst_accel_struct` from `src_accel_struct` (UPDATE mode build) using the
    /// geometry data in `info` and `scratch_addr` as scratch memory.
    pub fn update_structure(
        &self,
        info: &AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let src_accel_struct = src_accel_struct.into();
        let dst_accel_struct = dst_accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.ranges.clear();

            for (geometry, range) in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.ranges.push(*range);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                        .ty(info.ty)
                        .flags(info.flags)
                        .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                        .dst_acceleration_structure(*self.bindings[dst_accel_struct])
                        .src_acceleration_structure(*self.bindings[src_accel_struct])
                        .geometries(&tls.geometries)
                        .scratch_data(scratch_addr)],
                    &[&tls.ranges],
                );
            }
        });

        self
    }

    /// Updates `dst_accel_struct` from `src_accel_struct` with the build ranges read from
    /// device memory at `range_base` (stride `range_stride` bytes per geometry).
    pub fn update_structure_indirect(
        &self,
        info: &AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        let src_accel_struct = src_accel_struct.into();
        let dst_accel_struct = dst_accel_struct.into();
        let scratch_addr = scratch_addr.into().into();

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.max_primitive_counts.clear();

            for geometry in info.geometries.iter() {
                tls.geometries.push(geometry.into());
                tls.max_primitive_counts.push(geometry.max_primitive_count);
            }

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &[vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.ty)
                            .flags(info.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                            .src_acceleration_structure(*self.bindings[src_accel_struct])
                            .dst_acceleration_structure(*self.bindings[dst_accel_struct])
                            .geometries(&tls.geometries)
                            .scratch_data(scratch_addr)],
                        &[range_base],
                        &[range_stride],
                        &[&tls.max_primitive_counts],
                    );
            }
        });

        self
    }

    /// Updates multiple acceleration structures in one command (UPDATE-mode equivalent of
    /// [`Self::build_structures`]).
    pub fn update_structures(&self, infos: &[AccelerationStructureUpdateInfo]) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            ranges: Vec<vk::AccelerationStructureBuildRangeInfoKHR>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.update_data.geometries.iter().map(|(geometry, _)| {
                    <&AccelerationStructureGeometry as Into<
                        vk::AccelerationStructureGeometryKHR,
                    >>::into(geometry)
                })
            }));

            tls.ranges.clear();
            tls.ranges.extend(
                infos
                    .iter()
                    .flat_map(|info| info.update_data.geometries.iter().map(|(_, range)| *range)),
            );

            // Re-slice the flattened ranges so each info gets its own sub-slice.
            let vk_ranges = {
                let mut start = 0;
                let mut vk_ranges = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.update_data.geometries.len();
                    vk_ranges.push(&tls.ranges[start..end]);
                    start = end;
                }

                vk_ranges
            };

            let vk_infos = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.update_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.update_data.ty)
                            .flags(info.update_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                            .dst_acceleration_structure(*self.bindings[info.dst_accel_struct])
                            .src_acceleration_structure(*self.bindings[info.src_accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_addr.into()),
                    );
                    start = end;
                }

                vk_infos
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device).cmd_build_acceleration_structures(
                    self.cmd_buf,
                    &vk_infos,
                    &vk_ranges,
                );
            }
        });

        self
    }

    /// Updates multiple acceleration structures with build ranges read indirectly from
    /// device memory (UPDATE-mode equivalent of [`Self::build_structures_indirect`]).
    pub fn update_structures_indirect(
        &self,
        infos: &[AccelerationStructureIndirectUpdateInfo],
    ) -> &Self {
        #[derive(Default)]
        struct Tls {
            geometries: Vec<vk::AccelerationStructureGeometryKHR<'static>>,
            max_primitive_counts: Vec<u32>,
            range_bases: Vec<vk::DeviceAddress>,
            range_strides: Vec<u32>,
        }

        thread_local! {
            static TLS: RefCell<Tls> = Default::default();
        }

        TLS.with_borrow_mut(|tls| {
            tls.geometries.clear();
            tls.geometries.extend(infos.iter().flat_map(|info| {
                info.update_data.geometries.iter().map(
                    <&AccelerationStructureGeometry as Into<
                        vk::AccelerationStructureGeometryKHR,
                    >>::into,
                )
            }));

            tls.max_primitive_counts.clear();
            tls.max_primitive_counts
                .extend(infos.iter().flat_map(|info| {
                    info.update_data
                        .geometries
                        .iter()
                        .map(|geometry| geometry.max_primitive_count)
                }));

            tls.range_bases.clear();
            tls.range_strides.clear();

            let (vk_infos, vk_max_primitive_counts) = {
                let mut start = 0;
                let mut vk_infos = Vec::with_capacity(infos.len());
                let mut vk_max_primitive_counts = Vec::with_capacity(infos.len());
                for info in infos {
                    let end = start + info.update_data.geometries.len();
                    vk_infos.push(
                        vk::AccelerationStructureBuildGeometryInfoKHR::default()
                            .ty(info.update_data.ty)
                            .flags(info.update_data.flags)
                            .mode(vk::BuildAccelerationStructureModeKHR::UPDATE)
                            .src_acceleration_structure(*self.bindings[info.src_accel_struct])
                            .dst_acceleration_structure(*self.bindings[info.dst_accel_struct])
                            .geometries(&tls.geometries[start..end])
                            .scratch_data(info.scratch_addr.into()),
                    );
                    vk_max_primitive_counts.push(&tls.max_primitive_counts[start..end]);
                    start = end;

                    tls.range_bases.push(info.range_base);
                    tls.range_strides.push(info.range_stride);
                }

                (vk_infos, vk_max_primitive_counts)
            };

            unsafe {
                Device::expect_accel_struct_ext(self.device)
                    .cmd_build_acceleration_structures_indirect(
                        self.cmd_buf,
                        &vk_infos,
                        &tls.range_bases,
                        &tls.range_strides,
                        &vk_max_primitive_counts,
                    );
            }
        });

        self
    }
}
/// Describes one acceleration structure build for [`Acceleration::build_structures`].
#[derive(Clone, Debug)]
pub struct AccelerationStructureBuildInfo {
    /// Destination acceleration structure node.
    pub accel_struct: AnyAccelerationStructureNode,
    /// Geometry plus per-geometry build range data.
    pub build_data: AccelerationStructureGeometryInfo<(
        AccelerationStructureGeometry,
        vk::AccelerationStructureBuildRangeInfoKHR,
    )>,
    /// Scratch memory used during the build.
    pub scratch_addr: DeviceOrHostAddress,
}
impl AccelerationStructureBuildInfo {
    /// Constructs a build description from a destination node, its geometry/range data, and
    /// a scratch address.
    pub fn new(
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        build_data: AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        Self {
            accel_struct: accel_struct.into(),
            build_data,
            scratch_addr: scratch_addr.into(),
        }
    }
}
/// Describes one indirect acceleration structure build for
/// [`Acceleration::build_structures_indirect`].
#[derive(Clone, Debug)]
pub struct AccelerationStructureIndirectBuildInfo {
    /// Destination acceleration structure node.
    pub accel_struct: AnyAccelerationStructureNode,
    /// Geometry data; build ranges are read from device memory instead.
    pub build_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
    /// Device address where the build range data begins.
    pub range_base: vk::DeviceAddress,
    /// Byte stride between consecutive build range records.
    pub range_stride: u32,
    /// Scratch memory used during the build.
    pub scratch_data: DeviceOrHostAddress,
}
impl AccelerationStructureIndirectBuildInfo {
    /// Constructs an indirect build description.
    pub fn new(
        accel_struct: impl Into<AnyAccelerationStructureNode>,
        build_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
        scratch_data: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        Self {
            accel_struct: accel_struct.into(),
            build_data,
            range_base,
            range_stride,
            scratch_data: scratch_data.into(),
        }
    }
}
/// Describes one indirect acceleration structure update for
/// [`Acceleration::update_structures_indirect`].
#[derive(Clone, Debug)]
pub struct AccelerationStructureIndirectUpdateInfo {
    /// Destination acceleration structure node.
    pub dst_accel_struct: AnyAccelerationStructureNode,
    /// Device address where the build range data begins.
    pub range_base: vk::DeviceAddress,
    /// Byte stride between consecutive build range records.
    pub range_stride: u32,
    /// Scratch memory used during the update.
    pub scratch_addr: DeviceOrHostAddress,
    /// Source acceleration structure the update reads from.
    pub src_accel_struct: AnyAccelerationStructureNode,
    /// Geometry data; build ranges are read from device memory instead.
    pub update_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
}
impl AccelerationStructureIndirectUpdateInfo {
    /// Constructs an indirect update description from source/destination nodes, geometry
    /// data, indirect range location, and a scratch address.
    pub fn new(
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        update_data: AccelerationStructureGeometryInfo<AccelerationStructureGeometry>,
        range_base: vk::DeviceAddress,
        range_stride: u32,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        Self {
            dst_accel_struct: dst_accel_struct.into(),
            range_base,
            range_stride,
            scratch_addr: scratch_addr.into(),
            src_accel_struct: src_accel_struct.into(),
            update_data,
        }
    }
}
/// Describes one acceleration structure update for [`Acceleration::update_structures`].
#[derive(Clone, Debug)]
pub struct AccelerationStructureUpdateInfo {
    /// Destination acceleration structure node.
    pub dst_accel_struct: AnyAccelerationStructureNode,
    /// Scratch memory used during the update.
    pub scratch_addr: DeviceOrHostAddress,
    /// Source acceleration structure the update reads from.
    pub src_accel_struct: AnyAccelerationStructureNode,
    /// Geometry plus per-geometry build range data.
    pub update_data: AccelerationStructureGeometryInfo<(
        AccelerationStructureGeometry,
        vk::AccelerationStructureBuildRangeInfoKHR,
    )>,
}
impl AccelerationStructureUpdateInfo {
    /// Constructs an update description from source/destination nodes, geometry/range data,
    /// and a scratch address.
    pub fn new(
        src_accel_struct: impl Into<AnyAccelerationStructureNode>,
        dst_accel_struct: impl Into<AnyAccelerationStructureNode>,
        update_data: AccelerationStructureGeometryInfo<(
            AccelerationStructureGeometry,
            vk::AccelerationStructureBuildRangeInfoKHR,
        )>,
        scratch_addr: impl Into<DeviceOrHostAddress>,
    ) -> Self {
        Self {
            dst_accel_struct: dst_accel_struct.into(),
            scratch_addr: scratch_addr.into(),
            src_accel_struct: src_accel_struct.into(),
            update_data,
        }
    }
}
/// Default access types used when a pipeline reads or writes a node without an explicit
/// access being specified.
pub trait Access {
    /// Access used for plain reads through this pipeline type.
    const DEFAULT_READ: AccessType;
    /// Access used for plain writes through this pipeline type.
    const DEFAULT_WRITE: AccessType;
}
impl Access for ComputePipeline {
    // Compute stages only; narrower than the any-shader variants below.
    const DEFAULT_READ: AccessType = AccessType::ComputeShaderReadOther;
    const DEFAULT_WRITE: AccessType = AccessType::ComputeShaderWrite;
}
impl Access for GraphicPipeline {
    // Any graphics shader stage may read/write, so the broad any-shader accesses are used.
    const DEFAULT_READ: AccessType = AccessType::AnyShaderReadSampledImageOrUniformTexelBuffer;
    const DEFAULT_WRITE: AccessType = AccessType::AnyShaderWrite;
}
impl Access for RayTracePipeline {
    const DEFAULT_READ: AccessType =
        AccessType::RayTracingShaderReadSampledImageOrUniformTexelBuffer;
    // NOTE(review): write uses the generic any-shader access rather than a ray-tracing
    // specific one — presumably because vk_sync has no narrower write variant; confirm.
    const DEFAULT_WRITE: AccessType = AccessType::AnyShaderWrite;
}
/// Generates, for a pipeline type (`Compute`, `Graphic`, `RayTrace`):
/// - `Bind` impls that attach `&Arc<…Pipeline>`, `Arc<…Pipeline>`, or a bare `…Pipeline`
///   to a `PassRef`, producing a typed `PipelinePassRef`, and
/// - `is_*`/`unwrap_*` helpers on `ExecutionPipeline`.
macro_rules! bind {
    ($name:ident) => {
        paste::paste! {
            // Bind by borrowed Arc: the Arc is cloned into the execution.
            impl<'a> Bind<PassRef<'a>, PipelinePassRef<'a, [<$name Pipeline>]>> for &'a Arc<[<$name Pipeline>]> {
                fn bind(self, mut pass: PassRef<'a>) -> PipelinePassRef<'a, [<$name Pipeline>]> {
                    let pass_ref = pass.as_mut();

                    // If the current execution already has a pipeline, start a new one
                    // rather than overwrite it.
                    if pass_ref.execs.last().unwrap().pipeline.is_some() {
                        pass_ref.execs.push(Default::default());
                    }

                    pass_ref.execs.last_mut().unwrap().pipeline = Some(ExecutionPipeline::$name(Arc::clone(self)));

                    PipelinePassRef {
                        __: PhantomData,
                        pass,
                    }
                }
            }

            // Bind by owned Arc: the Arc is moved into the execution.
            impl<'a> Bind<PassRef<'a>, PipelinePassRef<'a, [<$name Pipeline>]>> for Arc<[<$name Pipeline>]> {
                fn bind(self, mut pass: PassRef<'a>) -> PipelinePassRef<'a, [<$name Pipeline>]> {
                    let pass_ref = pass.as_mut();

                    if pass_ref.execs.last().unwrap().pipeline.is_some() {
                        pass_ref.execs.push(Default::default());
                    }

                    pass_ref.execs.last_mut().unwrap().pipeline = Some(ExecutionPipeline::$name(self));

                    PipelinePassRef {
                        __: PhantomData,
                        pass,
                    }
                }
            }

            // Bind by value: the pipeline is wrapped in a fresh Arc.
            impl<'a> Bind<PassRef<'a>, PipelinePassRef<'a, [<$name Pipeline>]>> for [<$name Pipeline>] {
                fn bind(self, mut pass: PassRef<'a>) -> PipelinePassRef<'a, [<$name Pipeline>]> {
                    let pass_ref = pass.as_mut();

                    if pass_ref.execs.last().unwrap().pipeline.is_some() {
                        pass_ref.execs.push(Default::default());
                    }

                    pass_ref.execs.last_mut().unwrap().pipeline = Some(ExecutionPipeline::$name(Arc::new(self)));

                    PipelinePassRef {
                        __: PhantomData,
                        pass,
                    }
                }
            }

            impl ExecutionPipeline {
                /// Returns `true` if this is the matching pipeline variant.
                #[allow(unused)]
                pub(super) fn [<is_ $name:snake>](&self) -> bool {
                    matches!(self, Self::$name(_))
                }

                /// Returns the inner pipeline; panics on any other variant.
                #[allow(unused)]
                pub(super) fn [<unwrap_ $name:snake>](&self) -> &Arc<[<$name Pipeline>]> {
                    if let Self::$name(binding) = self {
                        &binding
                    } else {
                        panic!();
                    }
                }
            }
        }
    };
}
// Instantiate the pipeline-binding impls for each supported pipeline type.
bind!(Compute);
bind!(Graphic);
bind!(RayTrace);
/// Read-only view resolving graph nodes to their bound Vulkan resources during one
/// execution. `Copy`, so it can be handed to closures freely.
#[derive(Clone, Copy, Debug)]
pub struct Bindings<'a> {
    // All bindings of the render graph, indexed by node index.
    bindings: &'a [Binding],
    // The execution whose declared accesses gate lookups (see `binding_ref`).
    exec: &'a Execution,
}
impl<'a> Bindings<'a> {
    /// Creates a view over the graph's bindings scoped to one execution.
    pub(super) fn new(bindings: &'a [Binding], exec: &'a Execution) -> Self {
        Self { bindings, exec }
    }

    /// Returns the binding at `node_idx`.
    ///
    /// Debug builds panic unless the current execution declared an access for the node —
    /// indexing an undeclared node means synchronization for it was never scheduled.
    fn binding_ref(&self, node_idx: usize) -> &Binding {
        debug_assert!(
            self.exec.accesses.contains_key(&node_idx),
            "unexpected node access: call access, read, or write first"
        );

        &self.bindings[node_idx]
    }
}
/// Generates an `Index<…Node>` impl on `Bindings` which resolves a typed node to its
/// bound driver object (`$handle`), panicking if the binding is of another kind.
macro_rules! index {
    ($name:ident, $handle:ident) => {
        paste::paste! {
            impl<'a> Index<[<$name Node>]> for Bindings<'a>
            {
                type Output = $handle;

                fn index(&self, node: [<$name Node>]) -> &Self::Output {
                    // `as_*` returns the typed binding; unwrap is safe because the node
                    // type and binding variant correspond by construction.
                    &*self.binding_ref(node.idx).[<as_ $name:snake>]().unwrap()
                }
            }
        }
    };
}
// Instantiate `Index` lookups for every node type; leases and swapchain images resolve to
// the same underlying driver handle type as their owned counterparts.
index!(AccelerationStructure, AccelerationStructure);
index!(AccelerationStructureLease, AccelerationStructure);
index!(Buffer, Buffer);
index!(BufferLease, Buffer);
index!(Image, Image);
index!(ImageLease, Image);
index!(SwapchainImage, Image);
impl Index<AnyAccelerationStructureNode> for Bindings<'_> {
    type Output = AccelerationStructure;

    /// Resolves either an owned or leased acceleration structure node to its driver object.
    fn index(&self, node: AnyAccelerationStructureNode) -> &Self::Output {
        match node {
            AnyAccelerationStructureNode::AccelerationStructure(node) => self
                .binding_ref(node.idx)
                .as_acceleration_structure()
                .unwrap(),
            AnyAccelerationStructureNode::AccelerationStructureLease(node) => self
                .binding_ref(node.idx)
                .as_acceleration_structure_lease()
                .unwrap(),
        }
    }
}
impl Index<AnyBufferNode> for Bindings<'_> {
    type Output = Buffer;

    /// Resolves either an owned or leased buffer node to its driver object.
    fn index(&self, node: AnyBufferNode) -> &Self::Output {
        match node {
            AnyBufferNode::Buffer(node) => self.binding_ref(node.idx).as_buffer().unwrap(),
            AnyBufferNode::BufferLease(node) => {
                self.binding_ref(node.idx).as_buffer_lease().unwrap()
            }
        }
    }
}
impl Index<AnyImageNode> for Bindings<'_> {
    type Output = Image;

    /// Resolves an owned, leased, or swapchain image node to its driver object.
    fn index(&self, node: AnyImageNode) -> &Self::Output {
        match node {
            AnyImageNode::Image(node) => self.binding_ref(node.idx).as_image().unwrap(),
            AnyImageNode::ImageLease(node) => {
                self.binding_ref(node.idx).as_image_lease().unwrap()
            }
            AnyImageNode::SwapchainImage(node) => {
                self.binding_ref(node.idx).as_swapchain_image().unwrap()
            }
        }
    }
}
/// Recording interface for compute commands; handed to the closure of a compute pass.
pub struct Compute<'a> {
    bindings: Bindings<'a>,
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,
    // The bound compute pipeline; used for its layout and push constant range.
    pipeline: Arc<ComputePipeline>,
}
impl Compute<'_> {
    /// Dispatches `group_count_x × group_count_y × group_count_z` workgroups
    /// (`vkCmdDispatch`).
    #[profiling::function]
    pub fn dispatch(&self, group_count_x: u32, group_count_y: u32, group_count_z: u32) -> &Self {
        unsafe {
            self.device
                .cmd_dispatch(self.cmd_buf, group_count_x, group_count_y, group_count_z);
        }

        self
    }

    /// Dispatches workgroups with a non-zero base workgroup id (`vkCmdDispatchBase`).
    #[profiling::function]
    pub fn dispatch_base(
        &self,
        base_group_x: u32,
        base_group_y: u32,
        base_group_z: u32,
        group_count_x: u32,
        group_count_y: u32,
        group_count_z: u32,
    ) -> &Self {
        unsafe {
            self.device.cmd_dispatch_base(
                self.cmd_buf,
                base_group_x,
                base_group_y,
                base_group_z,
                group_count_x,
                group_count_y,
                group_count_z,
            );
        }

        self
    }

    /// Dispatches workgroups with the group counts read from `args_buf` at `args_offset`
    /// (`vkCmdDispatchIndirect`).
    #[profiling::function]
    pub fn dispatch_indirect(
        &self,
        args_buf: impl Into<AnyBufferNode>,
        args_offset: vk::DeviceSize,
    ) -> &Self {
        let args_buf = args_buf.into();

        unsafe {
            self.device
                .cmd_dispatch_indirect(self.cmd_buf, *self.bindings[args_buf], args_offset);
        }

        self
    }

    /// Updates push constant data starting at byte offset zero.
    pub fn push_constants(&self, data: &[u8]) -> &Self {
        self.push_constants_offset(0, data)
    }

    /// Updates push constant data, where `offset` is the absolute byte offset of `data[0]`
    /// within the pipeline layout's push constant space.
    ///
    /// Only the intersection of `offset..offset + data.len()` with the pipeline's declared
    /// push constant range is written; bytes outside the range are silently ignored.
    #[profiling::function]
    pub fn push_constants_offset(&self, offset: u32, data: &[u8]) -> &Self {
        if let Some(push_const) = self.pipeline.push_constants {
            let push_const_end = push_const.offset + push_const.size;
            let data_end = offset + data.len() as u32;

            // Clip the caller's window to the pipeline's declared range.
            let end = data_end.min(push_const_end);
            let start = offset.max(push_const.offset);

            if end > start {
                trace!(
                    " push constants {:?} {}..{}",
                    push_const.stage_flags, start, end
                );

                unsafe {
                    self.device.cmd_push_constants(
                        self.cmd_buf,
                        self.pipeline.layout,
                        vk::ShaderStageFlags::COMPUTE,
                        // Fix: write at the clipped absolute offset `start`, matching the
                        // data slice below and the graphics implementation in
                        // `Draw::push_constants_offset`. Previously `push_const.offset`
                        // was passed, which mis-placed the data whenever
                        // `offset > push_const.offset`.
                        start,
                        &data[(start - offset) as usize..(end - offset) as usize],
                    );
                }
            }
        }

        self
    }
}
/// Identifies a shader descriptor as (set, binding) with an optional array element offset.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Descriptor {
    /// A specific element of an arrayed binding: (set, binding, array element).
    ArrayBinding(DescriptorSetIndex, BindingIndex, BindingOffset),
    /// A non-arrayed binding: (set, binding); array element is implicitly zero.
    Binding(DescriptorSetIndex, BindingIndex),
}
impl Descriptor {
    /// Normalizes to a `(set, binding, array element)` tuple; plain bindings use element 0.
    pub(super) fn into_tuple(self) -> (DescriptorSetIndex, BindingIndex, BindingOffset) {
        match self {
            Self::ArrayBinding(set_idx, binding_idx, elem_idx) => {
                (set_idx, binding_idx, elem_idx)
            }
            Self::Binding(set_idx, binding_idx) => (set_idx, binding_idx, 0),
        }
    }

    /// Returns the descriptor set index.
    pub(super) fn set(self) -> DescriptorSetIndex {
        self.into_tuple().0
    }
}
impl From<BindingIndex> for Descriptor {
    /// A bare binding index means binding N of descriptor set zero.
    fn from(binding_idx: BindingIndex) -> Self {
        Self::Binding(0, binding_idx)
    }
}
impl From<(DescriptorSetIndex, BindingIndex)> for Descriptor {
    /// `(set, binding)` maps directly to a non-arrayed binding.
    fn from((set_idx, binding_idx): (DescriptorSetIndex, BindingIndex)) -> Self {
        Self::Binding(set_idx, binding_idx)
    }
}
impl From<(BindingIndex, [BindingOffset; 1])> for Descriptor {
    /// `(binding, [element])` targets an array element in descriptor set zero.
    fn from((binding_idx, [elem_idx]): (BindingIndex, [BindingOffset; 1])) -> Self {
        Self::ArrayBinding(0, binding_idx, elem_idx)
    }
}
impl From<(DescriptorSetIndex, BindingIndex, [BindingOffset; 1])> for Descriptor {
    /// `(set, binding, [element])` targets an array element in an explicit set.
    fn from(
        (set_idx, binding_idx, [elem_idx]): (DescriptorSetIndex, BindingIndex, [BindingOffset; 1]),
    ) -> Self {
        Self::ArrayBinding(set_idx, binding_idx, elem_idx)
    }
}
/// Recording interface for graphics draw commands; handed to the closure of a graphics pass.
pub struct Draw<'a> {
    bindings: Bindings<'a>,
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,
    // The bound graphics pipeline; used for its layout and push constant ranges.
    pipeline: Arc<GraphicPipeline>,
}
impl Draw<'_> {
    /// Binds `buffer` as the index buffer at offset zero.
    pub fn bind_index_buffer(
        &self,
        buffer: impl Into<AnyBufferNode>,
        index_ty: vk::IndexType,
    ) -> &Self {
        self.bind_index_buffer_offset(buffer, index_ty, 0)
    }

    /// Binds `buffer` as the index buffer starting at `offset` (`vkCmdBindIndexBuffer`).
    #[profiling::function]
    pub fn bind_index_buffer_offset(
        &self,
        buffer: impl Into<AnyBufferNode>,
        index_ty: vk::IndexType,
        offset: vk::DeviceSize,
    ) -> &Self {
        let buffer = buffer.into();

        unsafe {
            self.device.cmd_bind_index_buffer(
                self.cmd_buf,
                *self.bindings[buffer],
                offset,
                index_ty,
            );
        }

        self
    }

    /// Binds `buffer` to vertex buffer binding zero at offset zero.
    pub fn bind_vertex_buffer(&self, buffer: impl Into<AnyBufferNode>) -> &Self {
        self.bind_vertex_buffer_offset(buffer, 0)
    }

    /// Binds `buffer` to vertex buffer binding zero at `offset` (`vkCmdBindVertexBuffers`).
    #[profiling::function]
    pub fn bind_vertex_buffer_offset(
        &self,
        buffer: impl Into<AnyBufferNode>,
        offset: vk::DeviceSize,
    ) -> &Self {
        use std::slice::from_ref;

        let buffer = buffer.into();

        unsafe {
            self.device.cmd_bind_vertex_buffers(
                self.cmd_buf,
                0,
                from_ref(&self.bindings[buffer]),
                from_ref(&offset),
            );
        }

        self
    }

    /// Binds multiple vertex buffers starting at `first_binding`; each item is a
    /// `(buffer node, byte offset)` pair.
    #[profiling::function]
    pub fn bind_vertex_buffers<B>(
        &self,
        first_binding: u32,
        buffer_offsets: impl IntoIterator<Item = (B, vk::DeviceSize)>,
    ) -> &Self
    where
        B: Into<AnyBufferNode>,
    {
        // Thread-local scratch vectors reused across calls to avoid per-call allocation.
        thread_local! {
            static BUFFERS_OFFSETS: RefCell<(Vec<vk::Buffer>, Vec<vk::DeviceSize>)> = Default::default();
        }

        BUFFERS_OFFSETS.with_borrow_mut(|(buffers, offsets)| {
            buffers.clear();
            offsets.clear();

            for (buffer, offset) in buffer_offsets {
                let buffer = buffer.into();
                buffers.push(*self.bindings[buffer]);
                offsets.push(offset);
            }

            unsafe {
                self.device.cmd_bind_vertex_buffers(
                    self.cmd_buf,
                    first_binding,
                    buffers.as_slice(),
                    offsets.as_slice(),
                );
            }
        });

        self
    }

    /// Records a non-indexed draw (`vkCmdDraw`).
    #[profiling::function]
    pub fn draw(
        &self,
        vertex_count: u32,
        instance_count: u32,
        first_vertex: u32,
        first_instance: u32,
    ) -> &Self {
        unsafe {
            self.device.cmd_draw(
                self.cmd_buf,
                vertex_count,
                instance_count,
                first_vertex,
                first_instance,
            );
        }

        self
    }

    /// Records an indexed draw (`vkCmdDrawIndexed`).
    #[profiling::function]
    pub fn draw_indexed(
        &self,
        index_count: u32,
        instance_count: u32,
        first_index: u32,
        vertex_offset: i32,
        first_instance: u32,
    ) -> &Self {
        unsafe {
            self.device.cmd_draw_indexed(
                self.cmd_buf,
                index_count,
                instance_count,
                first_index,
                vertex_offset,
                first_instance,
            );
        }

        self
    }

    /// Records indexed draws with parameters read from `buffer` (`vkCmdDrawIndexedIndirect`).
    #[profiling::function]
    pub fn draw_indexed_indirect(
        &self,
        buffer: impl Into<AnyBufferNode>,
        offset: vk::DeviceSize,
        draw_count: u32,
        stride: u32,
    ) -> &Self {
        let buffer = buffer.into();

        unsafe {
            self.device.cmd_draw_indexed_indirect(
                self.cmd_buf,
                *self.bindings[buffer],
                offset,
                draw_count,
                stride,
            );
        }

        self
    }

    /// Records indexed indirect draws whose draw count is itself read from `count_buf`
    /// (`vkCmdDrawIndexedIndirectCount`).
    #[profiling::function]
    pub fn draw_indexed_indirect_count(
        &self,
        buffer: impl Into<AnyBufferNode>,
        offset: vk::DeviceSize,
        count_buf: impl Into<AnyBufferNode>,
        count_buf_offset: vk::DeviceSize,
        max_draw_count: u32,
        stride: u32,
    ) -> &Self {
        let buffer = buffer.into();
        let count_buf = count_buf.into();

        unsafe {
            self.device.cmd_draw_indexed_indirect_count(
                self.cmd_buf,
                *self.bindings[buffer],
                offset,
                *self.bindings[count_buf],
                count_buf_offset,
                max_draw_count,
                stride,
            );
        }

        self
    }

    /// Records non-indexed draws with parameters read from `buffer` (`vkCmdDrawIndirect`).
    #[profiling::function]
    pub fn draw_indirect(
        &self,
        buffer: impl Into<AnyBufferNode>,
        offset: vk::DeviceSize,
        draw_count: u32,
        stride: u32,
    ) -> &Self {
        let buffer = buffer.into();

        unsafe {
            self.device.cmd_draw_indirect(
                self.cmd_buf,
                *self.bindings[buffer],
                offset,
                draw_count,
                stride,
            );
        }

        self
    }

    /// Records indirect draws whose draw count is itself read from `count_buf`
    /// (`vkCmdDrawIndirectCount`).
    #[profiling::function]
    pub fn draw_indirect_count(
        &self,
        buffer: impl Into<AnyBufferNode>,
        offset: vk::DeviceSize,
        count_buf: impl Into<AnyBufferNode>,
        count_buf_offset: vk::DeviceSize,
        max_draw_count: u32,
        stride: u32,
    ) -> &Self {
        let buffer = buffer.into();
        let count_buf = count_buf.into();

        unsafe {
            self.device.cmd_draw_indirect_count(
                self.cmd_buf,
                *self.bindings[buffer],
                offset,
                *self.bindings[count_buf],
                count_buf_offset,
                max_draw_count,
                stride,
            );
        }

        self
    }

    /// Updates push constant data starting at byte offset zero.
    pub fn push_constants(&self, data: &[u8]) -> &Self {
        self.push_constants_offset(0, data)
    }

    /// Updates push constant data, clipping `offset..offset + data.len()` against each of
    /// the pipeline's declared push constant ranges and writing only the overlaps, each
    /// with its own stage flags.
    #[profiling::function]
    pub fn push_constants_offset(&self, offset: u32, data: &[u8]) -> &Self {
        for push_const in self.pipeline.push_constants.iter() {
            let push_const_end = push_const.offset + push_const.size;
            let data_end = offset + data.len() as u32;
            let end = data_end.min(push_const_end);
            let start = offset.max(push_const.offset);

            if end > start {
                trace!(
                    " push constants {:?} {}..{}",
                    push_const.stage_flags, start, end
                );

                unsafe {
                    self.device.cmd_push_constants(
                        self.cmd_buf,
                        self.pipeline.layout,
                        push_const.stage_flags,
                        start,
                        &data[(start - offset) as usize..(end - offset) as usize],
                    );
                }
            }
        }

        self
    }

    /// Sets scissor rectangle zero (`vkCmdSetScissor`).
    #[profiling::function]
    pub fn set_scissor(&self, x: i32, y: i32, width: u32, height: u32) -> &Self {
        unsafe {
            self.device.cmd_set_scissor(
                self.cmd_buf,
                0,
                &[vk::Rect2D {
                    extent: vk::Extent2D { width, height },
                    offset: vk::Offset2D { x, y },
                }],
            );
        }

        self
    }

    /// Sets multiple scissor rectangles starting at `first_scissor`.
    #[profiling::function]
    pub fn set_scissors<S>(
        &self,
        first_scissor: u32,
        scissors: impl IntoIterator<Item = S>,
    ) -> &Self
    where
        S: Into<vk::Rect2D>,
    {
        // Thread-local scratch vector reused across calls to avoid per-call allocation.
        thread_local! {
            static SCISSORS: RefCell<Vec<vk::Rect2D>> = Default::default();
        }

        SCISSORS.with_borrow_mut(|scissors_vec| {
            scissors_vec.clear();

            for scissor in scissors {
                scissors_vec.push(scissor.into());
            }

            unsafe {
                self.device
                    .cmd_set_scissor(self.cmd_buf, first_scissor, scissors_vec.as_slice());
            }
        });

        self
    }

    /// Sets viewport zero with the given rectangle and depth range (`vkCmdSetViewport`).
    #[profiling::function]
    pub fn set_viewport(
        &self,
        x: f32,
        y: f32,
        width: f32,
        height: f32,
        depth: Range<f32>,
    ) -> &Self {
        unsafe {
            self.device.cmd_set_viewport(
                self.cmd_buf,
                0,
                &[vk::Viewport {
                    x,
                    y,
                    width,
                    height,
                    min_depth: depth.start,
                    max_depth: depth.end,
                }],
            );
        }

        self
    }

    /// Sets multiple viewports starting at `first_viewport`.
    #[profiling::function]
    pub fn set_viewports<V>(
        &self,
        first_viewport: u32,
        viewports: impl IntoIterator<Item = V>,
    ) -> &Self
    where
        V: Into<vk::Viewport>,
    {
        // Thread-local scratch vector reused across calls to avoid per-call allocation.
        thread_local! {
            static VIEWPORTS: RefCell<Vec<vk::Viewport>> = Default::default();
        }

        VIEWPORTS.with_borrow_mut(|viewports_vec| {
            viewports_vec.clear();

            for viewport in viewports {
                viewports_vec.push(viewport.into());
            }

            unsafe {
                self.device.cmd_set_viewport(
                    self.cmd_buf,
                    first_viewport,
                    viewports_vec.as_slice(),
                );
            }
        });

        self
    }
}
/// Builder handle for one pass of a [`RenderGraph`].
pub struct PassRef<'a> {
    // Index of the execution currently being composed within the pass.
    pub(super) exec_idx: usize,
    pub(super) graph: &'a mut RenderGraph,
    // Index of this pass within `graph.passes`.
    pub(super) pass_idx: usize,
}
impl<'a> PassRef<'a> {
pub(super) fn new(graph: &'a mut RenderGraph, name: String) -> PassRef<'a> {
let pass_idx = graph.passes.len();
graph.passes.push(Pass {
execs: vec![Default::default()], name,
});
Self {
exec_idx: 0,
graph,
pass_idx,
}
}
    /// Declares that this pass accesses `node` with `access`, consuming and returning the
    /// builder for chaining.
    pub fn access_node(mut self, node: impl Node + Information, access: AccessType) -> Self {
        self.access_node_mut(node, access);
        self
    }
    /// Declares that this pass accesses the whole of `node` with `access`.
    ///
    /// The access range covers the entire resource: the full byte range for buffers, the
    /// default view for images, or the whole structure for acceleration structures.
    pub fn access_node_mut(&mut self, node: impl Node + Information, access: AccessType) {
        self.assert_bound_graph_node(node);

        let idx = node.index();
        let binding = &self.graph.bindings[idx];
        let node_access_range = if let Some(buf) = binding.as_driver_buffer() {
            Subresource::Buffer((0..buf.info.size).into())
        } else if let Some(image) = binding.as_driver_image() {
            Subresource::Image(image.info.default_view_info().into())
        } else {
            Subresource::AccelerationStructure
        };

        self.push_node_access(node, access, node_access_range);
    }
    /// Declares that this pass accesses only `subresource` of `node` with `access`,
    /// consuming and returning the builder for chaining.
    pub fn access_node_subrange<N>(
        mut self,
        node: N,
        access: AccessType,
        subresource: impl Into<N::Subresource>,
    ) -> Self
    where
        N: View,
    {
        self.access_node_subrange_mut(node, access, subresource);
        self
    }
    /// Declares that this pass accesses only `subresource` of `node` with `access`.
    pub fn access_node_subrange_mut<N>(
        &mut self,
        node: N,
        access: AccessType,
        subresource: impl Into<N::Subresource>,
    ) where
        N: View,
    {
        self.push_node_access(node, access, subresource.into().into());
    }
fn as_mut(&mut self) -> &mut Pass {
&mut self.graph.passes[self.pass_idx]
}
fn as_ref(&self) -> &Pass {
&self.graph.passes[self.pass_idx]
}
fn assert_bound_graph_node(&self, node: impl Node) {
let idx = node.index();
assert!(self.graph.bindings[idx].is_bound());
}
pub fn bind_node<'b, B>(&'b mut self, binding: B) -> <B as Edge<RenderGraph>>::Result
where
B: Edge<RenderGraph>,
B: Bind<&'b mut RenderGraph, <B as Edge<RenderGraph>>::Result>,
{
self.graph.bind_node(binding)
}
pub fn bind_pipeline<B>(self, binding: B) -> <B as Edge<Self>>::Result
where
B: Edge<Self>,
B: Bind<Self, <B as Edge<Self>>::Result>,
{
binding.bind(self)
}
pub fn node_info<N>(&self, node: N) -> <N as Information>::Info
where
N: Information,
{
node.get(self.graph)
}
fn push_execute(
&mut self,
func: impl FnOnce(&Device, vk::CommandBuffer, Bindings<'_>) + Send + 'static,
) {
let pass = self.as_mut();
let exec = {
let last_exec = pass.execs.last_mut().unwrap();
last_exec.func = Some(ExecutionFunction(Box::new(func)));
Execution {
pipeline: last_exec.pipeline.clone(),
..Default::default()
}
};
pass.execs.push(exec);
self.exec_idx += 1;
}
fn push_node_access(&mut self, node: impl Node, access: AccessType, subresource: Subresource) {
let node_idx = node.index();
self.assert_bound_graph_node(node);
let access = SubresourceAccess {
access,
subresource,
};
self.as_mut()
.execs
.last_mut()
.unwrap()
.accesses
.entry(node_idx)
.and_modify(|accesses| accesses.push(access))
.or_insert(vec![access]);
}
pub fn read_node(mut self, node: impl Node + Information) -> Self {
self.read_node_mut(node);
self
}
pub fn read_node_mut(&mut self, node: impl Node + Information) {
self.access_node_mut(
node,
AccessType::AnyShaderReadSampledImageOrUniformTexelBuffer,
);
}
pub fn record_acceleration(
mut self,
func: impl FnOnce(Acceleration<'_>, Bindings<'_>) + Send + 'static,
) -> Self {
self.push_execute(move |device, cmd_buf, bindings| {
func(
Acceleration {
bindings,
cmd_buf,
device,
},
bindings,
);
});
self
}
pub fn record_cmd_buf(
mut self,
func: impl FnOnce(&Device, vk::CommandBuffer, Bindings<'_>) + Send + 'static,
) -> Self {
self.push_execute(func);
self
}
pub fn submit_pass(self) -> &'a mut RenderGraph {
if self.exec_idx == 0 {
self.graph.passes.pop();
}
self.graph
}
pub fn write_node(mut self, node: impl Node + Information) -> Self {
self.write_node_mut(node);
self
}
pub fn write_node_mut(&mut self, node: impl Node + Information) {
self.access_node_mut(node, AccessType::AnyShaderWrite);
}
}
/// A pass recording context specialized for a bound pipeline type `T`
/// (e.g. compute or graphic), providing descriptor and attachment helpers.
pub struct PipelinePassRef<'a, T>
where
    T: Access,
{
    // Zero-sized marker tying this recorder to the pipeline type `T`.
    __: PhantomData<T>,
    // The underlying pass being recorded.
    pass: PassRef<'a>,
}
/// Recording methods shared by every pipeline type.
impl<'a, T> PipelinePassRef<'a, T>
where
    T: Access,
{
    /// Declares `access` on `node` and binds it to `descriptor`, deriving the
    /// view information from the node's own info.
    pub fn access_descriptor<N>(
        self,
        descriptor: impl Into<Descriptor>,
        node: N,
        access: AccessType,
    ) -> Self
    where
        N: Information,
        N: View,
        ViewType: From<<N as View>::Information>,
        <N as View>::Information: From<<N as Information>::Info>,
        <N as View>::Subresource: From<<N as View>::Information>,
    {
        let view_info = node.get(self.pass.graph);

        self.access_descriptor_as(descriptor, node, access, view_info)
    }

    /// Declares `access` on `node` and binds it to `descriptor` using an
    /// explicit view; the accessed subresource is derived from the view.
    pub fn access_descriptor_as<N>(
        self,
        descriptor: impl Into<Descriptor>,
        node: N,
        access: AccessType,
        view_info: impl Into<N::Information>,
    ) -> Self
    where
        N: View,
        <N as View>::Information: Into<ViewType>,
        <N as View>::Subresource: From<<N as View>::Information>,
    {
        let view_info = view_info.into();
        let subresource = <N as View>::Subresource::from(view_info);

        self.access_descriptor_subrange(descriptor, node, access, view_info, subresource)
    }

    /// Declares `access` on only `subresource` of `node` and binds the given
    /// view to `descriptor`.
    pub fn access_descriptor_subrange<N>(
        mut self,
        descriptor: impl Into<Descriptor>,
        node: N,
        access: AccessType,
        view_info: impl Into<N::Information>,
        subresource: impl Into<N::Subresource>,
    ) -> Self
    where
        N: View,
        <N as View>::Information: Into<ViewType>,
    {
        self.pass
            .push_node_access(node, access, subresource.into().into());
        self.push_node_view_bind(node, view_info.into(), descriptor.into());

        self
    }

    /// Declares `access` on the entire subresource range of `node`, returning
    /// `self` for chaining.
    pub fn access_node(mut self, node: impl Node + Information, access: AccessType) -> Self {
        self.access_node_mut(node, access);
        self
    }

    /// Declares `access` on the entire subresource range of `node`: the whole
    /// buffer, the image's default view, or the acceleration structure itself.
    pub fn access_node_mut(&mut self, node: impl Node + Information, access: AccessType) {
        self.pass.assert_bound_graph_node(node);

        let idx = node.index();
        let binding = &self.pass.graph.bindings[idx];
        let node_access_range = if let Some(buf) = binding.as_driver_buffer() {
            Subresource::Buffer((0..buf.info.size).into())
        } else if let Some(image) = binding.as_driver_image() {
            Subresource::Image(image.info.default_view_info().into())
        } else {
            Subresource::AccelerationStructure
        };

        self.pass.push_node_access(node, access, node_access_range);
    }

    /// Declares `access` on only `subresource` of `node`, returning `self`.
    pub fn access_node_subrange<N>(
        mut self,
        node: N,
        access: AccessType,
        subresource: impl Into<N::Subresource>,
    ) -> Self
    where
        N: View,
    {
        self.access_node_subrange_mut(node, access, subresource);
        self
    }

    /// Declares `access` on only `subresource` of `node`.
    pub fn access_node_subrange_mut<N>(
        &mut self,
        node: N,
        access: AccessType,
        subresource: impl Into<N::Subresource>,
    ) where
        N: View,
    {
        self.pass
            .push_node_access(node, access, subresource.into().into());
    }

    /// Binds a resource to the graph, as [`RenderGraph::bind_node`].
    pub fn bind_node<'b, B>(&'b mut self, binding: B) -> <B as Edge<RenderGraph>>::Result
    where
        B: Edge<RenderGraph>,
        B: Bind<&'b mut RenderGraph, <B as Edge<RenderGraph>>::Result>,
    {
        self.pass.graph.bind_node(binding)
    }

    /// Returns the information associated with a bound node.
    pub fn node_info<N>(&self, node: N) -> <N as Information>::Info
    where
        N: Information,
    {
        node.get(self.pass.graph)
    }

    // Records `(node, view)` for `binding` on the current execution.
    // Panics if that descriptor slot was already bound on this execution.
    fn push_node_view_bind(
        &mut self,
        node: impl Node,
        view_info: impl Into<ViewType>,
        binding: Descriptor,
    ) {
        let node_idx = node.index();

        self.pass.assert_bound_graph_node(node);
        assert!(
            self.pass
                .as_mut()
                .execs
                .last_mut()
                .unwrap()
                .bindings
                .insert(binding, (node_idx, Some(view_info.into())))
                .is_none(),
            "descriptor {binding:?} has already been bound"
        );
    }

    /// Binds `node` to `descriptor` for reading, using the pipeline type's
    /// default read access and the node's own view info.
    pub fn read_descriptor<N>(self, descriptor: impl Into<Descriptor>, node: N) -> Self
    where
        N: Information,
        N: View,
        ViewType: From<<N as View>::Information>,
        <N as View>::Information: From<<N as Information>::Info>,
        <N as View>::Subresource: From<<N as View>::Information>,
    {
        let view_info = node.get(self.pass.graph);

        self.read_descriptor_as(descriptor, node, view_info)
    }

    /// Binds `node` to `descriptor` for reading with an explicit view.
    pub fn read_descriptor_as<N>(
        self,
        descriptor: impl Into<Descriptor>,
        node: N,
        view_info: impl Into<N::Information>,
    ) -> Self
    where
        N: View,
        <N as View>::Information: Into<ViewType>,
        <N as View>::Subresource: From<<N as View>::Information>,
    {
        let view_info = view_info.into();
        let subresource = <N as View>::Subresource::from(view_info);

        self.read_descriptor_subrange(descriptor, node, view_info, subresource)
    }

    /// Binds `node` to `descriptor` for reading only `subresource`, using the
    /// pipeline type's default read access.
    pub fn read_descriptor_subrange<N>(
        self,
        descriptor: impl Into<Descriptor>,
        node: N,
        view_info: impl Into<N::Information>,
        subresource: impl Into<N::Subresource>,
    ) -> Self
    where
        N: View,
        <N as View>::Information: Into<ViewType>,
    {
        let access = <T as Access>::DEFAULT_READ;

        self.access_descriptor_subrange(descriptor, node, access, view_info, subresource)
    }

    /// Declares a default-read access of the whole of `node`, returning `self`.
    pub fn read_node(mut self, node: impl Node + Information) -> Self {
        self.read_node_mut(node);
        self
    }

    /// Declares a default-read access of the whole of `node`.
    pub fn read_node_mut(&mut self, node: impl Node + Information) {
        let access = <T as Access>::DEFAULT_READ;

        self.access_node_mut(node, access);
    }

    /// Declares a default-read access of only `subresource`, returning `self`.
    pub fn read_node_subrange<N>(mut self, node: N, subresource: impl Into<N::Subresource>) -> Self
    where
        N: View,
    {
        self.read_node_subrange_mut(node, subresource);
        self
    }

    /// Declares a default-read access of only `subresource` of `node`.
    pub fn read_node_subrange_mut<N>(&mut self, node: N, subresource: impl Into<N::Subresource>)
    where
        N: View,
    {
        let access = <T as Access>::DEFAULT_READ;

        self.access_node_subrange_mut(node, access, subresource);
    }

    /// Finishes recording this pass; see [`PassRef::submit_pass`].
    pub fn submit_pass(self) -> &'a mut RenderGraph {
        self.pass.submit_pass()
    }

    /// Binds `node` to `descriptor` for writing, using the pipeline type's
    /// default write access and the node's own view info.
    pub fn write_descriptor<N>(self, descriptor: impl Into<Descriptor>, node: N) -> Self
    where
        N: Information,
        N: View,
        <N as View>::Information: Into<ViewType>,
        <N as View>::Information: From<<N as Information>::Info>,
        <N as View>::Subresource: From<<N as View>::Information>,
    {
        let view_info = node.get(self.pass.graph);

        self.write_descriptor_as(descriptor, node, view_info)
    }

    /// Binds `node` to `descriptor` for writing with an explicit view.
    pub fn write_descriptor_as<N>(
        self,
        descriptor: impl Into<Descriptor>,
        node: N,
        view_info: impl Into<N::Information>,
    ) -> Self
    where
        N: View,
        <N as View>::Information: Into<ViewType>,
        <N as View>::Subresource: From<<N as View>::Information>,
    {
        let view_info = view_info.into();
        let subresource = <N as View>::Subresource::from(view_info);

        self.write_descriptor_subrange(descriptor, node, view_info, subresource)
    }

    /// Binds `node` to `descriptor` for writing only `subresource`, using the
    /// pipeline type's default write access.
    pub fn write_descriptor_subrange<N>(
        self,
        descriptor: impl Into<Descriptor>,
        node: N,
        view_info: impl Into<N::Information>,
        subresource: impl Into<N::Subresource>,
    ) -> Self
    where
        N: View,
        <N as View>::Information: Into<ViewType>,
    {
        let access = <T as Access>::DEFAULT_WRITE;

        self.access_descriptor_subrange(descriptor, node, access, view_info, subresource)
    }

    /// Declares a default-write access of the whole of `node`, returning `self`.
    pub fn write_node(mut self, node: impl Node + Information) -> Self {
        self.write_node_mut(node);
        self
    }

    /// Declares a default-write access of the whole of `node`.
    pub fn write_node_mut(&mut self, node: impl Node + Information) {
        let access = <T as Access>::DEFAULT_WRITE;

        self.access_node_mut(node, access);
    }

    /// Declares a default-write access of only `subresource`, returning `self`.
    pub fn write_node_subrange<N>(mut self, node: N, subresource: impl Into<N::Subresource>) -> Self
    where
        N: View,
    {
        self.write_node_subrange_mut(node, subresource);
        self
    }

    /// Declares a default-write access of only `subresource` of `node`.
    pub fn write_node_subrange_mut<N>(&mut self, node: N, subresource: impl Into<N::Subresource>)
    where
        N: View,
    {
        let access = <T as Access>::DEFAULT_WRITE;

        self.access_node_subrange_mut(node, access, subresource);
    }
}
impl PipelinePassRef<'_, ComputePipeline> {
    /// Records a function which dispatches work on the bound
    /// [`ComputePipeline`].
    ///
    /// # Panics
    ///
    /// Panics if no compute pipeline has been bound to the current execution.
    pub fn record_compute(
        mut self,
        func: impl FnOnce(Compute<'_>, Bindings<'_>) + Send + 'static,
    ) -> Self {
        let exec = self.pass.as_ref().execs.last().unwrap();
        let pipeline = Arc::clone(exec.pipeline.as_ref().unwrap().unwrap_compute());

        self.pass.push_execute(move |device, cmd_buf, bindings| {
            let compute = Compute {
                bindings,
                cmd_buf,
                device,
                pipeline,
            };
            func(compute, bindings);
        });

        self
    }
}
impl PipelinePassRef<'_, GraphicPipeline> {
pub fn attach_color(
self,
attachment_idx: AttachmentIndex,
image: impl Into<AnyImageNode>,
) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.attach_color_as(attachment_idx, image, image_view_info)
}
/// Attaches `image` as color attachment `attachment_idx` with an explicit
/// view, and records a `ColorAttachmentWrite` access for the view's range.
///
/// # Panics (debug builds)
///
/// Panics if this index is already used by a clear or load, or if the new
/// attachment is incompatible with an existing resolve or store at the same
/// index.
pub fn attach_color_as(
    mut self,
    attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // An index may be attached by exactly one of: attach, clear, or load.
    debug_assert!(
        !self
            .pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .color_clears
            .contains_key(&attachment_idx),
        "color attachment {attachment_idx} already attached via clear"
    );
    debug_assert!(
        !self
            .pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .color_loads
            .contains_key(&attachment_idx),
        "color attachment {attachment_idx} already attached via load"
    );

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .color_attachments
        .insert(
            attachment_idx,
            Attachment::new(image_view_info, sample_count, node_idx),
        );

    // The new attachment must agree with any resolve/store already recorded
    // for the same index on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_resolves
                .get(&attachment_idx)
                .map(|(attachment, _)| *attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_attachments
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} incompatible with existing resolve"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_stores
                .get(&attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_attachments
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} incompatible with existing store"
    );

    self.pass.push_node_access(
        image,
        AccessType::ColorAttachmentWrite,
        Subresource::Image(image_view_info.into()),
    );

    self
}
pub fn attach_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.attach_depth_stencil_as(image, image_view_info)
}
/// Attaches `image` as the depth/stencil attachment with an explicit view.
///
/// The recorded access depends on the view's aspect mask: write access to
/// whichever of depth/stencil the view covers, read-only for the other.
///
/// # Panics (debug builds)
///
/// Panics if a depth/stencil clear or load is already recorded, or the
/// attachment is incompatible with an existing resolve or store.
pub fn attach_depth_stencil_as(
    mut self,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // Depth/stencil may be attached by exactly one of: attach, clear, or load.
    debug_assert!(
        self.pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .depth_stencil_clear
            .is_none(),
        "depth/stencil attachment already attached via clear"
    );
    debug_assert!(
        self.pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .depth_stencil_load
            .is_none(),
        "depth/stencil attachment already attached via load"
    );

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .depth_stencil_attachment =
        Some(Attachment::new(image_view_info, sample_count, node_idx));

    // Must agree with any resolve/store already recorded on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_resolve
                .map(|(attachment, ..)| attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_attachment
        ),
        "depth/stencil attachment incompatible with existing resolve"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass.as_ref().execs.last().unwrap().depth_stencil_store,
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_attachment
        ),
        "depth/stencil attachment incompatible with existing store"
    );

    self.pass.push_node_access(
        image,
        // Write whichever aspects the view covers; the remaining aspect is
        // declared read-only.
        if image_view_info
            .aspect_mask
            .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
        {
            AccessType::DepthStencilAttachmentWrite
        } else if image_view_info
            .aspect_mask
            .contains(vk::ImageAspectFlags::DEPTH)
        {
            AccessType::DepthAttachmentWriteStencilReadOnly
        } else {
            AccessType::StencilAttachmentWriteDepthReadOnly
        },
        Subresource::Image(image_view_info.into()),
    );

    self
}
/// Clears color attachment `attachment_idx` to transparent black.
pub fn clear_color(
    self,
    attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
) -> Self {
    // All-zero RGBA, identical to the documented default clear color.
    let transparent_black = [0.0; 4];

    self.clear_color_value(attachment_idx, image, transparent_black)
}
pub fn clear_color_value(
self,
attachment_idx: AttachmentIndex,
image: impl Into<AnyImageNode>,
color: impl Into<ClearColorValue>,
) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.clear_color_value_as(attachment_idx, image, color, image_view_info)
}
/// Clears color attachment `attachment_idx` to `color` with an explicit view,
/// recording (or upgrading) a color-attachment write access for its range.
///
/// # Panics (debug builds)
///
/// Panics if this index is already attached or loaded, or the clear is
/// incompatible with an existing resolve or store at the same index.
pub fn clear_color_value_as(
    mut self,
    attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
    color: impl Into<ClearColorValue>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);
    let color = color.into();

    // An index may be attached by exactly one of: attach, clear, or load.
    debug_assert!(
        !self
            .pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .color_attachments
            .contains_key(&attachment_idx),
        "color attachment {attachment_idx} already attached"
    );
    debug_assert!(
        !self
            .pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .color_loads
            .contains_key(&attachment_idx),
        "color attachment {attachment_idx} already attached via load"
    );

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .color_clears
        .insert(
            attachment_idx,
            (
                Attachment::new(image_view_info, sample_count, node_idx),
                color,
            ),
        );

    // The clear must agree with any resolve/store already recorded for the
    // same index on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_resolves
                .get(&attachment_idx)
                .map(|(attachment, _)| *attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_clears
                .get(&attachment_idx)
                .map(|(attachment, _)| *attachment)
        ),
        "color attachment {attachment_idx} clear incompatible with existing resolve"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_stores
                .get(&attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_clears
                .get(&attachment_idx)
                .map(|(attachment, _)| *attachment)
        ),
        "color attachment {attachment_idx} clear incompatible with existing store"
    );

    let mut image_access = AccessType::ColorAttachmentWrite;
    let image_range = image_view_info.into();

    // Merge with earlier color-attachment accesses on overlapping ranges:
    // a prior read upgrades to read/write in place. If a prior access fully
    // contains this range, no new access record is needed.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }

            image_access = match *access {
                AccessType::ColorAttachmentRead | AccessType::ColorAttachmentReadWrite => {
                    AccessType::ColorAttachmentReadWrite
                }
                AccessType::ColorAttachmentWrite => AccessType::ColorAttachmentWrite,
                _ => continue,
            };
            *access = image_access;

            if image_subresource_range_contains(access_image_range, image_range) {
                // Existing access already covers the whole cleared range.
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
/// Clears the depth/stencil attachment to the conventional defaults.
pub fn clear_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
    // Farthest depth (1.0) and a zeroed stencil value.
    let (depth, stencil) = (1.0, 0);

    self.clear_depth_stencil_value(image, depth, stencil)
}
pub fn clear_depth_stencil_value(
self,
image: impl Into<AnyImageNode>,
depth: f32,
stencil: u32,
) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.clear_depth_stencil_value_as(image, depth, stencil, image_view_info)
}
/// Clears the depth/stencil attachment to `depth`/`stencil` with an explicit
/// view, recording (or upgrading) the appropriate attachment access.
///
/// # Panics (debug builds)
///
/// Panics if a depth/stencil attach or load is already recorded, or the clear
/// is incompatible with an existing resolve or store.
pub fn clear_depth_stencil_value_as(
    mut self,
    image: impl Into<AnyImageNode>,
    depth: f32,
    stencil: u32,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // Depth/stencil may be attached by exactly one of: attach, clear, or load.
    debug_assert!(
        self.pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .depth_stencil_attachment
            .is_none(),
        "depth/stencil attachment already attached"
    );
    debug_assert!(
        self.pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .depth_stencil_load
            .is_none(),
        "depth/stencil attachment already attached via load"
    );

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .depth_stencil_clear = Some((
        Attachment::new(image_view_info, sample_count, node_idx),
        vk::ClearDepthStencilValue { depth, stencil },
    ));

    // Must agree with any resolve/store already recorded on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_resolve
                .map(|(attachment, ..)| attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_clear
                .map(|(attachment, _)| attachment)
        ),
        "depth/stencil attachment clear incompatible with existing resolve"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass.as_ref().execs.last().unwrap().depth_stencil_store,
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_clear
                .map(|(attachment, _)| attachment)
        ),
        "depth/stencil attachment clear incompatible with existing store"
    );

    // Write whichever aspects the view covers; the other aspect is read-only.
    let mut image_access = if image_view_info
        .aspect_mask
        .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
    {
        AccessType::DepthStencilAttachmentWrite
    } else if image_view_info
        .aspect_mask
        .contains(vk::ImageAspectFlags::DEPTH)
    {
        AccessType::DepthAttachmentWriteStencilReadOnly
    } else {
        debug_assert!(
            image_view_info
                .aspect_mask
                .contains(vk::ImageAspectFlags::STENCIL)
        );

        AccessType::StencilAttachmentWriteDepthReadOnly
    };
    let image_range = image_view_info.into();

    // Merge with earlier depth/stencil accesses on overlapping ranges,
    // upgrading reads to read/write in place; stop early when a prior access
    // already covers the whole cleared range.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }

            image_access = match *access {
                AccessType::DepthAttachmentWriteStencilReadOnly => {
                    if image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::STENCIL)
                    {
                        AccessType::DepthStencilAttachmentReadWrite
                    } else {
                        AccessType::DepthAttachmentWriteStencilReadOnly
                    }
                }
                AccessType::DepthStencilAttachmentRead => {
                    // NOTE(review): the analogous branch in
                    // `resolve_depth_stencil_as` upgrades this case to
                    // `DepthStencilAttachmentReadWrite` when DEPTH is present;
                    // confirm the asymmetry here is intentional.
                    if !image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::DEPTH)
                    {
                        AccessType::StencilAttachmentWriteDepthReadOnly
                    } else {
                        AccessType::DepthAttachmentWriteStencilReadOnly
                    }
                }
                AccessType::DepthStencilAttachmentWrite => {
                    AccessType::DepthStencilAttachmentWrite
                }
                AccessType::StencilAttachmentWriteDepthReadOnly => {
                    if image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::DEPTH)
                    {
                        AccessType::DepthStencilAttachmentReadWrite
                    } else {
                        AccessType::StencilAttachmentWriteDepthReadOnly
                    }
                }
                _ => continue,
            };
            *access = image_access;

            if image_subresource_range_contains(access_image_range, image_range) {
                // Existing access already covers the whole cleared range.
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
// Looks up the format and sample count of the bound image at `node_idx`.
// Panics if the node is not bound as a driver image.
fn image_info(&self, node_idx: NodeIndex) -> (vk::Format, SampleCount) {
    let info = self.pass.graph.bindings[node_idx]
        .as_driver_image()
        .unwrap()
        .info;

    (info.fmt, info.sample_count)
}
pub fn load_color(
self,
attachment_idx: AttachmentIndex,
image: impl Into<AnyImageNode>,
) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.load_color_as(attachment_idx, image, image_view_info)
}
/// Loads the existing contents of `image` into color attachment
/// `attachment_idx` with an explicit view, recording (or upgrading) a
/// color-attachment read access for its range.
///
/// # Panics (debug builds)
///
/// Panics if this index is already attached or cleared, or the load is
/// incompatible with an existing resolve or store at the same index.
pub fn load_color_as(
    mut self,
    attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // An index may be attached by exactly one of: attach, clear, or load.
    debug_assert!(
        !self
            .pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .color_attachments
            .contains_key(&attachment_idx),
        "color attachment {attachment_idx} already attached"
    );
    debug_assert!(
        !self
            .pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .color_clears
            .contains_key(&attachment_idx),
        "color attachment {attachment_idx} already attached via clear"
    );

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .color_loads
        .insert(
            attachment_idx,
            Attachment::new(image_view_info, sample_count, node_idx),
        );

    // The load must agree with any resolve/store already recorded for the
    // same index on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_resolves
                .get(&attachment_idx)
                .map(|(attachment, _)| *attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_loads
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} load incompatible with existing resolve"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_stores
                .get(&attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_loads
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} load incompatible with existing store"
    );

    let mut image_access = AccessType::ColorAttachmentRead;
    let image_range = image_view_info.into();

    // Merge with earlier color-attachment accesses on overlapping ranges:
    // a prior write upgrades to read/write in place. If a prior access fully
    // contains this range, no new access record is needed.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }

            image_access = match *access {
                AccessType::ColorAttachmentRead => AccessType::ColorAttachmentRead,
                AccessType::ColorAttachmentReadWrite | AccessType::ColorAttachmentWrite => {
                    AccessType::ColorAttachmentReadWrite
                }
                _ => continue,
            };
            *access = image_access;

            if image_subresource_range_contains(access_image_range, image_range) {
                // Existing access already covers the whole loaded range.
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
pub fn load_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.load_depth_stencil_as(image, image_view_info)
}
/// Loads the existing contents of `image` as the depth/stencil attachment
/// with an explicit view, recording (or upgrading) a depth/stencil read
/// access for its range.
///
/// # Panics (debug builds)
///
/// Panics if a depth/stencil attach or clear is already recorded, or the load
/// is incompatible with an existing resolve or store.
pub fn load_depth_stencil_as(
    mut self,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // Depth/stencil may be attached by exactly one of: attach, clear, or load.
    debug_assert!(
        self.pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .depth_stencil_attachment
            .is_none(),
        "depth/stencil attachment already attached"
    );
    debug_assert!(
        self.pass
            .as_ref()
            .execs
            .last()
            .unwrap()
            .depth_stencil_clear
            .is_none(),
        "depth/stencil attachment already attached via clear"
    );

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .depth_stencil_load = Some(Attachment::new(image_view_info, sample_count, node_idx));

    // Must agree with any resolve/store already recorded on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_resolve
                .map(|(attachment, ..)| attachment),
            self.pass.as_ref().execs.last().unwrap().depth_stencil_load
        ),
        "depth/stencil attachment load incompatible with existing resolve"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass.as_ref().execs.last().unwrap().depth_stencil_store,
            self.pass.as_ref().execs.last().unwrap().depth_stencil_load
        ),
        "depth/stencil attachment load incompatible with existing store"
    );

    let mut image_access = AccessType::DepthStencilAttachmentRead;
    let image_range = image_view_info.into();

    // Merge with earlier depth/stencil accesses on overlapping ranges: a
    // prior full write upgrades to read/write, partial writes are kept as-is.
    // If a prior access fully contains this range, no new record is needed.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }

            image_access = match *access {
                AccessType::DepthAttachmentWriteStencilReadOnly => {
                    AccessType::DepthAttachmentWriteStencilReadOnly
                }
                AccessType::DepthStencilAttachmentRead => {
                    AccessType::DepthStencilAttachmentRead
                }
                AccessType::DepthStencilAttachmentWrite => {
                    AccessType::DepthStencilAttachmentReadWrite
                }
                AccessType::StencilAttachmentWriteDepthReadOnly => {
                    AccessType::StencilAttachmentWriteDepthReadOnly
                }
                _ => continue,
            };
            *access = image_access;

            if image_subresource_range_contains(access_image_range, image_range) {
                // Existing access already covers the whole loaded range.
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
/// Records a function which draws using the bound [`GraphicPipeline`].
///
/// # Panics
///
/// Panics if no graphic pipeline has been bound to the current execution.
pub fn record_subpass(
    mut self,
    func: impl FnOnce(Draw<'_>, Bindings<'_>) + Send + 'static,
) -> Self {
    let exec = self.pass.as_ref().execs.last().unwrap();
    let pipeline = Arc::clone(exec.pipeline.as_ref().unwrap().unwrap_graphic());

    self.pass.push_execute(move |device, cmd_buf, bindings| {
        let draw = Draw {
            bindings,
            cmd_buf,
            device,
            pipeline,
        };
        func(draw, bindings);
    });

    self
}
pub fn resolve_color(
self,
src_attachment_idx: AttachmentIndex,
dst_attachment_idx: AttachmentIndex,
image: impl Into<AnyImageNode>,
) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.resolve_color_as(
src_attachment_idx,
dst_attachment_idx,
image,
image_view_info,
)
}
/// Resolves multisampled color attachment `src_attachment_idx` into `image`
/// at `dst_attachment_idx` with an explicit view, recording (or upgrading) a
/// color-attachment write access for its range.
///
/// # Panics (debug builds)
///
/// Panics if the resolve is incompatible with an existing attachment, clear,
/// or load at `dst_attachment_idx`.
pub fn resolve_color_as(
    mut self,
    src_attachment_idx: AttachmentIndex,
    dst_attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .color_resolves
        .insert(
            dst_attachment_idx,
            (
                Attachment::new(image_view_info, sample_count, node_idx),
                src_attachment_idx,
            ),
        );

    // The resolve must agree with any attach/clear/load already recorded for
    // the destination index on this execution.
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_attachments
                .get(&dst_attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_resolves
                .get(&dst_attachment_idx)
                .map(|(attachment, _)| *attachment)
        ),
        "color attachment {dst_attachment_idx} resolve incompatible with existing attachment"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_clears
                .get(&dst_attachment_idx)
                .map(|(attachment, _)| *attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_resolves
                .get(&dst_attachment_idx)
                .map(|(attachment, _)| *attachment)
        ),
        "color attachment {dst_attachment_idx} resolve incompatible with existing clear"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_loads
                .get(&dst_attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_resolves
                .get(&dst_attachment_idx)
                .map(|(attachment, _)| *attachment)
        ),
        "color attachment {dst_attachment_idx} resolve incompatible with existing load"
    );

    let mut image_access = AccessType::ColorAttachmentWrite;
    let image_range = image_view_info.into();

    // Merge with earlier color-attachment accesses on overlapping ranges:
    // a prior read upgrades to read/write in place. If a prior access fully
    // contains this range, no new access record is needed.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }

            image_access = match *access {
                AccessType::ColorAttachmentRead | AccessType::ColorAttachmentReadWrite => {
                    AccessType::ColorAttachmentReadWrite
                }
                AccessType::ColorAttachmentWrite => AccessType::ColorAttachmentWrite,
                _ => continue,
            };
            *access = image_access;

            if image_subresource_range_contains(access_image_range, image_range) {
                // Existing access already covers the whole resolved range.
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
pub fn resolve_depth_stencil(
self,
dst_attachment_idx: AttachmentIndex,
image: impl Into<AnyImageNode>,
depth_mode: Option<ResolveMode>,
stencil_mode: Option<ResolveMode>,
) -> Self {
let image: AnyImageNode = image.into();
let image_info = image.get(self.pass.graph);
let image_view_info: ImageViewInfo = image_info.into();
self.resolve_depth_stencil_as(
dst_attachment_idx,
image,
image_view_info,
depth_mode,
stencil_mode,
)
}
/// Resolves the multisampled depth/stencil attachment into `image` at
/// `dst_attachment_idx` with an explicit view, recording (or upgrading) the
/// appropriate attachment access.
///
/// NOTE(review): unlike `resolve_color_as`, no compatibility debug-asserts
/// are made against an existing depth/stencil attach/clear/load here —
/// confirm that omission is intentional.
pub fn resolve_depth_stencil_as(
    mut self,
    dst_attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
    depth_mode: Option<ResolveMode>,
    stencil_mode: Option<ResolveMode>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .depth_stencil_resolve = Some((
        Attachment::new(image_view_info, sample_count, node_idx),
        dst_attachment_idx,
        depth_mode,
        stencil_mode,
    ));

    // Write whichever aspects the view covers; the other aspect is read-only.
    let mut image_access = if image_view_info
        .aspect_mask
        .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
    {
        AccessType::DepthStencilAttachmentWrite
    } else if image_view_info
        .aspect_mask
        .contains(vk::ImageAspectFlags::DEPTH)
    {
        AccessType::DepthAttachmentWriteStencilReadOnly
    } else {
        debug_assert!(
            image_view_info
                .aspect_mask
                .contains(vk::ImageAspectFlags::STENCIL)
        );

        AccessType::StencilAttachmentWriteDepthReadOnly
    };
    let image_range = image_view_info.into();

    // Merge with earlier depth/stencil accesses on overlapping ranges,
    // upgrading reads to read/write in place; stop early when a prior access
    // already covers the whole resolved range.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }

            image_access = match *access {
                AccessType::DepthAttachmentWriteStencilReadOnly => {
                    if image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::STENCIL)
                    {
                        AccessType::DepthStencilAttachmentReadWrite
                    } else {
                        AccessType::DepthAttachmentWriteStencilReadOnly
                    }
                }
                AccessType::DepthStencilAttachmentRead => {
                    if !image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::DEPTH)
                    {
                        AccessType::StencilAttachmentWriteDepthReadOnly
                    } else {
                        AccessType::DepthStencilAttachmentReadWrite
                    }
                }
                AccessType::DepthStencilAttachmentWrite => {
                    AccessType::DepthStencilAttachmentWrite
                }
                AccessType::StencilAttachmentWriteDepthReadOnly => {
                    if image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::DEPTH)
                    {
                        AccessType::DepthStencilAttachmentReadWrite
                    } else {
                        AccessType::StencilAttachmentWriteDepthReadOnly
                    }
                }
                _ => continue,
            };
            *access = image_access;

            if image_subresource_range_contains(access_image_range, image_range) {
                // Existing access already covers the whole resolved range.
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
/// Sets the depth/stencil mode for the current execution.
///
/// # Panics
///
/// Panics if a depth/stencil mode was already set on this execution.
pub fn set_depth_stencil(mut self, depth_stencil: DepthStencilMode) -> Self {
    let exec = self.pass.as_mut().execs.last_mut().unwrap();

    assert!(exec.depth_stencil.is_none());
    exec.depth_stencil = Some(depth_stencil);

    self
}
/// Sets the multiview and correlated-view masks for the current execution.
pub fn set_multiview(mut self, view_mask: u32, correlated_view_mask: u32) -> Self {
    let exec = self.pass.as_mut().execs.last_mut().unwrap();
    exec.view_mask = view_mask;
    exec.correlated_view_mask = correlated_view_mask;

    self
}
/// Restricts rendering of the current execution to the given area.
pub fn set_render_area(mut self, x: i32, y: i32, width: u32, height: u32) -> Self {
    let exec = self.pass.as_mut().execs.last_mut().unwrap();
    exec.render_area = Some(Area {
        x,
        y,
        width,
        height,
    });

    self
}
/// Stores the color attachment at `attachment_idx` into `image`, using a view derived
/// from the image's own information.
pub fn store_color(
    self,
    attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
) -> Self {
    let image: AnyImageNode = image.into();
    let image_view_info: ImageViewInfo = image.get(self.pass.graph).into();

    self.store_color_as(attachment_idx, image, image_view_info)
}
/// Stores the color attachment at `attachment_idx` into `image`, using the given image
/// view.
///
/// In debug builds, asserts that this store is compatible with any attachment, clear,
/// or load previously declared for the same index on the current execution.
pub fn store_color_as(
    mut self,
    attachment_idx: AttachmentIndex,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // Record the store on the current (last) execution of this pass.
    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .color_stores
        .insert(
            attachment_idx,
            Attachment::new(image_view_info, sample_count, node_idx),
        );

    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_attachments
                .get(&attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_stores
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} store incompatible with existing attachment"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_clears
                .get(&attachment_idx)
                .map(|(attachment, _)| *attachment),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_stores
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} store incompatible with existing clear"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_loads
                .get(&attachment_idx)
                .copied(),
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .color_stores
                .get(&attachment_idx)
                .copied()
        ),
        "color attachment {attachment_idx} store incompatible with existing load"
    );

    // Storing implies a color-attachment write access on the image node.
    let mut image_access = AccessType::ColorAttachmentWrite;
    let image_range = image_view_info.into();

    // Merge with accesses already recorded for this node on the current execution:
    // any earlier color-attachment access intersecting this view is widened in place
    // to include the write.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            // Accesses recorded for an image node are always image subresources.
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }
            image_access = match *access {
                AccessType::ColorAttachmentRead | AccessType::ColorAttachmentReadWrite => {
                    AccessType::ColorAttachmentReadWrite
                }
                AccessType::ColorAttachmentWrite => AccessType::ColorAttachmentWrite,
                // Non-color accesses are left untouched.
                _ => continue,
            };
            *access = image_access;

            // An existing access that already covers the whole stored range means no
            // additional access record is needed.
            if image_subresource_range_contains(access_image_range, image_range) {
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
/// Stores the depth/stencil attachment into `image`, using a view derived from the
/// image's own information.
pub fn store_depth_stencil(self, image: impl Into<AnyImageNode>) -> Self {
    let image: AnyImageNode = image.into();
    let image_view_info: ImageViewInfo = image.get(self.pass.graph).into();

    self.store_depth_stencil_as(image, image_view_info)
}
/// Stores the depth/stencil attachment into `image`, using the given image view.
///
/// In debug builds, asserts that this store is compatible with any depth/stencil
/// attachment, clear, or load previously declared on the current execution.
pub fn store_depth_stencil_as(
    mut self,
    image: impl Into<AnyImageNode>,
    image_view_info: impl Into<ImageViewInfo>,
) -> Self {
    let image = image.into();
    let image_view_info = image_view_info.into();
    let node_idx = image.index();
    let (_, sample_count) = self.image_info(node_idx);

    // Record the store on the current (last) execution of this pass.
    self.pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .depth_stencil_store = Some(Attachment::new(image_view_info, sample_count, node_idx));

    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_attachment,
            self.pass.as_ref().execs.last().unwrap().depth_stencil_store
        ),
        "depth/stencil attachment store incompatible with existing attachment"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass
                .as_ref()
                .execs
                .last()
                .unwrap()
                .depth_stencil_clear
                .map(|(attachment, _)| attachment),
            self.pass.as_ref().execs.last().unwrap().depth_stencil_store
        ),
        "depth/stencil attachment store incompatible with existing clear"
    );
    debug_assert!(
        Attachment::are_compatible(
            self.pass.as_ref().execs.last().unwrap().depth_stencil_load,
            self.pass.as_ref().execs.last().unwrap().depth_stencil_store
        ),
        "depth/stencil attachment store incompatible with existing load"
    );

    // Choose the write access from the view's aspects: writing both aspects, depth
    // only (stencil read-only), or stencil only (depth read-only). Exactly one of
    // DEPTH/STENCIL must be present when both are not.
    let mut image_access = if image_view_info
        .aspect_mask
        .contains(vk::ImageAspectFlags::DEPTH | vk::ImageAspectFlags::STENCIL)
    {
        AccessType::DepthStencilAttachmentWrite
    } else if image_view_info
        .aspect_mask
        .contains(vk::ImageAspectFlags::DEPTH)
    {
        AccessType::DepthAttachmentWriteStencilReadOnly
    } else {
        debug_assert!(
            image_view_info
                .aspect_mask
                .contains(vk::ImageAspectFlags::STENCIL)
        );
        AccessType::StencilAttachmentWriteDepthReadOnly
    };
    let image_range = image_view_info.into();

    // Merge with accesses already recorded for this node on the current execution:
    // widen an intersecting depth/stencil access in place so it also covers the
    // aspects written here.
    if let Some(accesses) = self
        .pass
        .as_mut()
        .execs
        .last_mut()
        .unwrap()
        .accesses
        .get_mut(&node_idx)
    {
        for SubresourceAccess {
            access,
            subresource,
        } in accesses
        {
            // Accesses recorded for an image node are always image subresources.
            let access_image_range = *subresource.as_image().unwrap();
            if !image_subresource_range_intersects(access_image_range, image_range) {
                continue;
            }
            image_access = match *access {
                // Prior depth write + new stencil write (or read) combine into
                // read/write of both aspects; otherwise the access is unchanged.
                AccessType::DepthAttachmentWriteStencilReadOnly => {
                    if image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::STENCIL)
                    {
                        AccessType::DepthStencilAttachmentReadWrite
                    } else {
                        AccessType::DepthAttachmentWriteStencilReadOnly
                    }
                }
                // Prior read is upgraded to include this write.
                AccessType::DepthStencilAttachmentRead => {
                    if !image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::DEPTH)
                    {
                        AccessType::StencilAttachmentWriteDepthReadOnly
                    } else {
                        AccessType::DepthStencilAttachmentReadWrite
                    }
                }
                AccessType::DepthStencilAttachmentWrite => {
                    AccessType::DepthStencilAttachmentWrite
                }
                // Prior stencil write + new depth write combine into read/write of
                // both aspects; otherwise the access is unchanged.
                AccessType::StencilAttachmentWriteDepthReadOnly => {
                    if image_view_info
                        .aspect_mask
                        .contains(vk::ImageAspectFlags::DEPTH)
                    {
                        AccessType::DepthStencilAttachmentReadWrite
                    } else {
                        AccessType::StencilAttachmentWriteDepthReadOnly
                    }
                }
                // Non-depth/stencil accesses are left untouched.
                _ => continue,
            };
            *access = image_access;

            // An existing access that already covers the whole stored range means no
            // additional access record is needed.
            if image_subresource_range_contains(access_image_range, image_range) {
                return self;
            }
        }
    }

    self.pass
        .push_node_access(image, image_access, Subresource::Image(image_range));

    self
}
}
impl PipelinePassRef<'_, RayTracePipeline> {
    /// Records a ray tracing execution on this pass.
    ///
    /// `func` receives a [`RayTrace`] recorder and the pass [`Bindings`] when the graph
    /// is resolved.
    ///
    /// # Panics
    ///
    /// Panics if no ray trace pipeline has been bound to the current execution.
    pub fn record_ray_trace(
        mut self,
        func: impl FnOnce(RayTrace<'_>, Bindings<'_>) + Send + 'static,
    ) -> Self {
        // Keep the bound pipeline alive for the duration of the recorded closure.
        let pipeline = {
            let exec = self.pass.as_ref().execs.last().unwrap();
            Arc::clone(exec.pipeline.as_ref().unwrap().unwrap_ray_trace())
        };

        #[cfg(debug_assertions)]
        let dynamic_stack_size = pipeline.info.dynamic_stack_size;

        self.pass.push_execute(move |device, cmd_buf, bindings| {
            func(
                RayTrace {
                    cmd_buf,
                    device,
                    #[cfg(debug_assertions)]
                    dynamic_stack_size,
                    pipeline,
                },
                bindings,
            );
        });

        self
    }
}
/// Recording interface for ray tracing commands.
///
/// An instance of this is handed to the closure passed to `record_ray_trace`.
pub struct RayTrace<'a> {
    cmd_buf: vk::CommandBuffer,
    device: &'a Device,
    // Tracked in debug builds only, so `set_stack_size` can assert the pipeline was
    // created with a dynamic stack size.
    #[cfg(debug_assertions)]
    dynamic_stack_size: bool,
    pipeline: Arc<RayTracePipeline>,
}
impl RayTrace<'_> {
    /// Sets push constants starting at offset zero.
    pub fn push_constants(&self, data: &[u8]) -> &Self {
        self.push_constants_offset(0, data)
    }

    /// Sets push constants starting at the given byte `offset`.
    ///
    /// The data window `offset..offset + data.len()` is intersected with each
    /// push-constant range declared by the pipeline, and one `cmd_push_constants`
    /// call is issued per non-empty intersection using that range's stage flags.
    #[profiling::function]
    pub fn push_constants_offset(&self, offset: u32, data: &[u8]) -> &Self {
        for push_const in self.pipeline.push_constants.iter() {
            // Clamp the caller's data window to this declared range.
            let push_const_end = push_const.offset + push_const.size;
            let data_end = offset + data.len() as u32;
            let end = data_end.min(push_const_end);
            let start = offset.max(push_const.offset);
            if end > start {
                trace!(
                    "  push constants {:?} {}..{}",
                    push_const.stage_flags, start, end
                );

                unsafe {
                    self.device.cmd_push_constants(
                        self.cmd_buf,
                        self.pipeline.layout,
                        push_const.stage_flags,
                        start,
                        // `start`/`end` are device offsets; re-base them into `data`.
                        &data[(start - offset) as usize..(end - offset) as usize],
                    );
                }
            }
        }

        self
    }

    /// Sets the dynamic stack size for subsequent trace calls.
    ///
    /// In debug builds, asserts the bound pipeline was created with a dynamic stack
    /// size.
    #[profiling::function]
    pub fn set_stack_size(&self, pipeline_stack_size: u32) -> &Self {
        #[cfg(debug_assertions)]
        assert!(self.dynamic_stack_size);

        // SAFETY(review): assumes `ray_trace_ext` is always `Some` when a ray trace
        // pipeline has been bound — confirm against device construction.
        unsafe {
            self.device
                .ray_trace_ext
                .as_ref()
                .unwrap_unchecked()
                .cmd_set_ray_tracing_pipeline_stack_size(self.cmd_buf, pipeline_stack_size);
        }

        self
    }

    /// Dispatches rays over a `width` x `height` x `depth` grid using the given shader
    /// binding tables.
    #[allow(clippy::too_many_arguments)]
    #[profiling::function]
    pub fn trace_rays(
        &self,
        raygen_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        miss_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        hit_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        callable_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        width: u32,
        height: u32,
        depth: u32,
    ) -> &Self {
        // SAFETY(review): assumes `ray_trace_ext` is always `Some` when a ray trace
        // pipeline has been bound — confirm against device construction.
        unsafe {
            self.device
                .ray_trace_ext
                .as_ref()
                .unwrap_unchecked()
                .cmd_trace_rays(
                    self.cmd_buf,
                    raygen_shader_binding_table,
                    miss_shader_binding_table,
                    hit_shader_binding_table,
                    callable_shader_binding_table,
                    width,
                    height,
                    depth,
                );
        }

        self
    }

    /// Dispatches rays with the grid dimensions read from a device buffer at
    /// `indirect_device_address`.
    #[profiling::function]
    pub fn trace_rays_indirect(
        &self,
        raygen_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        miss_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        hit_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        callable_shader_binding_table: &vk::StridedDeviceAddressRegionKHR,
        indirect_device_address: vk::DeviceAddress,
    ) -> &Self {
        // SAFETY(review): assumes `ray_trace_ext` is always `Some` when a ray trace
        // pipeline has been bound — confirm against device construction.
        unsafe {
            self.device
                .ray_trace_ext
                .as_ref()
                .unwrap_unchecked()
                .cmd_trace_rays_indirect(
                    self.cmd_buf,
                    raygen_shader_binding_table,
                    miss_shader_binding_table,
                    hit_shader_binding_table,
                    callable_shader_binding_table,
                    indirect_device_address,
                )
        }

        self
    }
}
/// The portion of a node that a recorded access applies to.
#[derive(Clone, Copy, Debug)]
pub enum Subresource {
    // Acceleration structures are always accessed as a whole.
    AccelerationStructure,
    // A range of image mip levels, array layers, and aspects.
    Image(vk::ImageSubresourceRange),
    // A byte range of a buffer.
    Buffer(BufferSubresourceRange),
}
impl Subresource {
    /// Returns the image subresource range when this is an image subresource,
    /// otherwise `None`.
    pub(super) fn as_image(&self) -> Option<&vk::ImageSubresourceRange> {
        match self {
            Self::Image(subresource) => Some(subresource),
            _ => None,
        }
    }
}
impl From<()> for Subresource {
fn from(_: ()) -> Self {
Self::AccelerationStructure
}
}
impl From<vk::ImageSubresourceRange> for Subresource {
fn from(subresource: vk::ImageSubresourceRange) -> Self {
Self::Image(subresource)
}
}
impl From<BufferSubresourceRange> for Subresource {
fn from(subresource: BufferSubresourceRange) -> Self {
Self::Buffer(subresource)
}
}
/// A single recorded access of a node's subresource.
#[derive(Clone, Copy, Debug)]
pub(super) struct SubresourceAccess {
    // The synchronization access type of this use.
    pub access: AccessType,
    // Which portion of the node was accessed.
    pub subresource: Subresource,
}
/// Associates a graph [`Node`] type with the data used when viewing it.
///
/// `Information` is the per-view data (e.g. [`ImageViewInfo`] for image nodes) and
/// `Subresource` is the addressed range type, convertible into [`Subresource`].
pub trait View: Node
where
    Self::Information: Copy,
    Self::Subresource: Into<Subresource>,
{
    type Information;
    type Subresource;
}
// Acceleration structure nodes carry no view information and no subresource.
impl View for AccelerationStructureNode {
    type Information = ();
    type Subresource = ();
}

impl View for AccelerationStructureLeaseNode {
    type Information = ();
    type Subresource = ();
}

impl View for AnyAccelerationStructureNode {
    type Information = ();
    type Subresource = ();
}

// Buffer nodes are viewed and addressed as byte ranges.
impl View for AnyBufferNode {
    type Information = BufferSubresourceRange;
    type Subresource = BufferSubresourceRange;
}

// Image nodes are viewed via image view info and addressed by subresource range.
impl View for AnyImageNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

impl View for BufferLeaseNode {
    type Information = BufferSubresourceRange;
    type Subresource = BufferSubresourceRange;
}

impl View for BufferNode {
    type Information = BufferSubresourceRange;
    type Subresource = BufferSubresourceRange;
}

impl View for ImageLeaseNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

impl View for ImageNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}

impl View for SwapchainImageNode {
    type Information = ImageViewInfo;
    type Subresource = vk::ImageSubresourceRange;
}
/// A type-erased description of a view over a node.
#[derive(Debug)]
pub enum ViewType {
    // Acceleration structures have no per-view data.
    AccelerationStructure,
    // An image view description.
    Image(ImageViewInfo),
    // A byte range of a buffer.
    Buffer(Range<vk::DeviceSize>),
}
impl ViewType {
    /// Returns the buffer byte range when this is a buffer view, otherwise `None`.
    pub(super) fn as_buffer(&self) -> Option<&Range<vk::DeviceSize>> {
        if let Self::Buffer(view_info) = self {
            Some(view_info)
        } else {
            None
        }
    }

    /// Returns the image view information when this is an image view, otherwise
    /// `None`.
    pub(super) fn as_image(&self) -> Option<&ImageViewInfo> {
        if let Self::Image(view_info) = self {
            Some(view_info)
        } else {
            None
        }
    }
}
impl From<()> for ViewType {
fn from(_: ()) -> Self {
Self::AccelerationStructure
}
}
impl From<BufferSubresourceRange> for ViewType {
fn from(subresource: BufferSubresourceRange) -> Self {
Self::Buffer(subresource.start..subresource.end)
}
}
impl From<ImageViewInfo> for ViewType {
fn from(info: ImageViewInfo) -> Self {
Self::Image(info)
}
}
impl From<Range<vk::DeviceSize>> for ViewType {
fn from(range: Range<vk::DeviceSize>) -> Self {
Self::Buffer(range)
}
}