use super::super::pass::MeshPass;
use super::super::types::{
CLUSTER_GRID_X, CLUSTER_GRID_Y, CLUSTER_GRID_Z, DrawIndexedIndirect, LightGrid, TOTAL_CLUSTERS,
};
use crate::render::wgpu::rendergraph::PassExecutionContext;
impl MeshPass {
    /// Records the complete per-frame mesh rendering sequence into the
    /// command encoder carried by `context`.
    ///
    /// Recorded work, in encoder order:
    /// 1. Conditional compute dispatches: hierarchical transform update,
    ///    instanced transform update, cluster bounds (on camera change).
    /// 2. Light-grid reset copy followed by clustered light assignment.
    /// 3. Phase-1 (frustum-only) culling into the phase-1 indirect buffer,
    ///    then a depth prepass driven by that buffer.
    /// 4. Hi-Z pyramid rebuild from the prepass depth, then phase-2
    ///    (occlusion) culling into the main indirect buffer.
    /// 5. Color passes: opaque, opaque double-sided, OIT transparency plus
    ///    composite, and the overlay variants of each (overlay passes use
    ///    their own depth buffer so they draw on top of the scene).
    ///
    /// All draw calls are `draw_indexed_indirect` into a single indirect
    /// buffer whose slots are laid out, per the offset arithmetic below, as:
    /// [opaque][opaque_double_sided][transparent][overlay_opaque]
    /// [overlay_opaque_double_sided][overlay_transparent]
    /// [instanced_opaque][instanced_opaque_double_sided][instanced_transparent].
    ///
    /// Returns the sub-graph run commands accumulated on the context.
    pub(in super::super) fn execute_pass_node<'r, 'e>(
        &mut self,
        context: PassExecutionContext<'r, 'e, crate::ecs::world::World>,
    ) -> crate::render::wgpu::rendergraph::Result<
        Vec<crate::render::wgpu::rendergraph::SubGraphRunCommand<'r>>,
    > {
        // Resolve every attachment up front so a missing graph resource fails
        // the node before any GPU work is recorded.
        let (shadow_depth_view, _, _) = context.get_depth_attachment("shadow_depth")?;
        let (spotlight_shadow_atlas_view, _, _) =
            context.get_depth_attachment("spotlight_shadow_atlas")?;
        let (color_view, color_load, color_store) = context.get_color_attachment("color")?;
        let (depth_view, depth_load, _depth_store) = context.get_depth_attachment("depth")?;
        let (entity_id_view, entity_id_load, _entity_id_store) =
            context.get_color_attachment("entity_id")?;
        let (view_normals_view, view_normals_load, _view_normals_store) =
            context.get_color_attachment("view_normals")?;
        // One culling-shader invocation per object; must match the culling
        // shader's declared workgroup size — TODO confirm against the WGSL.
        let workgroup_size = 256;
        let dispatch_size = self.state().object_count.div_ceil(workgroup_size);
        // Recompute hierarchical (parent/child) transforms on the GPU only
        // when the hierarchy changed, then clear the dirty flag.
        if self.state().hierarchy_dirty && self.state().max_hierarchy_depth > 0 {
            self.dispatch_transform_compute(
                context.encoder,
                context.queue,
                self.state().object_count,
            );
            self.state_mut().hierarchy_dirty = false;
        }
        // Same pattern for instanced transform ranges.
        if self.state().instanced_compute_dirty
            && !self.state().instanced_transform_ranges.is_empty()
        {
            self.dispatch_instanced_transform_compute(
                context.encoder,
                context.queue,
                context.configs,
            );
            self.state_mut().instanced_compute_dirty = false;
        }
        // Rebuild per-cluster view-space bounds when the camera moved.
        // NOTE(review): unlike the two dirty flags above, `camera_changed` is
        // not cleared here — presumably reset elsewhere each frame; confirm.
        if self.state().camera_changed {
            let mut compute_pass =
                context
                    .encoder
                    .begin_compute_pass(&wgpu::ComputePassDescriptor {
                        label: Some("Cluster Bounds Pass"),
                        timestamp_writes: None,
                    });
            compute_pass.set_pipeline(&self.cluster_bounds_pipeline);
            compute_pass.set_bind_group(0, &self.gpu().cluster_bounds_bind_group, &[]);
            // 8x8 workgroup tiles over the XY cluster grid, one Z slice per
            // workgroup-Z — assumes the shader's @workgroup_size is (8, 8, 1).
            let dispatch_x = CLUSTER_GRID_X.div_ceil(8);
            let dispatch_y = CLUSTER_GRID_Y.div_ceil(8);
            compute_pass.dispatch_workgroups(dispatch_x, dispatch_y, CLUSTER_GRID_Z);
        }
        // Reset the light grid to its zeroed template before reassigning
        // lights to clusters (the assignment shader accumulates into it).
        let light_grid_size = (std::mem::size_of::<LightGrid>() * TOTAL_CLUSTERS as usize) as u64;
        context.encoder.copy_buffer_to_buffer(
            &self.gpu().light_grid_reset_buffer,
            0,
            &self.gpu().light_grid_buffer,
            0,
            light_grid_size,
        );
        // Assign lights to clusters; skipped when the bind group (and thus
        // the light data it references) has not been created yet.
        if let Some(ref cluster_assign_bind_group) = self.gpu().cluster_assign_bind_group {
            let mut compute_pass =
                context
                    .encoder
                    .begin_compute_pass(&wgpu::ComputePassDescriptor {
                        label: Some("Cluster Light Assignment Pass"),
                        timestamp_writes: None,
                    });
            compute_pass.set_pipeline(&self.cluster_assign_pipeline);
            compute_pass.set_bind_group(0, cluster_assign_bind_group, &[]);
            let dispatch_x = CLUSTER_GRID_X.div_ceil(8);
            let dispatch_y = CLUSTER_GRID_Y.div_ceil(8);
            compute_pass.dispatch_workgroups(dispatch_x, dispatch_y, CLUSTER_GRID_Z);
        }
        // --- Phase 1: frustum-only culling + depth prepass -------------------
        if self.state().object_count > 0
            && let Some(ref phase1_culling_bind_group) = self.gpu().phase1_culling_bind_group
        {
            // Zero the instance counts in the phase-1 indirect buffer by
            // copying from a pre-built reset template.
            if self.state().indirect_reset_count > 0 {
                let copy_size = (self.state().indirect_reset_count
                    * std::mem::size_of::<DrawIndexedIndirect>())
                    as u64;
                context.encoder.copy_buffer_to_buffer(
                    &self.gpu().phase1_indirect_reset_buffer,
                    0,
                    &self.gpu().phase1_indirect_buffer,
                    0,
                    copy_size,
                );
            }
            // Scope the compute pass so it ends before the render pass below.
            {
                let mut compute_pass =
                    context
                        .encoder
                        .begin_compute_pass(&wgpu::ComputePassDescriptor {
                            label: Some("Phase 1 Culling Pass (Frustum Only)"),
                            timestamp_writes: None,
                        });
                compute_pass.set_pipeline(&self.culling_pipeline);
                compute_pass.set_bind_group(0, phase1_culling_bind_group, &[]);
                compute_pass.dispatch_workgroups(dispatch_size, 1, 1);
            }
            // Depth prepass: opaque geometry only (no transparent, no color
            // targets), written with phase-1 survivors. This depth both seeds
            // early-Z for the color passes and feeds the Hi-Z pyramid used by
            // phase-2 occlusion culling.
            if !self.state().opaque_instances.is_empty()
                || !self.state().opaque_double_sided_instances.is_empty()
                || !self.state().transparent_instances.is_empty()
                || !self
                    .state()
                    .instanced_opaque_double_sided_batches
                    .is_empty()
            {
                let mut depth_prepass =
                    context
                        .encoder
                        .begin_render_pass(&wgpu::RenderPassDescriptor {
                            label: Some("Depth Prepass"),
                            color_attachments: &[],
                            depth_stencil_attachment: Some(
                                wgpu::RenderPassDepthStencilAttachment {
                                    view: depth_view,
                                    depth_ops: Some(wgpu::Operations {
                                        load: depth_load,
                                        store: wgpu::StoreOp::Store,
                                    }),
                                    stencil_ops: None,
                                },
                            ),
                            timestamp_writes: None,
                            occlusion_query_set: None,
                            multiview_mask: None,
                        });
                depth_prepass.set_pipeline(&self.depth_prepass_pipeline);
                depth_prepass.set_bind_group(0, &self.uniform_bind_group, &[]);
                depth_prepass.set_bind_group(1, &self.gpu().phase1_instance_bind_group, &[]);
                depth_prepass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
                depth_prepass
                    .set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
                // Opaque batches occupy indirect slots [0, opaque_batch_count).
                let opaque_batch_count = self.state().opaque_instances.len();
                for (batch_index, &(mesh_id, _material_id, _start, _end)) in
                    self.state().opaque_instances.iter().enumerate()
                {
                    // NOTE(review): mesh_data is fetched but unused (the `let _`
                    // silences the warning); the index also bounds-checks
                    // mesh_id. Presumably a leftover — confirm before removing.
                    let mesh_data = &self.mesh_data[mesh_id as usize];
                    let indirect_offset =
                        (batch_index * std::mem::size_of::<DrawIndexedIndirect>()) as u64;
                    depth_prepass
                        .draw_indexed_indirect(&self.gpu().phase1_indirect_buffer, indirect_offset);
                    let _ = mesh_data;
                }
                // Double-sided opaque batches follow immediately after the
                // single-sided opaque slots.
                for (batch_index, &(mesh_id, _material_id, _start, _end)) in self
                    .state()
                    .opaque_double_sided_instances
                    .iter()
                    .enumerate()
                {
                    let mesh_data = &self.mesh_data[mesh_id as usize];
                    let indirect_offset = ((opaque_batch_count + batch_index)
                        * std::mem::size_of::<DrawIndexedIndirect>())
                        as u64;
                    depth_prepass
                        .draw_indexed_indirect(&self.gpu().phase1_indirect_buffer, indirect_offset);
                    let _ = mesh_data;
                }
                // Instanced batch slots start after all non-instanced batch
                // groups (including overlay and transparent groups, which the
                // prepass itself does not draw).
                let prepass_instanced_offset = self.state().opaque_instances.len()
                    + self.state().opaque_double_sided_instances.len()
                    + self.state().transparent_instances.len()
                    + self.state().overlay_opaque_instances.len()
                    + self.state().overlay_opaque_double_sided_instances.len()
                    + self.state().overlay_transparent_instances.len();
                for (batch_index, &(_mesh_id, _material_id, _start, _end)) in
                    self.state().instanced_opaque_batches.iter().enumerate()
                {
                    let indirect_offset = ((prepass_instanced_offset + batch_index)
                        * std::mem::size_of::<DrawIndexedIndirect>())
                        as u64;
                    depth_prepass
                        .draw_indexed_indirect(&self.gpu().phase1_indirect_buffer, indirect_offset);
                }
                let instanced_ds_offset =
                    prepass_instanced_offset + self.state().instanced_opaque_batches.len();
                for (batch_index, &(_mesh_id, _material_id, _start, _end)) in self
                    .state()
                    .instanced_opaque_double_sided_batches
                    .iter()
                    .enumerate()
                {
                    let indirect_offset = ((instanced_ds_offset + batch_index)
                        * std::mem::size_of::<DrawIndexedIndirect>())
                        as u64;
                    depth_prepass
                        .draw_indexed_indirect(&self.gpu().phase1_indirect_buffer, indirect_offset);
                }
            }
        }
        // --- Hi-Z pyramid ----------------------------------------------------
        // Only occluder-producing (opaque) geometry makes a depth pyramid
        // worthwhile; `preserve_hiz` keeps last frame's pyramid instead.
        let has_depth_occluders = !self.state().opaque_instances.is_empty()
            || !self.state().opaque_double_sided_instances.is_empty()
            || !self.state().instanced_opaque_batches.is_empty()
            || !self
                .state()
                .instanced_opaque_double_sided_batches
                .is_empty();
        if has_depth_occluders && self.hiz_pass.hiz_view().is_some() && !self.preserve_hiz {
            self.hiz_pass
                .rebuild_bind_groups(context.device, depth_view);
            self.hiz_pass.execute(context.encoder);
        }
        // --- Scene bind group (per-world IBL) --------------------------------
        // Prefer the world's own IBL textures, falling back to the pass's
        // built-in defaults for any that are absent.
        let world_state = self.state();
        let brdf_lut_view = world_state
            .ibl_brdf_lut_view
            .as_ref()
            .unwrap_or(&self.brdf_lut_view);
        let irradiance_view = world_state
            .ibl_irradiance_view
            .as_ref()
            .unwrap_or(&self.irradiance_view);
        let prefiltered_view = world_state
            .ibl_prefiltered_view
            .as_ref()
            .unwrap_or(&self.prefiltered_view);
        let irradiance_b_view = world_state
            .ibl_irradiance_b_view
            .as_ref()
            .unwrap_or(&self.irradiance_b_view);
        let prefiltered_b_view = world_state
            .ibl_prefiltered_b_view
            .as_ref()
            .unwrap_or(&self.prefiltered_b_view);
        // Recreated every call because the IBL views may differ per world.
        self.scene_bind_group = context
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("Scene Bind Group (Per-World IBL)"),
                layout: &self.scene_bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(shadow_depth_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&self.shadow_sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: wgpu::BindingResource::TextureView(spotlight_shadow_atlas_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 3,
                        resource: wgpu::BindingResource::Sampler(&self.shadow_sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 4,
                        resource: self.spotlight_shadow_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 5,
                        resource: wgpu::BindingResource::TextureView(brdf_lut_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 6,
                        resource: wgpu::BindingResource::Sampler(&self.ibl_sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 7,
                        resource: wgpu::BindingResource::TextureView(irradiance_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 8,
                        resource: wgpu::BindingResource::Sampler(&self.ibl_sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 9,
                        resource: wgpu::BindingResource::TextureView(prefiltered_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 10,
                        resource: wgpu::BindingResource::Sampler(&self.ibl_sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 11,
                        resource: wgpu::BindingResource::TextureView(
                            &self.point_shadow_cubemap_view,
                        ),
                    },
                    wgpu::BindGroupEntry {
                        binding: 12,
                        resource: wgpu::BindingResource::Sampler(&self.point_shadow_sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 13,
                        resource: self.point_shadow_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 14,
                        resource: wgpu::BindingResource::TextureView(irradiance_b_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 15,
                        resource: wgpu::BindingResource::TextureView(prefiltered_b_view),
                    },
                ],
            });
        // --- Phase 2: occlusion culling --------------------------------------
        // Re-cull with occlusion (against the Hi-Z pyramid built above) into
        // the main indirect buffer, after resetting its instance counts.
        if has_depth_occluders
            && self.state().object_count > 0
            && let Some(ref culling_bind_group) = self.gpu().culling_bind_group
        {
            if self.state().indirect_reset_count > 0 {
                let copy_size = (self.state().indirect_reset_count
                    * std::mem::size_of::<DrawIndexedIndirect>())
                    as u64;
                context.encoder.copy_buffer_to_buffer(
                    &self.gpu().indirect_reset_buffer,
                    0,
                    &self.gpu().indirect_buffer,
                    0,
                    copy_size,
                );
            }
            let mut compute_pass =
                context
                    .encoder
                    .begin_compute_pass(&wgpu::ComputePassDescriptor {
                        label: Some("Phase 2 Culling Pass (With Occlusion)"),
                        timestamp_writes: None,
                    });
            compute_pass.set_pipeline(&self.culling_pipeline);
            compute_pass.set_bind_group(0, culling_bind_group, &[]);
            compute_pass.dispatch_workgroups(dispatch_size, 1, 1);
        }
        // Index of the first instanced batch slot in the indirect buffer
        // (after all six non-instanced batch groups); shared by every color
        // pass below.
        let instanced_batch_offset = self.state().opaque_instances.len()
            + self.state().opaque_double_sided_instances.len()
            + self.state().transparent_instances.len()
            + self.state().overlay_opaque_instances.len()
            + self.state().overlay_opaque_double_sided_instances.len()
            + self.state().overlay_transparent_instances.len();
        // Color passes draw from the phase-2 results when occlusion culling
        // ran, otherwise fall back to the phase-1 (frustum-only) results.
        let draw_indirect_buffer = if has_depth_occluders {
            &self.gpu().indirect_buffer
        } else {
            &self.gpu().phase1_indirect_buffer
        };
        let draw_instance_bind_group = if has_depth_occluders {
            &self.gpu().instance_bind_group
        } else {
            &self.gpu().phase1_instance_bind_group
        };
        // --- Opaque color pass -----------------------------------------------
        if !self.state().opaque_instances.is_empty()
            || !self.state().instanced_opaque_batches.is_empty()
        {
            // If the depth prepass ran it already wrote depth, so keep it;
            // otherwise apply the graph-requested load op.
            let opaque_depth_load = if self.gpu().phase1_culling_bind_group.is_some() {
                wgpu::LoadOp::Load
            } else {
                depth_load
            };
            let mut opaque_pass = context
                .encoder
                .begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("Opaque Mesh Pass"),
                    color_attachments: &[
                        Some(wgpu::RenderPassColorAttachment {
                            view: color_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: color_load,
                                store: wgpu::StoreOp::Store,
                            },
                            depth_slice: None,
                        }),
                        Some(wgpu::RenderPassColorAttachment {
                            view: entity_id_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: entity_id_load,
                                store: wgpu::StoreOp::Store,
                            },
                            depth_slice: None,
                        }),
                        Some(wgpu::RenderPassColorAttachment {
                            view: view_normals_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: view_normals_load,
                                store: wgpu::StoreOp::Store,
                            },
                            depth_slice: None,
                        }),
                    ],
                    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                        view: depth_view,
                        depth_ops: Some(wgpu::Operations {
                            load: opaque_depth_load,
                            store: wgpu::StoreOp::Store,
                        }),
                        stencil_ops: None,
                    }),
                    timestamp_writes: None,
                    occlusion_query_set: None,
                    multiview_mask: None,
                });
            opaque_pass.set_pipeline(&self.opaque_pipeline);
            opaque_pass.set_bind_group(0, &self.uniform_bind_group, &[]);
            opaque_pass.set_bind_group(1, draw_instance_bind_group, &[]);
            // Slot 2 is left for per-material bind groups set by draw_batches.
            opaque_pass.set_bind_group(3, &self.scene_bind_group, &[]);
            opaque_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            opaque_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            MeshPass::draw_batches(
                &mut opaque_pass,
                &self.state().opaque_instances,
                0,
                &self.state().material_bind_groups,
                draw_indirect_buffer,
            );
            if !self.state().instanced_opaque_batches.is_empty() {
                MeshPass::draw_batches(
                    &mut opaque_pass,
                    &self.state().instanced_opaque_batches,
                    instanced_batch_offset,
                    &self.state().material_bind_groups,
                    draw_indirect_buffer,
                );
            }
            drop(opaque_pass);
        }
        // --- Opaque double-sided color pass ----------------------------------
        // Separate pass because it needs a cull-mode-off pipeline; all
        // attachments Load so it composites over the opaque pass's output.
        if !self.state().opaque_double_sided_instances.is_empty()
            || !self
                .state()
                .instanced_opaque_double_sided_batches
                .is_empty()
        {
            let mut opaque_double_sided_pass =
                context
                    .encoder
                    .begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("Opaque Double-Sided Mesh Pass"),
                        color_attachments: &[
                            Some(wgpu::RenderPassColorAttachment {
                                view: color_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: entity_id_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: view_normals_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                        ],
                        depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                            view: depth_view,
                            depth_ops: Some(wgpu::Operations {
                                load: wgpu::LoadOp::Load,
                                store: wgpu::StoreOp::Store,
                            }),
                            stencil_ops: None,
                        }),
                        timestamp_writes: None,
                        occlusion_query_set: None,
                        multiview_mask: None,
                    });
            opaque_double_sided_pass.set_pipeline(&self.opaque_double_sided_pipeline);
            opaque_double_sided_pass.set_bind_group(0, &self.uniform_bind_group, &[]);
            opaque_double_sided_pass.set_bind_group(1, draw_instance_bind_group, &[]);
            opaque_double_sided_pass.set_bind_group(3, &self.scene_bind_group, &[]);
            opaque_double_sided_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            opaque_double_sided_pass
                .set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            // Double-sided slots start right after the opaque slots.
            MeshPass::draw_batches(
                &mut opaque_double_sided_pass,
                &self.state().opaque_double_sided_instances,
                self.state().opaque_instances.len(),
                &self.state().material_bind_groups,
                draw_indirect_buffer,
            );
            if !self
                .state()
                .instanced_opaque_double_sided_batches
                .is_empty()
            {
                MeshPass::draw_batches(
                    &mut opaque_double_sided_pass,
                    &self.state().instanced_opaque_double_sided_batches,
                    instanced_batch_offset + self.state().instanced_opaque_batches.len(),
                    &self.state().material_bind_groups,
                    draw_indirect_buffer,
                );
            }
            drop(opaque_double_sided_pass);
        }
        // --- Transparency (OIT) ----------------------------------------------
        // Order-independent transparency: accumulate into accum/reveal targets
        // (cleared to transparent / white, consistent with a weighted-blended
        // scheme — confirm against the OIT shaders), depth tested but shared
        // depth is only read-modified, then composite onto the color target.
        if !self.state().transparent_instances.is_empty()
            || !self.state().instanced_transparent_batches.is_empty()
        {
            let mut oit_pass = context
                .encoder
                .begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("OIT Mesh Pass"),
                    color_attachments: &[
                        Some(wgpu::RenderPassColorAttachment {
                            view: &self.oit_accum_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                                store: wgpu::StoreOp::Store,
                            },
                            depth_slice: None,
                        }),
                        Some(wgpu::RenderPassColorAttachment {
                            view: &self.oit_reveal_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
                                store: wgpu::StoreOp::Store,
                            },
                            depth_slice: None,
                        }),
                    ],
                    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                        view: depth_view,
                        depth_ops: Some(wgpu::Operations {
                            load: wgpu::LoadOp::Load,
                            store: wgpu::StoreOp::Store,
                        }),
                        stencil_ops: None,
                    }),
                    timestamp_writes: None,
                    occlusion_query_set: None,
                    multiview_mask: None,
                });
            oit_pass.set_pipeline(&self.oit_pipeline);
            oit_pass.set_bind_group(0, &self.uniform_bind_group, &[]);
            oit_pass.set_bind_group(1, draw_instance_bind_group, &[]);
            oit_pass.set_bind_group(3, &self.scene_bind_group, &[]);
            oit_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            oit_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            // Transparent slots follow opaque + double-sided slots.
            MeshPass::draw_batches(
                &mut oit_pass,
                &self.state().transparent_instances,
                self.state().opaque_instances.len()
                    + self.state().opaque_double_sided_instances.len(),
                &self.state().material_bind_groups,
                draw_indirect_buffer,
            );
            if !self.state().instanced_transparent_batches.is_empty() {
                MeshPass::draw_batches(
                    &mut oit_pass,
                    &self.state().instanced_transparent_batches,
                    instanced_batch_offset
                        + self.state().instanced_opaque_batches.len()
                        + self.state().instanced_opaque_double_sided_batches.len(),
                    &self.state().material_bind_groups,
                    draw_indirect_buffer,
                );
            }
            drop(oit_pass);
            // Fullscreen-triangle composite of accum/reveal onto color; this
            // is where the graph-requested color store op finally applies.
            let mut composite_pass =
                context
                    .encoder
                    .begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("OIT Composite Pass"),
                        color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                            view: color_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Load,
                                store: color_store,
                            },
                            depth_slice: None,
                        })],
                        depth_stencil_attachment: None,
                        timestamp_writes: None,
                        occlusion_query_set: None,
                        multiview_mask: None,
                    });
            composite_pass.set_pipeline(&self.oit_composite_pipeline);
            composite_pass.set_bind_group(0, &self.oit_composite_bind_group, &[]);
            composite_pass.draw(0..3, 0..1);
            drop(composite_pass);
        }
        // --- Overlay passes ---------------------------------------------------
        // Overlays repeat the opaque / double-sided / OIT sequence against a
        // dedicated overlay depth buffer (cleared here), so overlay geometry
        // renders on top of the main scene regardless of scene depth.
        if !self.state().overlay_opaque_instances.is_empty() {
            let mut overlay_opaque_pass =
                context
                    .encoder
                    .begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("Overlay Opaque Mesh Pass"),
                        color_attachments: &[
                            Some(wgpu::RenderPassColorAttachment {
                                view: color_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: entity_id_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: view_normals_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                        ],
                        depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                            view: &self.overlay_depth_view,
                            depth_ops: Some(wgpu::Operations {
                                // Clear(0.0) implies a reverse-Z depth
                                // convention — confirm against the pipelines'
                                // depth compare function.
                                load: wgpu::LoadOp::Clear(0.0),
                                store: wgpu::StoreOp::Store,
                            }),
                            stencil_ops: None,
                        }),
                        timestamp_writes: None,
                        occlusion_query_set: None,
                        multiview_mask: None,
                    });
            overlay_opaque_pass.set_pipeline(&self.overlay_opaque_pipeline);
            overlay_opaque_pass.set_bind_group(0, &self.overlay_uniform_bind_group, &[]);
            overlay_opaque_pass.set_bind_group(1, draw_instance_bind_group, &[]);
            overlay_opaque_pass.set_bind_group(3, &self.scene_bind_group, &[]);
            overlay_opaque_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            overlay_opaque_pass
                .set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            MeshPass::draw_batches(
                &mut overlay_opaque_pass,
                &self.state().overlay_opaque_instances,
                self.state().opaque_instances.len()
                    + self.state().opaque_double_sided_instances.len()
                    + self.state().transparent_instances.len(),
                &self.state().material_bind_groups,
                draw_indirect_buffer,
            );
            drop(overlay_opaque_pass);
        }
        if !self
            .state()
            .overlay_opaque_double_sided_instances
            .is_empty()
        {
            let mut overlay_opaque_double_sided_pass =
                context
                    .encoder
                    .begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("Overlay Opaque Double-Sided Mesh Pass"),
                        color_attachments: &[
                            Some(wgpu::RenderPassColorAttachment {
                                view: color_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: entity_id_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: view_normals_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                        ],
                        depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                            // Load: continues in the overlay depth cleared by
                            // the overlay opaque pass (or stale depth if that
                            // pass did not run this frame — NOTE(review):
                            // confirm that case cannot occur or is benign).
                            view: &self.overlay_depth_view,
                            depth_ops: Some(wgpu::Operations {
                                load: wgpu::LoadOp::Load,
                                store: wgpu::StoreOp::Store,
                            }),
                            stencil_ops: None,
                        }),
                        timestamp_writes: None,
                        occlusion_query_set: None,
                        multiview_mask: None,
                    });
            overlay_opaque_double_sided_pass
                .set_pipeline(&self.overlay_opaque_double_sided_pipeline);
            overlay_opaque_double_sided_pass.set_bind_group(
                0,
                &self.overlay_uniform_bind_group,
                &[],
            );
            overlay_opaque_double_sided_pass.set_bind_group(1, draw_instance_bind_group, &[]);
            overlay_opaque_double_sided_pass.set_bind_group(3, &self.scene_bind_group, &[]);
            overlay_opaque_double_sided_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            overlay_opaque_double_sided_pass
                .set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            MeshPass::draw_batches(
                &mut overlay_opaque_double_sided_pass,
                &self.state().overlay_opaque_double_sided_instances,
                self.state().opaque_instances.len()
                    + self.state().opaque_double_sided_instances.len()
                    + self.state().transparent_instances.len()
                    + self.state().overlay_opaque_instances.len(),
                &self.state().material_bind_groups,
                draw_indirect_buffer,
            );
            drop(overlay_opaque_double_sided_pass);
        }
        // Overlay OIT: same accumulate-then-composite shape as the main OIT
        // pass, but depth-tested against the overlay depth buffer.
        if !self.state().overlay_transparent_instances.is_empty() {
            let mut overlay_oit_pass =
                context
                    .encoder
                    .begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("Overlay OIT Mesh Pass"),
                        color_attachments: &[
                            Some(wgpu::RenderPassColorAttachment {
                                view: &self.oit_accum_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: &self.oit_reveal_view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            }),
                        ],
                        depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                            view: &self.overlay_depth_view,
                            depth_ops: Some(wgpu::Operations {
                                load: wgpu::LoadOp::Load,
                                store: wgpu::StoreOp::Store,
                            }),
                            stencil_ops: None,
                        }),
                        timestamp_writes: None,
                        occlusion_query_set: None,
                        multiview_mask: None,
                    });
            overlay_oit_pass.set_pipeline(&self.oit_pipeline);
            overlay_oit_pass.set_bind_group(0, &self.overlay_uniform_bind_group, &[]);
            overlay_oit_pass.set_bind_group(1, draw_instance_bind_group, &[]);
            overlay_oit_pass.set_bind_group(3, &self.scene_bind_group, &[]);
            overlay_oit_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            overlay_oit_pass
                .set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            // Overlay-transparent slots are the last non-instanced group.
            MeshPass::draw_batches(
                &mut overlay_oit_pass,
                &self.state().overlay_transparent_instances,
                self.state().opaque_instances.len()
                    + self.state().opaque_double_sided_instances.len()
                    + self.state().transparent_instances.len()
                    + self.state().overlay_opaque_instances.len()
                    + self.state().overlay_opaque_double_sided_instances.len(),
                &self.state().material_bind_groups,
                draw_indirect_buffer,
            );
            drop(overlay_oit_pass);
            let mut overlay_composite_pass =
                context
                    .encoder
                    .begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("Overlay OIT Composite Pass"),
                        color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                            view: color_view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Load,
                                store: color_store,
                            },
                            depth_slice: None,
                        })],
                        depth_stencil_attachment: None,
                        timestamp_writes: None,
                        occlusion_query_set: None,
                        multiview_mask: None,
                    });
            overlay_composite_pass.set_pipeline(&self.oit_composite_pipeline);
            overlay_composite_pass.set_bind_group(0, &self.oit_composite_bind_group, &[]);
            overlay_composite_pass.draw(0..3, 0..1);
            drop(overlay_composite_pass);
        }
        Ok(context.into_sub_graph_commands())
    }
}