//! nightshade 0.13.1
//!
//! A cross-platform data-oriented game engine.
use crate::ecs::world::World;
use crate::render::wgpu::rendergraph::PassExecutionContext;

use super::{DrawIndexedIndirect, WaterMeshPass};

impl WaterMeshPass {
    /// Records this pass's GPU work for one frame into `context.encoder`:
    /// an optional compute culling pass followed by the water-mesh render
    /// pass, driven entirely by indirect draw records.
    ///
    /// Skips all encoding when there is nothing to draw
    /// (`batch_count == 0` or `object_count == 0`).
    ///
    /// # Errors
    ///
    /// Returns an error if the `"color"` or `"depth"` attachments cannot be
    /// resolved from the render graph.
    pub(super) fn execute_pass<'r, 'e>(
        &mut self,
        context: PassExecutionContext<'r, 'e, World>,
    ) -> crate::render::wgpu::rendergraph::Result<
        Vec<crate::render::wgpu::rendergraph::SubGraphRunCommand<'r>>,
    > {
        // Nothing to draw: still forward any sub-graph commands unchanged.
        if self.batch_count == 0 || self.object_count == 0 {
            return Ok(context.into_sub_graph_commands());
        }

        // Byte stride of one indirect draw record. Shared by the reset copy
        // below and by every per-batch draw offset, so it is computed once.
        let indirect_stride = std::mem::size_of::<DrawIndexedIndirect>() as u64;
        // Maps a batch index to its byte offset inside `indirect_buffer`.
        let indirect_offset = |batch_index: usize| batch_index as u64 * indirect_stride;

        let (color_view, color_load, color_store) = context.get_color_attachment("color")?;
        let (depth_view, depth_load, depth_store) = context.get_depth_attachment("depth")?;

        // GPU culling path: reset the indirect draw records, then let the
        // compute shader rebuild them from the visible object set.
        if let Some(ref culling_bind_group) = self.culling_bind_group {
            if self.indirect_reset_count > 0 {
                // Restore pristine indirect records before culling writes
                // into them (clears last frame's instance counts).
                let copy_size = self.indirect_reset_count as u64 * indirect_stride;
                context.encoder.copy_buffer_to_buffer(
                    &self.indirect_reset_buffer,
                    0,
                    &self.indirect_buffer,
                    0,
                    copy_size,
                );
            }

            {
                let mut compute_pass =
                    context
                        .encoder
                        .begin_compute_pass(&wgpu::ComputePassDescriptor {
                            label: Some("Water Mesh Culling Pass"),
                            timestamp_writes: None,
                        });

                compute_pass.set_pipeline(&self.culling_pipeline);
                compute_pass.set_bind_group(0, culling_bind_group, &[]);

                // Must match the @workgroup_size of the culling shader.
                let workgroup_size = 64;
                let dispatch_size = self.object_count.div_ceil(workgroup_size);
                compute_pass.dispatch_workgroups(dispatch_size, 1, 1);
            }
        }

        // Render pass: one shared vertex/index/bind-group setup, then one
        // indirect draw per batch, grouped by pipeline.
        {
            let mut render_pass = context
                .encoder
                .begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("Water Mesh Render Pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: color_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: color_load,
                            store: color_store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                        view: depth_view,
                        depth_ops: Some(wgpu::Operations {
                            load: depth_load,
                            store: depth_store,
                        }),
                        stencil_ops: None,
                    }),
                    timestamp_writes: None,
                    occlusion_query_set: None,
                    multiview_mask: None,
                });

            render_pass.set_bind_group(0, &self.render_bind_group, &[]);
            render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);

            if !self.horizontal_batches.is_empty() {
                render_pass.set_pipeline(&self.horizontal_pipeline);
                for &batch_index in &self.horizontal_batches {
                    render_pass
                        .draw_indexed_indirect(&self.indirect_buffer, indirect_offset(batch_index));
                }
            }

            if !self.vertical_batches.is_empty() {
                render_pass.set_pipeline(&self.vertical_pipeline);
                for &batch_index in &self.vertical_batches {
                    render_pass
                        .draw_indexed_indirect(&self.indirect_buffer, indirect_offset(batch_index));
                }
            }

            if !self.volumetric_batches.is_empty() {
                render_pass.set_pipeline(&self.volumetric_pipeline);
                for &(batch_index, _) in &self.volumetric_batches {
                    render_pass
                        .draw_indexed_indirect(&self.indirect_buffer, indirect_offset(batch_index));
                }
            }
        }

        Ok(context.into_sub_graph_commands())
    }
}