use crate::ecs::world::World;
use crate::render::wgpu::rendergraph::{PassExecutionContext, PassNode};
use wgpu::{BindGroup, BindGroupLayout, ComputePipeline, RenderPipeline};
/// Depth-buffer format used by the line pipeline's depth-stencil state.
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
/// Initial number of line slots allocated in the GPU line buffer.
const INITIAL_LINE_CAPACITY: u32 = 1024;
/// Initial number of bounding-volume slots allocated on the GPU.
const INITIAL_BV_CAPACITY: u32 = 1024;
/// Initial number of normal-visualization slots allocated on the GPU.
const INITIAL_NORMAL_CAPACITY: u32 = 1024;
/// Multiplier applied to the required size whenever a GPU buffer must grow.
const BUFFER_GROWTH_FACTOR: f32 = 2.0;
/// Hard cap on line-buffer growth; also sizes the indirect draw-command buffer.
pub const MAX_LINES: u32 = 1_000_000;
/// Hard cap on bounding volumes that can be expanded into wireframe lines.
pub const MAX_BOUNDING_VOLUMES: u32 = 100_000;
/// Hard cap on vertex-normal debug lines.
pub const MAX_NORMALS: u32 = 1_000_000;
/// CPU-side mirror of wgpu's indexed indirect draw command layout (20 bytes,
/// tightly packed). The culling compute shader writes one of these per
/// visible line; the render pass consumes them via indirect draws.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct DrawIndexedIndirectCommand {
    // Number of indices to draw for this command.
    pub index_count: u32,
    // Number of instances to draw.
    pub instance_count: u32,
    // First index within the bound index buffer.
    pub first_index: u32,
    // Signed offset added to each index before vertex lookup.
    pub vertex_offset: i32,
    // First instance id; the vertex shader can use it to index line data.
    pub first_instance: u32,
}
/// Per-bounding-volume input consumed by the bounding-volume-lines compute
/// shader, which expands each entry into 12 wireframe line segments.
/// `_pad*` fields keep the vec3 members 16-byte aligned for WGSL.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct GpuBoundingVolumeData {
    // OBB center in local space.
    pub center: [f32; 3],
    pub _pad0: f32,
    // OBB half extents along its local axes.
    pub half_extents: [f32; 3],
    pub _pad1: f32,
    // OBB orientation quaternion, stored as (i, j, k, w).
    pub orientation: [f32; 4],
    // Entity world transform, stored as four columns (column-major).
    pub transform: [[f32; 4]; 4],
    // RGBA wireframe color.
    pub color: [f32; 4],
}
/// Uniform parameters for the bounding-volume expansion compute pass.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct BoundingVolumeParams {
    // Number of valid entries in the bounding-volume buffer.
    pub bounding_volume_count: u32,
    // Index in the shared line buffer where generated lines start
    // (placed immediately after the user-submitted lines).
    pub line_offset: u32,
    // Pads the struct to a 16-byte multiple for uniform-buffer layout.
    pub _padding: [u32; 2],
}
/// Per-vertex input consumed by the normal-lines compute shader, which emits
/// one line segment per entry (base position along the normal direction).
/// `_pad*` fields keep vec3 members 16-byte aligned for WGSL.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct GpuNormalData {
    // Vertex position in mesh-local space.
    pub position: [f32; 3],
    pub _pad0: f32,
    // Vertex normal in mesh-local space.
    pub normal: [f32; 3],
    pub _pad1: f32,
    // Entity world transform, stored as four columns (column-major).
    pub transform: [[f32; 4]; 4],
    // RGBA color for the normal line.
    pub color: [f32; 4],
    // World-space length of the drawn normal line.
    pub length: f32,
    // Pads the struct to a 16-byte multiple.
    pub _pad2: [f32; 3],
}
/// Uniform parameters for the normal-line expansion compute pass.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct NormalParams {
    // Number of valid entries in the normal buffer.
    pub normal_count: u32,
    // Index in the shared line buffer where generated lines start
    // (after the user lines and the bounding-volume lines).
    pub line_offset: u32,
    // Pads the struct to a 16-byte multiple for uniform-buffer layout.
    pub _padding: [u32; 2],
}
/// One renderable line segment in the shared GPU line storage buffer.
/// Written either from the CPU (user lines) or by the expansion compute
/// shaders (bounding volumes, normals).
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct GpuLineData {
    // World-space start point (xyzw, w kept from the transform multiply).
    pub start: [f32; 4],
    // World-space end point.
    pub end: [f32; 4],
    // RGBA color.
    pub color: [f32; 4],
    // Owning entity id, for picking/debugging on the GPU side.
    pub entity_id: u32,
    // Nonzero when the line should be considered by the culling pass.
    pub visible: u32,
    pub _padding: [u32; 2],
}
/// Uniform input for the GPU frustum-culling compute shader.
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CullingData {
    // Combined view-projection matrix.
    pub view_proj: [[f32; 4]; 4],
    // Six normalized frustum planes as (nx, ny, nz, d).
    pub frustum_planes: [[f32; 4]; 6],
    // Total number of lines to test (user + bounding-volume + normal lines).
    pub line_count: u32,
    // Pads the struct to a 16-byte multiple.
    pub _padding: [u32; 15],
}
/// Vertex of the unit line segment that is instanced once per GPU line.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct LineVertex {
    pub position: [f32; 3],
}
/// Per-frame uniform for the line vertex shader.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct LineUniform {
    // Combined view-projection matrix.
    pub view_proj: [[f32; 4]; 4],
}
/// Render-graph pass that draws debug lines: user-submitted lines,
/// bounding-volume wireframes, and vertex-normal lines, with optional
/// GPU frustum culling via indirect draws.
pub struct LinesPass {
    // Unit line geometry (two vertices / two indices), instanced per line.
    vertex_buffer: wgpu::Buffer,
    index_buffer: wgpu::Buffer,
    // Shared `GpuLineData` storage buffer holding all three line sources.
    line_buffer: wgpu::Buffer,
    // `LineUniform` (view-projection) for the vertex shader.
    uniform_buffer: wgpu::Buffer,
    bind_group: wgpu::BindGroup,
    // Layout retained so the bind group can be rebuilt when `line_buffer`
    // is reallocated by `ensure_capacity`.
    storage_bind_group_layout: BindGroupLayout,
    storage_bind_group: BindGroup,
    pipeline: RenderPipeline,
    // Indirect draw commands and draw count filled by the culling pass.
    draw_commands_buffer: wgpu::Buffer,
    draw_count_buffer: wgpu::Buffer,
    culling_bind_group_layout: BindGroupLayout,
    culling_bind_group: BindGroup,
    culling_pipeline: ComputePipeline,
    // `CullingData` uniform (view-proj, frustum planes, line count).
    culling_data_buffer: wgpu::Buffer,
    // Number of user-submitted lines currently uploaded.
    line_count: u32,
    // Allocated `GpuLineData` slots in `line_buffer` (capped at MAX_LINES).
    current_capacity: u32,
    // Bounding-volume inputs; each expands to 12 lines via compute.
    bv_buffer: wgpu::Buffer,
    bv_params_buffer: wgpu::Buffer,
    bv_bind_group_layout: BindGroupLayout,
    bv_bind_group: BindGroup,
    bv_pipeline: ComputePipeline,
    bv_count: u32,
    bv_capacity: u32,
    // Cached bv_count * 12, the number of line slots the volumes occupy.
    bv_line_count: u32,
    // Vertex-normal inputs; each expands to one line via compute.
    normal_buffer: wgpu::Buffer,
    normal_params_buffer: wgpu::Buffer,
    normal_bind_group_layout: BindGroupLayout,
    normal_bind_group: BindGroup,
    normal_pipeline: ComputePipeline,
    normal_count: u32,
    normal_capacity: u32,
    // Equal to normal_count (one line per normal).
    normal_line_count: u32,
}
impl LinesPass {
    /// Builds every GPU resource the pass owns: the line render pipeline,
    /// the frustum-culling compute pipeline, and the compute pipelines that
    /// expand bounding volumes and vertex normals into line segments.
    /// `format` is the color-attachment format the pipeline renders into.
    pub fn new(device: &wgpu::Device, format: wgpu::TextureFormat) -> Self {
        // A unit segment from the origin to +X; the vertex shader positions
        // one instance of it per `GpuLineData` entry.
        let vertices = [
            LineVertex {
                position: [0.0, 0.0, 0.0],
            },
            LineVertex {
                position: [1.0, 0.0, 0.0],
            },
        ];
        let vertex_buffer = wgpu::util::DeviceExt::create_buffer_init(
            device,
            &wgpu::util::BufferInitDescriptor {
                label: Some("Line Vertex Buffer"),
                contents: bytemuck::cast_slice(&vertices),
                usage: wgpu::BufferUsages::VERTEX,
            },
        );
        let indices = [0u32, 1u32];
        let index_buffer = wgpu::util::DeviceExt::create_buffer_init(
            device,
            &wgpu::util::BufferInitDescriptor {
                label: Some("Line Index Buffer"),
                contents: bytemuck::cast_slice(&indices),
                usage: wgpu::BufferUsages::INDEX,
            },
        );
        // COPY_SRC so live contents can be copied into a larger buffer when
        // `ensure_capacity` grows it.
        let line_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Line Buffer"),
            size: (std::mem::size_of::<GpuLineData>() * INITIAL_LINE_CAPACITY as usize) as u64,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST
                | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });
        let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Line Uniform Buffer"),
            size: std::mem::size_of::<LineUniform>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Group 0: view-projection uniform.
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::VERTEX,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
            label: Some("Line Bind Group Layout"),
        });
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: uniform_buffer.as_entire_binding(),
            }],
            label: Some("Line Bind Group"),
        });
        // Group 1: read-only line storage, indexed per instance.
        let storage_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Storage { read_only: true },
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                }],
                label: Some("Line Storage Bind Group Layout"),
            });
        let storage_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &storage_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: line_buffer.as_entire_binding(),
            }],
            label: Some("Line Storage Bind Group"),
        });
        let shader =
            device.create_shader_module(wgpu::include_wgsl!("../../shaders/line_gpu.wgsl"));
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Line Pipeline Layout"),
            bind_group_layouts: &[Some(&bind_group_layout), Some(&storage_bind_group_layout)],
            immediate_size: 0,
        });
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Line Pipeline"),
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                buffers: &[wgpu::VertexBufferLayout {
                    array_stride: std::mem::size_of::<LineVertex>() as wgpu::BufferAddress,
                    step_mode: wgpu::VertexStepMode::Vertex,
                    attributes: &wgpu::vertex_attr_array![0 => Float32x3],
                }],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format,
                    blend: Some(wgpu::BlendState::ALPHA_BLENDING),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::LineList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: None,
                unclipped_depth: false,
                polygon_mode: wgpu::PolygonMode::Fill,
                conservative: false,
            },
            // GreaterEqual compare — presumably a reverse-Z depth buffer;
            // must stay consistent with the other passes' depth state.
            depth_stencil: Some(wgpu::DepthStencilState {
                format: DEPTH_FORMAT,
                depth_write_enabled: Some(true),
                depth_compare: Some(wgpu::CompareFunction::GreaterEqual),
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState::default(),
            multiview_mask: None,
            cache: None,
        });
        // Indirect draw outputs of the culling pass. Sized for MAX_LINES
        // commands up front so it never needs to grow.
        let draw_commands_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Line Draw Commands Buffer"),
            size: (std::mem::size_of::<DrawIndexedIndirectCommand>() * MAX_LINES as usize) as u64,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::INDIRECT
                | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let draw_count_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Line Draw Count Buffer"),
            size: std::mem::size_of::<u32>() as u64,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::INDIRECT
                | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let culling_data_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Line Culling Data Buffer"),
            size: std::mem::size_of::<CullingData>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let culling_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Line Culling Shader"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
                "../../shaders/line_culling_gpu.wgsl"
            ))),
        });
        // Culling bindings: 0 = lines (read), 1 = draw commands (write),
        // 2 = draw count (write), 3 = culling uniforms.
        let culling_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("Line Culling Bind Group Layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: true },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: false },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: false },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 3,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                ],
            });
        let culling_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Line Culling Bind Group"),
            layout: &culling_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: draw_commands_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: draw_count_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: culling_data_buffer.as_entire_binding(),
                },
            ],
        });
        let culling_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Line Culling Pipeline Layout"),
                bind_group_layouts: &[Some(&culling_bind_group_layout)],
                immediate_size: 0,
            });
        let culling_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Line Culling Pipeline"),
            layout: Some(&culling_pipeline_layout),
            module: &culling_shader,
            entry_point: Some("main"),
            compilation_options: Default::default(),
            cache: None,
        });
        // Bounding-volume expansion resources (bindings: 0 = volumes in,
        // 1 = line buffer out, 2 = params).
        let bv_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Bounding Volume Buffer"),
            size: (std::mem::size_of::<GpuBoundingVolumeData>() * INITIAL_BV_CAPACITY as usize)
                as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let bv_params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Bounding Volume Params Buffer"),
            size: std::mem::size_of::<BoundingVolumeParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let bv_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("Bounding Volume Bind Group Layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: true },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: false },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                ],
            });
        let bv_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Bounding Volume Bind Group"),
            layout: &bv_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: bv_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: bv_params_buffer.as_entire_binding(),
                },
            ],
        });
        let bv_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Bounding Volume Lines Shader"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
                "../../shaders/bounding_volume_lines.wgsl"
            ))),
        });
        let bv_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Bounding Volume Pipeline Layout"),
            bind_group_layouts: &[Some(&bv_bind_group_layout)],
            immediate_size: 0,
        });
        let bv_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Bounding Volume Lines Pipeline"),
            layout: Some(&bv_pipeline_layout),
            module: &bv_shader,
            entry_point: Some("main"),
            compilation_options: Default::default(),
            cache: None,
        });
        // Normal-line expansion resources (same binding scheme as above).
        let normal_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Normal Buffer"),
            size: (std::mem::size_of::<GpuNormalData>() * INITIAL_NORMAL_CAPACITY as usize) as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let normal_params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Normal Params Buffer"),
            size: std::mem::size_of::<NormalParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let normal_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("Normal Bind Group Layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: true },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: false },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                ],
            });
        let normal_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Normal Bind Group"),
            layout: &normal_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: normal_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: normal_params_buffer.as_entire_binding(),
                },
            ],
        });
        let normal_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Normal Lines Shader"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
                "../../shaders/normal_lines.wgsl"
            ))),
        });
        let normal_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Normal Pipeline Layout"),
                bind_group_layouts: &[Some(&normal_bind_group_layout)],
                immediate_size: 0,
            });
        let normal_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Normal Lines Pipeline"),
            layout: Some(&normal_pipeline_layout),
            module: &normal_shader,
            entry_point: Some("main"),
            compilation_options: Default::default(),
            cache: None,
        });
        Self {
            vertex_buffer,
            index_buffer,
            line_buffer,
            uniform_buffer,
            bind_group,
            storage_bind_group_layout,
            storage_bind_group,
            pipeline,
            draw_commands_buffer,
            draw_count_buffer,
            culling_bind_group_layout,
            culling_bind_group,
            culling_pipeline,
            culling_data_buffer,
            line_count: 0,
            current_capacity: INITIAL_LINE_CAPACITY,
            bv_buffer,
            bv_params_buffer,
            bv_bind_group_layout,
            bv_bind_group,
            bv_pipeline,
            bv_count: 0,
            bv_capacity: INITIAL_BV_CAPACITY,
            bv_line_count: 0,
            normal_buffer,
            normal_params_buffer,
            normal_bind_group_layout,
            normal_bind_group,
            normal_pipeline,
            normal_count: 0,
            normal_capacity: INITIAL_NORMAL_CAPACITY,
            normal_line_count: 0,
        }
    }
    /// Grows the shared line buffer so it can hold at least `required_lines`
    /// entries (growth-factor sized, capped at [`MAX_LINES`]), copies the
    /// currently uploaded user lines into the new buffer, and rebuilds every
    /// bind group that references the line buffer. No-op when the current
    /// capacity already suffices.
    pub fn ensure_capacity(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        required_lines: u32,
    ) {
        if required_lines <= self.current_capacity {
            return;
        }
        let new_capacity = (required_lines as f32 * BUFFER_GROWTH_FACTOR).ceil() as u32;
        let new_capacity = new_capacity.min(MAX_LINES);
        tracing::info!(
            "Growing Line buffers from {} to {} lines ({} -> {} bytes)",
            self.current_capacity,
            new_capacity,
            self.current_capacity as u64 * std::mem::size_of::<GpuLineData>() as u64,
            new_capacity as u64 * std::mem::size_of::<GpuLineData>() as u64
        );
        let new_line_buffer_size = std::mem::size_of::<GpuLineData>() * new_capacity as usize;
        let new_line_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Line Buffer (Resized)"),
            size: new_line_buffer_size as u64,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST
                | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });
        // Only the user lines are preserved; bounding-volume and normal
        // lines are regenerated by their compute passes every frame.
        let old_line_data_size = std::mem::size_of::<GpuLineData>() * self.line_count as usize;
        if old_line_data_size > 0 {
            // The copy is submitted immediately on its own encoder so the
            // old buffer can be dropped right after this call.
            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Line Buffer Resize Copy"),
            });
            encoder.copy_buffer_to_buffer(
                &self.line_buffer,
                0,
                &new_line_buffer,
                0,
                old_line_data_size as u64,
            );
            queue.submit(std::iter::once(encoder.finish()));
        }
        self.line_buffer = new_line_buffer;
        // Every bind group holding the old buffer must be recreated.
        self.storage_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &self.storage_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: self.line_buffer.as_entire_binding(),
            }],
            label: Some("Line Storage Bind Group"),
        });
        self.culling_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Line Culling Bind Group"),
            layout: &self.culling_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: self.draw_commands_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: self.draw_count_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: self.culling_data_buffer.as_entire_binding(),
                },
            ],
        });
        self.bv_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Bounding Volume Bind Group"),
            layout: &self.bv_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.bv_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: self.line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: self.bv_params_buffer.as_entire_binding(),
                },
            ],
        });
        self.normal_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Normal Bind Group"),
            layout: &self.normal_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.normal_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: self.line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: self.normal_params_buffer.as_entire_binding(),
                },
            ],
        });
        self.current_capacity = new_capacity;
    }
    /// Grows the bounding-volume input buffer (capped at
    /// [`MAX_BOUNDING_VOLUMES`]) and rebuilds its bind group. Old contents
    /// are not copied — they are fully re-uploaded on every sync.
    pub fn ensure_bv_capacity(&mut self, device: &wgpu::Device, required_bvs: u32) {
        if required_bvs <= self.bv_capacity {
            return;
        }
        let new_capacity = (required_bvs as f32 * BUFFER_GROWTH_FACTOR).ceil() as u32;
        let new_capacity = new_capacity.min(MAX_BOUNDING_VOLUMES);
        self.bv_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Bounding Volume Buffer (Resized)"),
            size: (std::mem::size_of::<GpuBoundingVolumeData>() * new_capacity as usize) as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        self.bv_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Bounding Volume Bind Group"),
            layout: &self.bv_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.bv_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: self.line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: self.bv_params_buffer.as_entire_binding(),
                },
            ],
        });
        self.bv_capacity = new_capacity;
    }
    /// Uploads bounding-volume data and its params. Each volume occupies 12
    /// line slots, placed in the shared line buffer immediately after the
    /// `line_count` user lines.
    pub fn update_bounding_volumes(
        &mut self,
        queue: &wgpu::Queue,
        bv_data: &[GpuBoundingVolumeData],
    ) {
        self.bv_count = bv_data.len() as u32;
        self.bv_line_count = self.bv_count * 12;
        if !bv_data.is_empty() {
            queue.write_buffer(&self.bv_buffer, 0, bytemuck::cast_slice(bv_data));
            let params = BoundingVolumeParams {
                bounding_volume_count: self.bv_count,
                line_offset: self.line_count,
                _padding: [0, 0],
            };
            queue.write_buffer(&self.bv_params_buffer, 0, bytemuck::cast_slice(&[params]));
        }
    }
    /// Records the compute dispatch that expands bounding volumes into line
    /// segments inside the shared line buffer. No-op when there are none.
    pub fn generate_bv_lines(&self, encoder: &mut wgpu::CommandEncoder) {
        if self.bv_count == 0 {
            return;
        }
        let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Bounding Volume Lines Pass"),
            timestamp_writes: None,
        });
        compute_pass.set_pipeline(&self.bv_pipeline);
        compute_pass.set_bind_group(0, &self.bv_bind_group, &[]);
        // NOTE(review): assumed to match @workgroup_size in
        // bounding_volume_lines.wgsl — verify if the shader changes.
        let workgroup_size = 64;
        let dispatch_size = self.bv_count.div_ceil(workgroup_size);
        compute_pass.dispatch_workgroups(dispatch_size, 1, 1);
    }
    /// Grows the normal input buffer (capped at [`MAX_NORMALS`]) and
    /// rebuilds its bind group. Old contents are not copied — they are
    /// fully re-uploaded on every sync.
    pub fn ensure_normal_capacity(&mut self, device: &wgpu::Device, required_normals: u32) {
        if required_normals <= self.normal_capacity {
            return;
        }
        let new_capacity = (required_normals as f32 * BUFFER_GROWTH_FACTOR).ceil() as u32;
        let new_capacity = new_capacity.min(MAX_NORMALS);
        self.normal_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Normal Buffer (Resized)"),
            size: (std::mem::size_of::<GpuNormalData>() * new_capacity as usize) as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        self.normal_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Normal Bind Group"),
            layout: &self.normal_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.normal_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: self.line_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: self.normal_params_buffer.as_entire_binding(),
                },
            ],
        });
        self.normal_capacity = new_capacity;
    }
    /// Uploads normal data and its params. Each normal produces one line,
    /// placed after the user lines and the bounding-volume lines.
    pub fn update_normals(&mut self, queue: &wgpu::Queue, normal_data: &[GpuNormalData]) {
        self.normal_count = normal_data.len() as u32;
        self.normal_line_count = self.normal_count;
        if !normal_data.is_empty() {
            queue.write_buffer(&self.normal_buffer, 0, bytemuck::cast_slice(normal_data));
            let params = NormalParams {
                normal_count: self.normal_count,
                line_offset: self.line_count + self.bv_line_count,
                _padding: [0, 0],
            };
            queue.write_buffer(
                &self.normal_params_buffer,
                0,
                bytemuck::cast_slice(&[params]),
            );
        }
    }
    /// Records the compute dispatch that expands vertex normals into line
    /// segments inside the shared line buffer. No-op when there are none.
    pub fn generate_normal_lines(&self, encoder: &mut wgpu::CommandEncoder) {
        if self.normal_count == 0 {
            return;
        }
        let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Normal Lines Pass"),
            timestamp_writes: None,
        });
        compute_pass.set_pipeline(&self.normal_pipeline);
        compute_pass.set_bind_group(0, &self.normal_bind_group, &[]);
        // NOTE(review): assumed to match @workgroup_size in
        // normal_lines.wgsl — verify if the shader changes.
        let workgroup_size = 256;
        let dispatch_size = self.normal_count.div_ceil(workgroup_size);
        compute_pass.dispatch_workgroups(dispatch_size, 1, 1);
    }
    /// Total number of line slots in use: user lines, bounding-volume lines,
    /// and normal lines combined.
    pub fn total_line_count(&self) -> u32 {
        self.line_count + self.bv_line_count + self.normal_line_count
    }
    /// Writes the view-projection matrix used by the line vertex shader.
    pub fn update_uniforms(&self, queue: &wgpu::Queue, view_proj: crate::ecs::world::Mat4) {
        let uniform = LineUniform {
            view_proj: view_proj.into(),
        };
        queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[uniform]));
    }
    /// Writes the uniform consumed by the culling compute shader: the
    /// view-projection matrix, the six frustum planes, and the total line
    /// count to test.
    pub fn update_culling_data(
        &self,
        queue: &wgpu::Queue,
        view_proj: crate::ecs::world::Mat4,
        frustum_planes: [crate::ecs::world::Vec4; 6],
    ) {
        let culling_data = CullingData {
            view_proj: view_proj.into(),
            frustum_planes: frustum_planes.map(|v| [v.x, v.y, v.z, v.w]),
            line_count: self.total_line_count(),
            _padding: [0; 15],
        };
        queue.write_buffer(
            &self.culling_data_buffer,
            0,
            bytemuck::cast_slice(&[culling_data]),
        );
    }
    /// Uploads user-submitted lines to the front of the shared line buffer.
    /// Silently clamps to the current capacity — callers are expected to
    /// grow the buffer first via `ensure_capacity`.
    pub fn update_line_data(&mut self, queue: &wgpu::Queue, line_data: &[GpuLineData]) {
        if !line_data.is_empty() {
            let clamped_count = (line_data.len() as u32).min(self.current_capacity);
            self.line_count = clamped_count;
            queue.write_buffer(
                &self.line_buffer,
                0,
                bytemuck::cast_slice(&line_data[..clamped_count as usize]),
            );
        } else {
            self.line_count = 0;
        }
    }
    /// Records the GPU frustum-culling dispatch: clears the draw-count and
    /// draw-command buffers, then runs the culling shader which writes one
    /// indirect draw command per surviving line.
    pub fn cull_lines(&self, encoder: &mut wgpu::CommandEncoder) {
        let total = self.total_line_count();
        if total > 0 {
            encoder.clear_buffer(&self.draw_count_buffer, 0, None);
            // The command buffer holds exactly MAX_LINES entries, but `total`
            // can exceed that (user + bv + normal lines each have their own
            // caps). Clamp so the clear range never runs past the buffer end,
            // which would be a validation error.
            let command_count = total.min(MAX_LINES) as u64;
            let commands_size =
                command_count * std::mem::size_of::<DrawIndexedIndirectCommand>() as u64;
            encoder.clear_buffer(&self.draw_commands_buffer, 0, Some(commands_size));
            let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
                label: Some("Line Culling Pass"),
                timestamp_writes: None,
            });
            compute_pass.set_pipeline(&self.culling_pipeline);
            compute_pass.set_bind_group(0, &self.culling_bind_group, &[]);
            // NOTE(review): assumed to match @workgroup_size in
            // line_culling_gpu.wgsl — verify if the shader changes.
            let workgroup_size = 256;
            let dispatch_size = total.div_ceil(workgroup_size);
            // Respect the 65535 per-dimension dispatch limit.
            let max_dispatch = 65535;
            let final_dispatch = dispatch_size.min(max_dispatch);
            compute_pass.dispatch_workgroups(final_dispatch, 1, 1);
        }
    }
}
impl PassNode<World> for LinesPass {
    fn name(&self) -> &str {
        "lines_pass"
    }
    fn reads(&self) -> Vec<&str> {
        vec![]
    }
    fn writes(&self) -> Vec<&str> {
        vec![]
    }
    /// Lines are drawn on top of the already-rendered scene, so both the
    /// color and depth attachments are read-modify-write.
    fn reads_writes(&self) -> Vec<&str> {
        vec!["color", "depth"]
    }
    /// Uploads the per-frame camera data (view-projection uniform and
    /// frustum planes for culling). Skipped when no active camera exists.
    fn prepare(&mut self, _device: &wgpu::Device, queue: &wgpu::Queue, world: &World) {
        if let Some(camera_matrices) =
            crate::ecs::camera::queries::query_active_camera_matrices(world)
        {
            let view_proj = camera_matrices.projection * camera_matrices.view;
            self.update_uniforms(queue, view_proj);
            let frustum_planes = extract_frustum_planes(&view_proj);
            self.update_culling_data(queue, view_proj, frustum_planes);
        }
    }
    /// Records the full frame work for the pass: compute expansion of
    /// bounding-volume and normal lines, optional GPU culling, then the
    /// line render pass (indirect when culling is on, instanced otherwise).
    fn execute<'r, 'e>(
        &mut self,
        context: PassExecutionContext<'r, 'e, World>,
    ) -> crate::render::wgpu::rendergraph::Result<
        Vec<crate::render::wgpu::rendergraph::SubGraphRunCommand<'r>>,
    > {
        let total_lines = self.total_line_count();
        // Nothing to draw — skip all GPU work this frame.
        if total_lines == 0 {
            return Ok(context.into_sub_graph_commands());
        }
        // Expand bounding volumes and normals into the shared line buffer
        // before culling/drawing reads it.
        self.generate_bv_lines(context.encoder);
        self.generate_normal_lines(context.encoder);
        let (color_view, color_load, color_store) = context.get_color_attachment("color")?;
        let (depth_view, depth_load, depth_store) = context.get_depth_attachment("depth")?;
        let culling_enabled = context.configs.resources.graphics.gpu_culling_enabled;
        if culling_enabled {
            self.cull_lines(context.encoder);
        }
        let mut render_pass = context
            .encoder
            .begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Lines Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: color_view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: color_load,
                        store: color_store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                    view: depth_view,
                    depth_ops: Some(wgpu::Operations {
                        load: depth_load,
                        store: depth_store,
                    }),
                    stencil_ops: None,
                }),
                timestamp_writes: None,
                occlusion_query_set: None,
                multiview_mask: None,
            });
        render_pass.set_pipeline(&self.pipeline);
        render_pass.set_bind_group(0, &self.bind_group, &[]);
        render_pass.set_bind_group(1, &self.storage_bind_group, &[]);
        render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
        render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
        if culling_enabled {
            // Platforms without multi_draw_indirect_count support fall back
            // to a fixed-count indirect draw; commands that the culling pass
            // left zeroed (cleared buffer) produce empty draws.
            if cfg!(target_os = "macos")
                || cfg!(target_arch = "wasm32")
                || cfg!(feature = "openxr")
                || cfg!(target_os = "android")
            {
                render_pass.multi_draw_indexed_indirect(&self.draw_commands_buffer, 0, total_lines);
            } else {
                // GPU-written draw count selects how many of the MAX_LINES
                // command slots are actually consumed.
                render_pass.multi_draw_indexed_indirect_count(
                    &self.draw_commands_buffer,
                    0,
                    &self.draw_count_buffer,
                    0,
                    MAX_LINES,
                );
            }
        } else {
            // No culling: draw the 2-index unit segment once per line slot.
            render_pass.draw_indexed(0..2, 0, 0..total_lines);
        }
        drop(render_pass);
        Ok(context.into_sub_graph_commands())
    }
}
fn extract_frustum_planes(view_proj: &crate::ecs::world::Mat4) -> [crate::ecs::world::Vec4; 6] {
let mut planes = [crate::ecs::world::Vec4::zeros(); 6];
let row0 = crate::ecs::world::Vec4::new(
view_proj[(0, 0)],
view_proj[(0, 1)],
view_proj[(0, 2)],
view_proj[(0, 3)],
);
let row1 = crate::ecs::world::Vec4::new(
view_proj[(1, 0)],
view_proj[(1, 1)],
view_proj[(1, 2)],
view_proj[(1, 3)],
);
let row2 = crate::ecs::world::Vec4::new(
view_proj[(2, 0)],
view_proj[(2, 1)],
view_proj[(2, 2)],
view_proj[(2, 3)],
);
let row3 = crate::ecs::world::Vec4::new(
view_proj[(3, 0)],
view_proj[(3, 1)],
view_proj[(3, 2)],
view_proj[(3, 3)],
);
planes[0] = row3 + row0;
planes[1] = row3 - row0;
planes[2] = row3 + row1;
planes[3] = row3 - row1;
planes[4] = row3 + row2;
planes[5] = row3 - row2;
for plane in &mut planes {
let normal_length = (plane.x * plane.x + plane.y * plane.y + plane.z * plane.z).sqrt();
if normal_length > 1e-6 {
*plane /= normal_length;
}
}
planes
}
/// Gathers every visible `Lines` component in the world, transforms each
/// segment into world space, grows the GPU line buffer if needed (leaving
/// room for the bounding-volume lines appended after them), and uploads
/// the result.
pub fn sync_lines_data(
    lines_pass: &mut LinesPass,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    world: &World,
) {
    let mask = crate::ecs::world::LINES
        | crate::ecs::world::GLOBAL_TRANSFORM
        | crate::ecs::world::VISIBILITY;
    let mut gpu_lines = Vec::new();
    for entity in world.core.query_entities(mask) {
        // Skip entities that are hidden or missing a required component.
        let Some(visibility) = world.core.get_visibility(entity) else {
            continue;
        };
        if !visibility.visible {
            continue;
        }
        let Some(lines_component) = world.core.get_lines(entity) else {
            continue;
        };
        let Some(global_transform) = world.core.get_global_transform(entity) else {
            continue;
        };
        let transform = global_transform.0;
        gpu_lines.extend(lines_component.lines.iter().map(|line| {
            let start_world = transform
                * crate::ecs::world::Vec4::new(line.start.x, line.start.y, line.start.z, 1.0);
            let end_world =
                transform * crate::ecs::world::Vec4::new(line.end.x, line.end.y, line.end.z, 1.0);
            GpuLineData {
                start: [start_world.x, start_world.y, start_world.z, start_world.w],
                end: [end_world.x, end_world.y, end_world.z, end_world.w],
                color: [line.color.x, line.color.y, line.color.z, line.color.w],
                entity_id: entity.id,
                visible: 1,
                _padding: [0, 0],
            }
        }));
    }
    // Grow before upload so `update_line_data`'s capacity clamp never trims.
    let required = gpu_lines.len() as u32 + lines_pass.bv_line_count;
    if required > lines_pass.current_capacity {
        lines_pass.ensure_capacity(device, queue, required);
    }
    lines_pass.update_line_data(queue, &gpu_lines);
}
/// Collects bounding volumes to visualize (all of them, or just the
/// selected entity's, per the two flags), grows GPU buffers as needed,
/// and uploads the data for the expansion compute pass.
pub fn sync_bounding_volume_data(
    lines_pass: &mut LinesPass,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    world: &World,
    show_bounding_volumes: bool,
    show_selected_bounding_volume: bool,
    selected_entity: Option<freecs::Entity>,
) {
    // Visualization fully disabled — zero the counts and skip all uploads.
    if !show_bounding_volumes && !show_selected_bounding_volume {
        lines_pass.bv_count = 0;
        lines_pass.bv_line_count = 0;
        return;
    }
    let camera_entity = world.resources.active_camera;
    let mut bv_data = Vec::new();
    for entity in world
        .core
        .query_entities(crate::ecs::world::BOUNDING_VOLUME)
    {
        // Never draw the active camera's own bounding volume.
        if Some(entity) == camera_entity {
            continue;
        }
        // Overlay-layer entities (e.g. editor gizmos) are excluded.
        if let Some(render_layer) = world.core.get_render_layer(entity)
            && render_layer.0 == crate::ecs::render_layer::components::RenderLayer::OVERLAY
        {
            continue;
        }
        let is_selected = selected_entity == Some(entity);
        // In selected-only mode, keep only the selected entity.
        if !show_bounding_volumes && !is_selected {
            continue;
        }
        let Some(bounding_volume) = world.core.get_bounding_volume(entity) else {
            continue;
        };
        let Some(global_transform) = world.core.get_global_transform(entity) else {
            continue;
        };
        // Orange highlight for the selected entity, cyan for the rest.
        let color = if is_selected {
            [1.0, 0.45, 0.0, 1.0]
        } else {
            [0.0, 1.0, 1.0, 1.0]
        };
        let obb = &bounding_volume.obb;
        let m = &global_transform.0;
        bv_data.push(GpuBoundingVolumeData {
            center: [obb.center.x, obb.center.y, obb.center.z],
            _pad0: 0.0,
            half_extents: [obb.half_extents.x, obb.half_extents.y, obb.half_extents.z],
            _pad1: 0.0,
            orientation: [
                obb.orientation.i,
                obb.orientation.j,
                obb.orientation.k,
                obb.orientation.w,
            ],
            // Each inner array is one matrix column (column-major layout
            // for the GPU side).
            transform: [
                [m[(0, 0)], m[(1, 0)], m[(2, 0)], m[(3, 0)]],
                [m[(0, 1)], m[(1, 1)], m[(2, 1)], m[(3, 1)]],
                [m[(0, 2)], m[(1, 2)], m[(2, 2)], m[(3, 2)]],
                [m[(0, 3)], m[(1, 3)], m[(2, 3)], m[(3, 3)]],
            ],
            color,
        });
    }
    // Grow the BV input buffer, then make sure the shared line buffer has
    // room for the user lines plus 12 generated lines per volume, before
    // the upload records the final counts.
    let bv_count = bv_data.len() as u32;
    if bv_count > lines_pass.bv_capacity {
        lines_pass.ensure_bv_capacity(device, bv_count);
    }
    let total_lines_needed = lines_pass.line_count + bv_count * 12;
    if total_lines_needed > lines_pass.current_capacity {
        lines_pass.ensure_capacity(device, queue, total_lines_needed);
    }
    lines_pass.update_bounding_volumes(queue, &bv_data);
}
/// Conservative sphere-vs-frustum test: the sphere is considered visible
/// unless it lies entirely behind at least one of the six planes (signed
/// distance less than `-radius`).
fn sphere_in_frustum(
    center: &crate::ecs::world::Vec3,
    radius: f32,
    frustum_planes: &[crate::ecs::world::Vec4; 6],
) -> bool {
    frustum_planes.iter().all(|plane| {
        let signed_distance =
            plane.x * center.x + plane.y * center.y + plane.z * center.z + plane.w;
        signed_distance >= -radius
    })
}
/// Collects one normal-line entry per mesh vertex for every frustum-visible
/// mesh entity, grows GPU buffers as needed, and uploads the data for the
/// normal-line expansion compute pass.
pub fn sync_normal_data(
    lines_pass: &mut LinesPass,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    world: &World,
    show_normals: bool,
    normal_line_length: f32,
    normal_line_color: [f32; 4],
) {
    // Feature disabled — zero the counts and skip all uploads.
    if !show_normals {
        lines_pass.normal_count = 0;
        lines_pass.normal_line_count = 0;
        return;
    }
    // Without an active camera there is no frustum to cull against.
    let camera_matrices = match crate::ecs::camera::queries::query_active_camera_matrices(world) {
        Some(m) => m,
        None => {
            lines_pass.normal_count = 0;
            lines_pass.normal_line_count = 0;
            return;
        }
    };
    let view_proj = camera_matrices.projection * camera_matrices.view;
    let frustum_planes = extract_frustum_planes(&view_proj);
    let camera_entity = world.resources.active_camera;
    let mut normal_data = Vec::new();
    for entity in world
        .core
        .query_entities(crate::ecs::world::RENDER_MESH | crate::ecs::world::GLOBAL_TRANSFORM)
    {
        // Skip the camera itself and overlay-layer entities.
        if Some(entity) == camera_entity {
            continue;
        }
        if let Some(render_layer) = world.core.get_render_layer(entity)
            && render_layer.0 == crate::ecs::render_layer::components::RenderLayer::OVERLAY
        {
            continue;
        }
        let Some(render_mesh) = world.core.get_render_mesh(entity) else {
            continue;
        };
        let Some(global_transform) = world.core.get_global_transform(entity) else {
            continue;
        };
        // CPU frustum cull via the entity's bounding sphere, when it has
        // one, to avoid uploading normals for off-screen meshes.
        if let Some(bounding_volume) = world.core.get_bounding_volume(entity) {
            let world_center = global_transform.0
                * crate::ecs::world::Vec4::new(
                    bounding_volume.obb.center.x,
                    bounding_volume.obb.center.y,
                    bounding_volume.obb.center.z,
                    1.0,
                );
            let world_center =
                crate::ecs::world::Vec3::new(world_center.x, world_center.y, world_center.z);
            // Scale taken from the X basis column's length — assumes the
            // transform's scale is roughly uniform (TODO confirm).
            let scale = nalgebra_glm::length(&crate::ecs::world::Vec3::new(
                global_transform.0[(0, 0)],
                global_transform.0[(1, 0)],
                global_transform.0[(2, 0)],
            ));
            let world_radius = bounding_volume.sphere_radius * scale;
            if !sphere_in_frustum(&world_center, world_radius, &frustum_planes) {
                continue;
            }
        }
        // Linear search of the mesh cache by name; entities whose mesh is
        // not cached are skipped.
        let mesh = {
            let mut found_mesh = None;
            for (name, mesh) in
                crate::ecs::prefab::resources::mesh_cache_iter(&world.resources.mesh_cache)
            {
                if *name == render_mesh.name {
                    found_mesh = Some(mesh);
                    break;
                }
            }
            match found_mesh {
                Some(m) => m,
                None => continue,
            }
        };
        let m = &global_transform.0;
        // Each inner array is one matrix column (column-major layout for
        // the GPU side).
        let transform = [
            [m[(0, 0)], m[(1, 0)], m[(2, 0)], m[(3, 0)]],
            [m[(0, 1)], m[(1, 1)], m[(2, 1)], m[(3, 1)]],
            [m[(0, 2)], m[(1, 2)], m[(2, 2)], m[(3, 2)]],
            [m[(0, 3)], m[(1, 3)], m[(2, 3)], m[(3, 3)]],
        ];
        // One entry per vertex; the compute shader turns each into a line.
        for vertex in &mesh.vertices {
            normal_data.push(GpuNormalData {
                position: vertex.position,
                _pad0: 0.0,
                normal: vertex.normal,
                _pad1: 0.0,
                transform,
                color: normal_line_color,
                length: normal_line_length,
                _pad2: [0.0, 0.0, 0.0],
            });
        }
    }
    // Grow the normal input buffer, then ensure the shared line buffer can
    // hold user + bounding-volume + normal lines, before the upload records
    // the final counts.
    let normal_count = normal_data.len() as u32;
    if normal_count > lines_pass.normal_capacity {
        lines_pass.ensure_normal_capacity(device, normal_count);
    }
    let total_lines_needed = lines_pass.line_count + lines_pass.bv_line_count + normal_count;
    if total_lines_needed > lines_pass.current_capacity {
        lines_pass.ensure_capacity(device, queue, total_lines_needed);
    }
    lines_pass.update_normals(queue, &normal_data);
}