use crate::ecs::generational_registry::registry_entry_by_name;
use crate::ecs::mesh::components::{
Mesh, Vertex, create_cone_mesh, create_cube_mesh, create_cylinder_mesh, create_plane_mesh,
create_sphere_mesh, create_subdivided_plane_mesh, create_torus_mesh,
};
use crate::ecs::prefab::resources::mesh_cache_iter;
use crate::render::wgpu::passes::geometry::HizPass;
use std::collections::{HashMap, HashSet};
use wgpu::util::DeviceExt;
use super::world_state::{BatchRange, WorldGpuBuffers, WorldRenderState};
use super::types::{
BUFFER_GROWTH_FACTOR, BUFFER_SHRINK_THRESHOLD, CLUSTER_GRID_X, CLUSTER_GRID_Y, CLUSTER_GRID_Z,
ClusterBounds, ClusterUniforms, CullingUniforms, DrawIndexedIndirect,
INITIAL_INDEX_BUFFER_SIZE, INITIAL_VERTEX_BUFFER_SIZE, InstancedTransformComputeUniforms,
MAX_HIERARCHY_DEPTH, MAX_INSTANCES, MAX_LIGHTS, MAX_POINT_LIGHT_SHADOWS, MAX_SPOTLIGHT_SHADOWS,
MaterialTextures, MeshBoundsAABB, MeshBoundsData, MeshData, MeshLodInfo, MeshRenderStateInner,
MeshUniforms, ModelMatrix, MorphDisplacement, ObjectData, PointLightShadowData, TOTAL_CLUSTERS,
TransformComputeUniforms, compute_normal_matrix,
};
/// Borrowed GPU resources needed when building a material's texture bind
/// group: the registry of loaded textures plus fallback ("dummy") views and a
/// sampler. The dummies mirror the 1x1 fallback textures `MeshPass::new`
/// creates; presumably they fill texture slots a material does not provide —
/// TODO confirm against the bind-group construction site.
pub(super) struct MaterialBindGroupContext<'a> {
    /// Loaded textures keyed by name, each paired with its sampler.
    pub registered_textures: &'a HashMap<String, (wgpu::TextureView, wgpu::Sampler)>,
    /// Fallback opaque-white texture view.
    pub dummy_white_view: &'a wgpu::TextureView,
    /// Fallback opaque-black texture view.
    pub dummy_black_view: &'a wgpu::TextureView,
    /// Fallback flat-normal texture view.
    pub dummy_normal_view: &'a wgpu::TextureView,
    /// Sampler used together with the fallback views.
    pub dummy_sampler: &'a wgpu::Sampler,
    /// Layout the material bind group is created against.
    pub texture_bind_group_layout: &'a wgpu::BindGroupLayout,
}
/// Render pass that draws ECS meshes: opaque and overlay geometry, an OIT
/// (order-independent transparency) accumulate/composite step, and the
/// GPU-side compute work that feeds it (culling, LOD, clustered lighting,
/// transform computation).
pub struct MeshPass {
    // --- Render pipelines: opaque and overlay variants, each with a
    // back-face-culled and a double-sided (cull_mode: None) version. ---
    pub(super) opaque_pipeline: wgpu::RenderPipeline,
    pub(super) opaque_double_sided_pipeline: wgpu::RenderPipeline,
    pub(super) overlay_opaque_pipeline: wgpu::RenderPipeline,
    pub(super) overlay_opaque_double_sided_pipeline: wgpu::RenderPipeline,
    // --- OIT resources: accumulation + reveal targets, and the composite
    // pass that resolves them into the final image. ---
    pub(super) oit_pipeline: wgpu::RenderPipeline,
    pub(super) oit_composite_pipeline: wgpu::RenderPipeline,
    pub(super) oit_accum_texture: wgpu::Texture,
    pub(super) oit_accum_view: wgpu::TextureView,
    pub(super) oit_reveal_texture: wgpu::Texture,
    pub(super) oit_reveal_view: wgpu::TextureView,
    pub(super) oit_composite_bind_group: wgpu::BindGroup,
    pub(super) oit_composite_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) oit_sampler: wgpu::Sampler,
    // Separate depth target for the overlay passes.
    pub(super) overlay_depth_texture: wgpu::Texture,
    pub(super) overlay_depth_view: wgpu::TextureView,
    // Current size of the OIT targets; presumably (width, height) — used to
    // detect resizes. TODO confirm against the resize path.
    pub(super) oit_texture_size: (u32, u32),
    // --- Per-frame uniforms (main view and overlay view). ---
    pub(super) uniform_buffer: wgpu::Buffer,
    pub(super) uniform_bind_group: wgpu::BindGroup,
    pub(super) overlay_uniform_buffer: wgpu::Buffer,
    pub(super) overlay_uniform_bind_group: wgpu::BindGroup,
    // --- Shared bind group layouts and the scene-level bind group
    // (shadows, IBL, point-light shadow data). ---
    pub(super) instance_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) texture_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) scene_bind_group: wgpu::BindGroup,
    pub(super) scene_bind_group_layout: wgpu::BindGroupLayout,
    // --- Shadow-map sampling resources and per-light shadow data buffers. ---
    pub(super) shadow_sampler: wgpu::Sampler,
    pub(super) spotlight_shadow_buffer: wgpu::Buffer,
    pub(super) point_shadow_buffer: wgpu::Buffer,
    pub(super) point_shadow_sampler: wgpu::Sampler,
    pub(super) ibl_sampler: wgpu::Sampler,
    // --- 1x1 fallback textures bound when a material lacks a real one. ---
    pub(super) dummy_white_view: wgpu::TextureView,
    pub(super) dummy_black_view: wgpu::TextureView,
    pub(super) dummy_normal_view: wgpu::TextureView,
    pub(super) dummy_sampler: wgpu::Sampler,
    // --- Shared geometry storage: one big vertex buffer and one big index
    // buffer, with append offsets, plus the mesh registry. ---
    pub(super) vertex_buffer: wgpu::Buffer,
    pub(super) index_buffer: wgpu::Buffer,
    pub(super) vertex_buffer_size: u64,
    pub(super) index_buffer_size: u64,
    // Mesh name -> mesh id.
    pub(super) meshes: HashMap<String, u32>,
    pub(super) mesh_data: Vec<MeshData>,
    // Texture name -> (view, sampler); names added since the last frame are
    // tracked separately in `newly_registered_textures`.
    pub(super) registered_textures: HashMap<String, (wgpu::TextureView, wgpu::Sampler)>,
    pub(super) newly_registered_textures: HashSet<String>,
    pub(super) current_vertex_offset: u32,
    pub(super) current_index_offset: u32,
    // Bookkeeping for periodic buffer compaction / shrink decisions.
    pub(super) compaction_frame_counter: u64,
    pub(super) last_vertex_utilization: f32,
    pub(super) last_index_utilization: f32,
    // --- GPU culling: compute pipeline, per-mesh bounding spheres and AABBs,
    // LOD tables, hierarchical-Z occlusion pass, and the depth prepass. ---
    pub(super) culling_pipeline: wgpu::ComputePipeline,
    pub(super) culling_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) culling_uniform_buffer: wgpu::Buffer,
    pub(super) mesh_bounds_buffer: wgpu::Buffer,
    pub(super) mesh_bounds_buffer_size: usize,
    pub(super) mesh_bounds_data: Vec<MeshBoundsData>,
    pub(super) mesh_aabbs_buffer: wgpu::Buffer,
    pub(super) mesh_aabbs_buffer_size: usize,
    pub(super) mesh_aabbs_data: Vec<MeshBoundsAABB>,
    pub(super) mesh_lod_buffer: wgpu::Buffer,
    pub(super) mesh_lod_buffer_size: usize,
    pub(super) mesh_lod_data: Vec<MeshLodInfo>,
    // Base mesh id -> ids of its LOD meshes.
    pub(super) mesh_lod_mesh_ids: HashMap<u32, Vec<u32>>,
    pub(super) hiz_pass: HizPass,
    pub(super) depth_prepass_pipeline: wgpu::RenderPipeline,
    // Uniforms for the first culling phase (two-phase occlusion culling —
    // TODO confirm against the culling dispatch code).
    pub(super) phase1_culling_uniform_buffer: wgpu::Buffer,
    // --- Morph-target displacement storage. ---
    pub(super) morph_displacement_buffer: wgpu::Buffer,
    pub(super) morph_displacement_buffer_size: u64,
    pub(super) current_morph_displacement_offset: u32,
    // --- IBL environment views (A/B pairs presumably for cross-fading
    // between environments — verify against the environment-update code). ---
    pub(super) brdf_lut_view: wgpu::TextureView,
    pub(super) irradiance_view: wgpu::TextureView,
    pub(super) prefiltered_view: wgpu::TextureView,
    pub(super) irradiance_b_view: wgpu::TextureView,
    pub(super) prefiltered_b_view: wgpu::TextureView,
    pub(super) point_shadow_cubemap_view: wgpu::TextureView,
    // --- Per-world render state, keyed by world id. ---
    pub(super) last_prepared_world_id: Option<u64>,
    pub(super) current_world_id: u64,
    pub(super) world_states: HashMap<u64, WorldRenderState>,
    pub(super) frame_counter: u64,
    // --- Clustered lighting: cluster bounds computation and light
    // assignment compute pipelines. ---
    pub(super) cluster_bounds_buffer: wgpu::Buffer,
    pub(super) cluster_bounds_pipeline: wgpu::ComputePipeline,
    pub(super) cluster_assign_pipeline: wgpu::ComputePipeline,
    pub(super) _cluster_bounds_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) cluster_assign_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) view_matrix_buffer: wgpu::Buffer,
    // --- GPU transform computation (single and instanced variants). ---
    pub(super) transform_compute_pipeline: wgpu::ComputePipeline,
    pub(super) transform_compute_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) transform_compute_uniforms_buffer: wgpu::Buffer,
    pub(super) transform_compute_staging_buffer: wgpu::Buffer,
    pub(super) instanced_compute_pipeline: wgpu::ComputePipeline,
    pub(super) instanced_compute_bind_group_layout: wgpu::BindGroupLayout,
    pub(super) instanced_compute_uniforms_buffer: wgpu::Buffer,
    pub(super) instanced_compute_staging_buffer: wgpu::Buffer,
    // When set, the HiZ pyramid is kept across frames — TODO confirm exact
    // semantics at the call sites that toggle this.
    pub preserve_hiz: bool,
    // OIT/overlay targets cached per (u32, u32) size key.
    cached_oit_states: HashMap<(u32, u32), CachedOitState>,
    // --- Dirty-state handed over from the ECS side each frame. ---
    pub(crate) frame_dirty: Option<MeshRenderStateInner>,
    pub(crate) dirty_mesh_names: HashSet<String>,
}
/// OIT and overlay-depth render targets cached per size: entries live in
/// `MeshPass::cached_oit_states`, keyed by a `(u32, u32)` pair (presumably
/// (width, height)), so returning to a previously seen size can reuse these
/// resources rather than recreating them — TODO confirm at the resize path.
struct CachedOitState {
    oit_accum_texture: wgpu::Texture,
    oit_accum_view: wgpu::TextureView,
    oit_reveal_texture: wgpu::Texture,
    oit_reveal_view: wgpu::TextureView,
    oit_composite_bind_group: wgpu::BindGroup,
    overlay_depth_texture: wgpu::Texture,
    overlay_depth_view: wgpu::TextureView,
}
/// Compiles a WGSL shader module from source with the given debug label.
///
/// On wasm builds the source is first rewritten by
/// `strip_specular_color_texture` (the stripped bindings are likewise omitted
/// from `texture_bind_group_layout_entries` on wasm).
fn create_shader_module(device: &wgpu::Device, label: &str, source: &str) -> wgpu::ShaderModule {
    // Shadowing: on wasm `source` becomes an owned, rewritten String.
    #[cfg(target_arch = "wasm32")]
    let source = strip_specular_color_texture(source);
    // `.into()` produces Cow::Borrowed from &str (native path) and
    // Cow::Owned from String (wasm path).
    let descriptor = wgpu::ShaderModuleDescriptor {
        label: Some(label),
        source: wgpu::ShaderSource::Wgsl(source.into()),
    };
    device.create_shader_module(descriptor)
}
/// Removes the specular-color texture from a WGSL source on wasm builds,
/// where that binding pair is not provided (see
/// `texture_bind_group_layout_entries`).
///
/// Two kinds of spans are dropped:
/// * a `@group(2) @binding(16)` / `@binding(17)` attribute line together with
///   the declaration line that immediately follows it, and
/// * the entire `if material.has_specular_color_texture ...` block, matched
///   by brace depth so that nested braces inside the block do not end the
///   skip early. (Brace characters inside comments or string literals would
///   miscount — acceptable for the engine's own shader sources.)
#[cfg(target_arch = "wasm32")]
fn strip_specular_color_texture(source: &str) -> String {
    /// Net `{`/`}` depth change contributed by one line, and whether the
    /// line contained an opening brace at all.
    fn brace_delta(line: &str) -> (i32, bool) {
        let opens = line.matches('{').count() as i32;
        let closes = line.matches('}').count() as i32;
        (opens - closes, opens > 0)
    }
    let mut result = String::with_capacity(source.len());
    let mut lines = source.lines();
    while let Some(line) = lines.next() {
        let trimmed = line.trim();
        if trimmed == "@group(2) @binding(16)" || trimmed == "@group(2) @binding(17)" {
            // Drop the attribute line and the declaration on the next line.
            lines.next();
            continue;
        }
        if trimmed.starts_with("if material.has_specular_color_texture") {
            // Skip lines until the block's braces balance back to zero.
            // Unlike a naive `== "}"` check, this survives nested blocks
            // (and `} else {` style lines) inside the `if` body.
            let (mut depth, mut opened) = brace_delta(line);
            while !(opened && depth <= 0) {
                match lines.next() {
                    Some(inner) => {
                        let (delta, has_open) = brace_delta(inner);
                        depth += delta;
                        opened |= has_open;
                    }
                    // Unterminated block: nothing more to skip.
                    None => break,
                }
            }
            continue;
        }
        result.push_str(line);
        result.push('\n');
    }
    result
}
/// Builds the bind-group-layout entries for a material's textures: an
/// alternating sequence of (2D texture, filtering sampler) pairs at bindings
/// 0..=15, extended with the pair at bindings 16/17 on non-wasm targets.
fn texture_bind_group_layout_entries() -> Vec<wgpu::BindGroupLayoutEntry> {
    // A fragment-visible, filterable 2D texture slot.
    let texture_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            view_dimension: wgpu::TextureViewDimension::D2,
            multisampled: false,
        },
        count: None,
    };
    // The matching filtering sampler slot.
    let sampler_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
        count: None,
    };
    // wasm builds omit the final texture/sampler pair (bindings 16/17),
    // matching the shader rewrite done by `strip_specular_color_texture`.
    #[cfg(target_arch = "wasm32")]
    let pair_count: u32 = 8;
    #[cfg(not(target_arch = "wasm32"))]
    let pair_count: u32 = 9;
    (0..pair_count)
        .flat_map(|pair| [texture_entry(2 * pair), sampler_entry(2 * pair + 1)])
        .collect()
}
impl MeshPass {
pub fn new(
device: &wgpu::Device,
queue: &wgpu::Queue,
color_format: wgpu::TextureFormat,
depth_format: wgpu::TextureFormat,
window_size: (u32, u32),
) -> Self {
let shader = create_shader_module(
device,
"mesh.wgsl",
include_str!("../../../shaders/mesh.wgsl"),
);
let oit_shader = create_shader_module(
device,
"mesh_oit.wgsl",
include_str!("../../../shaders/mesh_oit.wgsl"),
);
let composite_shader =
device.create_shader_module(wgpu::include_wgsl!("../../../shaders/oit_composite.wgsl"));
let depth_prepass_shader = device.create_shader_module(wgpu::include_wgsl!(
"../../../shaders/mesh_depth_prepass.wgsl"
));
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Uniform Buffer"),
size: std::mem::size_of::<MeshUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Mesh Uniform Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Mesh Uniform Bind Group"),
layout: &uniform_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}],
});
let overlay_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Overlay Uniform Buffer"),
size: std::mem::size_of::<MeshUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let overlay_uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Mesh Overlay Uniform Bind Group"),
layout: &uniform_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: overlay_uniform_buffer.as_entire_binding(),
}],
});
let morph_displacement_buffer_size =
(std::mem::size_of::<MorphDisplacement>() * 1000) as u64;
let morph_displacement_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Morph Displacement Buffer"),
size: morph_displacement_buffer_size,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let cluster_bounds_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Cluster Bounds Buffer"),
size: (std::mem::size_of::<ClusterBounds>() * TOTAL_CLUSTERS as usize) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let view_matrix_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("View Matrix Buffer"),
size: std::mem::size_of::<[[f32; 4]; 4]>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let instance_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Mesh Instance Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 4,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 5,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 6,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 7,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 8,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 9,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let oit_accum_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("OIT Accumulation Texture"),
size: wgpu::Extent3d {
width: window_size.0,
height: window_size.1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba16Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let oit_reveal_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("OIT Reveal Texture"),
size: wgpu::Extent3d {
width: window_size.0,
height: window_size.1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::R8Unorm,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let oit_accum_view = oit_accum_texture.create_view(&wgpu::TextureViewDescriptor::default());
let oit_reveal_view =
oit_reveal_texture.create_view(&wgpu::TextureViewDescriptor::default());
let overlay_depth_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Overlay Depth Texture"),
size: wgpu::Extent3d {
width: window_size.0,
height: window_size.1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: depth_format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
let overlay_depth_view =
overlay_depth_texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("OIT Sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Mesh Texture Bind Group Layout"),
entries: &texture_bind_group_layout_entries(),
});
let dummy_white_texture = device.create_texture_with_data(
queue,
&wgpu::TextureDescriptor {
label: Some("Dummy White Texture"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
},
wgpu::util::TextureDataOrder::LayerMajor,
&[255, 255, 255, 255],
);
let dummy_white_view =
dummy_white_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dummy_black_texture = device.create_texture_with_data(
queue,
&wgpu::TextureDescriptor {
label: Some("Dummy Black Texture"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
},
wgpu::util::TextureDataOrder::LayerMajor,
&[0, 0, 0, 255],
);
let dummy_black_view =
dummy_black_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dummy_normal_texture = device.create_texture_with_data(
queue,
&wgpu::TextureDescriptor {
label: Some("Dummy Normal Texture"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
},
wgpu::util::TextureDataOrder::LayerMajor,
&[127, 127, 255, 255],
);
let dummy_normal_view =
dummy_normal_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dummy_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("Dummy Texture Sampler"),
address_mode_u: wgpu::AddressMode::Repeat,
address_mode_v: wgpu::AddressMode::Repeat,
address_mode_w: wgpu::AddressMode::Repeat,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
..Default::default()
});
let dummy_shadow_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Dummy Shadow Texture"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Depth32Float,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
let dummy_shadow_view =
dummy_shadow_texture.create_view(&wgpu::TextureViewDescriptor::default());
let shadow_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("Shadow Sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let spotlight_shadow_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Spotlight Shadow Buffer"),
size: (std::mem::size_of::<crate::render::wgpu::passes::SpotlightShadowData>()
* MAX_SPOTLIGHT_SHADOWS) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let point_shadow_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Point Light Shadow Buffer"),
size: (std::mem::size_of::<PointLightShadowData>() * MAX_POINT_LIGHT_SHADOWS) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let point_shadow_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("Point Shadow Sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let scene_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Scene Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Depth,
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Depth,
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 4,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 5,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 6,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 7,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::Cube,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 8,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 9,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::Cube,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 10,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 11,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: false },
view_dimension: wgpu::TextureViewDimension::CubeArray,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 12,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 13,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 14,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::Cube,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 15,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::Cube,
multisampled: false,
},
count: None,
},
],
});
let ibl_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("IBL Sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
..Default::default()
});
let dummy_brdf_lut = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Dummy BRDF LUT"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba16Float,
usage: wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let dummy_brdf_lut_view =
dummy_brdf_lut.create_view(&wgpu::TextureViewDescriptor::default());
let dummy_cubemap = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Dummy IBL Cubemap"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 6,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba16Float,
usage: wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let dummy_cubemap_view = dummy_cubemap.create_view(&wgpu::TextureViewDescriptor {
dimension: Some(wgpu::TextureViewDimension::Cube),
..Default::default()
});
let dummy_point_shadow_cubemap = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Dummy Point Shadow Cubemap Array"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 6 * MAX_POINT_LIGHT_SHADOWS as u32,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::R32Float,
usage: wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let dummy_point_shadow_view =
dummy_point_shadow_cubemap.create_view(&wgpu::TextureViewDescriptor {
dimension: Some(wgpu::TextureViewDimension::CubeArray),
..Default::default()
});
let scene_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Scene Bind Group"),
layout: &scene_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&dummy_shadow_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&shadow_sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&dummy_shadow_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(&shadow_sampler),
},
wgpu::BindGroupEntry {
binding: 4,
resource: spotlight_shadow_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 5,
resource: wgpu::BindingResource::TextureView(&dummy_brdf_lut_view),
},
wgpu::BindGroupEntry {
binding: 6,
resource: wgpu::BindingResource::Sampler(&ibl_sampler),
},
wgpu::BindGroupEntry {
binding: 7,
resource: wgpu::BindingResource::TextureView(&dummy_cubemap_view),
},
wgpu::BindGroupEntry {
binding: 8,
resource: wgpu::BindingResource::Sampler(&ibl_sampler),
},
wgpu::BindGroupEntry {
binding: 9,
resource: wgpu::BindingResource::TextureView(&dummy_cubemap_view),
},
wgpu::BindGroupEntry {
binding: 10,
resource: wgpu::BindingResource::Sampler(&ibl_sampler),
},
wgpu::BindGroupEntry {
binding: 11,
resource: wgpu::BindingResource::TextureView(&dummy_point_shadow_view),
},
wgpu::BindGroupEntry {
binding: 12,
resource: wgpu::BindingResource::Sampler(&point_shadow_sampler),
},
wgpu::BindGroupEntry {
binding: 13,
resource: point_shadow_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 14,
resource: wgpu::BindingResource::TextureView(&dummy_cubemap_view),
},
wgpu::BindGroupEntry {
binding: 15,
resource: wgpu::BindingResource::TextureView(&dummy_cubemap_view),
},
],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Mesh Pipeline Layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&instance_bind_group_layout,
&texture_bind_group_layout,
&scene_bind_group_layout,
],
push_constant_ranges: &[],
});
let depth_prepass_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Depth Prepass Pipeline Layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &instance_bind_group_layout],
push_constant_ranges: &[],
});
let vertex_attributes = [
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: 12,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: 24,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: 32,
shader_location: 3,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: 40,
shader_location: 4,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: 56,
shader_location: 5,
format: wgpu::VertexFormat::Float32x4,
},
];
let vertex_buffer_layout = wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &vertex_attributes,
};
let opaque_targets: Vec<Option<wgpu::ColorTargetState>> = vec![
Some(wgpu::ColorTargetState {
format: color_format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
}),
Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::R32Float,
blend: None,
write_mask: wgpu::ColorWrites::ALL,
}),
Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::Rgba16Float,
blend: None,
write_mask: wgpu::ColorWrites::ALL,
}),
];
let opaque_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Opaque Mesh Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: std::slice::from_ref(&vertex_buffer_layout),
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &opaque_targets,
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: depth_format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::GreaterEqual,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let opaque_double_sided_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Opaque Double-Sided Mesh Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: std::slice::from_ref(&vertex_buffer_layout),
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &opaque_targets,
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: depth_format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::GreaterEqual,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let overlay_opaque_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Overlay Opaque Mesh Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: &[vertex_buffer_layout],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &opaque_targets,
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: depth_format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Greater,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let vertex_buffer_layout2 = wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &vertex_attributes,
};
let overlay_opaque_double_sided_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Overlay Opaque Double-Sided Mesh Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: std::slice::from_ref(&vertex_buffer_layout2),
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &opaque_targets,
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: depth_format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Greater,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let depth_prepass_vertex_buffer_layout = wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &vertex_attributes,
};
let depth_prepass_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Depth Prepass Pipeline"),
layout: Some(&depth_prepass_pipeline_layout),
vertex: wgpu::VertexState {
module: &depth_prepass_shader,
entry_point: Some("vs_main"),
buffers: std::slice::from_ref(&depth_prepass_vertex_buffer_layout),
compilation_options: Default::default(),
},
fragment: None,
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: depth_format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Greater,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let oit_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("OIT Mesh Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &oit_shader,
entry_point: Some("vs_main"),
buffers: &[wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &vertex_attributes,
}],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &oit_shader,
entry_point: Some("fs_main"),
targets: &[
Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::Rgba16Float,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
}),
Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::R8Unorm,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::Zero,
dst_factor: wgpu::BlendFactor::Src,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::Zero,
dst_factor: wgpu::BlendFactor::Src,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
}),
],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: depth_format,
depth_write_enabled: false,
depth_compare: wgpu::CompareFunction::Greater,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let composite_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("OIT Composite Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let oit_composite_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("OIT Composite Bind Group"),
layout: &composite_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&oit_accum_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&oit_reveal_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
});
let composite_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("OIT Composite Pipeline Layout"),
bind_group_layouts: &[&composite_bind_group_layout],
push_constant_ranges: &[],
});
let oit_composite_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("OIT Composite Pipeline"),
layout: Some(&composite_pipeline_layout),
vertex: wgpu::VertexState {
module: &composite_shader,
entry_point: Some("vs_main"),
buffers: &[],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &composite_shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: color_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let vertex_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Vertex Buffer"),
size: (std::mem::size_of::<Vertex>() * INITIAL_VERTEX_BUFFER_SIZE) as u64,
usage: wgpu::BufferUsages::VERTEX
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let index_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Index Buffer"),
size: (std::mem::size_of::<u32>() * INITIAL_INDEX_BUFFER_SIZE) as u64,
usage: wgpu::BufferUsages::INDEX
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let vertex_buffer_size =
(std::mem::size_of::<Vertex>() * INITIAL_VERTEX_BUFFER_SIZE) as u64;
let index_buffer_size = (std::mem::size_of::<u32>() * INITIAL_INDEX_BUFFER_SIZE) as u64;
let culling_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Mesh Culling Shader"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
"../../../shaders/mesh_culling.wgsl"
))),
});
let culling_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Mesh Culling Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 4,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 5,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 6,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: false },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 7,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 8,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let culling_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Mesh Culling Pipeline Layout"),
bind_group_layouts: &[&culling_bind_group_layout],
push_constant_ranges: &[],
});
let culling_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("Mesh Culling Pipeline"),
layout: Some(&culling_pipeline_layout),
module: &culling_shader,
entry_point: Some("main"),
compilation_options: Default::default(),
cache: None,
});
let culling_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Culling Uniform Buffer"),
size: std::mem::size_of::<CullingUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let mesh_bounds_buffer_size = 64;
let mesh_bounds_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Bounds Buffer"),
size: (std::mem::size_of::<MeshBoundsData>() * mesh_bounds_buffer_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let mesh_aabbs_buffer_size = 64;
let mesh_aabbs_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh AABBs Buffer"),
size: (std::mem::size_of::<MeshBoundsAABB>() * mesh_aabbs_buffer_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let mesh_lod_buffer_size = 64;
let mesh_lod_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh LOD Buffer"),
size: (std::mem::size_of::<MeshLodInfo>() * mesh_lod_buffer_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let hiz_pass = HizPass::new(device);
let phase1_culling_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Culling Uniform Buffer"),
size: std::mem::size_of::<CullingUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let cluster_bounds_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Cluster Bounds Shader"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
"../../../shaders/cluster_bounds.wgsl"
))),
});
let cluster_assign_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Cluster Light Assign Shader"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
"../../../shaders/cluster_light_assign.wgsl"
))),
});
let cluster_bounds_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Cluster Bounds Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let cluster_bounds_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Cluster Bounds Pipeline Layout"),
bind_group_layouts: &[&cluster_bounds_bind_group_layout],
push_constant_ranges: &[],
});
let cluster_bounds_pipeline =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("Cluster Bounds Pipeline"),
layout: Some(&cluster_bounds_pipeline_layout),
module: &cluster_bounds_shader,
entry_point: Some("main"),
compilation_options: Default::default(),
cache: None,
});
let cluster_assign_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Cluster Light Assign Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 4,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 5,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let cluster_assign_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Cluster Light Assign Pipeline Layout"),
bind_group_layouts: &[&cluster_assign_bind_group_layout],
push_constant_ranges: &[],
});
let cluster_assign_pipeline =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("Cluster Light Assign Pipeline"),
layout: Some(&cluster_assign_pipeline_layout),
module: &cluster_assign_shader,
entry_point: Some("main"),
compilation_options: Default::default(),
cache: None,
});
let transform_compute_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Transform Compute Shader"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
"../../../shaders/transform_compute.wgsl"
))),
});
let transform_compute_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Transform Compute Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let transform_compute_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Transform Compute Pipeline Layout"),
bind_group_layouts: &[&transform_compute_bind_group_layout],
push_constant_ranges: &[],
});
let transform_compute_pipeline =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("Transform Compute Pipeline"),
layout: Some(&transform_compute_pipeline_layout),
module: &transform_compute_shader,
entry_point: Some("main"),
compilation_options: Default::default(),
cache: None,
});
let transform_compute_uniforms_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Transform Compute Uniforms Buffer"),
size: std::mem::size_of::<TransformComputeUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let transform_compute_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Transform Compute Staging Buffer"),
size: (std::mem::size_of::<TransformComputeUniforms>() * MAX_HIERARCHY_DEPTH as usize)
as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let instanced_compute_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Instanced Transform Compute Shader"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!(
"../../../shaders/instanced_transform_compute.wgsl"
))),
});
let instanced_compute_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Instanced Transform Compute Bind Group Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let instanced_compute_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Instanced Transform Compute Pipeline Layout"),
bind_group_layouts: &[&instanced_compute_bind_group_layout],
push_constant_ranges: &[],
});
let instanced_compute_pipeline =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("Instanced Transform Compute Pipeline"),
layout: Some(&instanced_compute_pipeline_layout),
module: &instanced_compute_shader,
entry_point: Some("main"),
compilation_options: Default::default(),
cache: None,
});
let instanced_compute_uniforms_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Instanced Transform Compute Uniforms Buffer"),
size: std::mem::size_of::<InstancedTransformComputeUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let instanced_compute_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Instanced Transform Compute Staging Buffer"),
size: std::mem::size_of::<InstancedTransformComputeUniforms>() as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let mut pass = Self {
opaque_pipeline,
opaque_double_sided_pipeline,
overlay_opaque_pipeline,
overlay_opaque_double_sided_pipeline,
oit_pipeline,
oit_composite_pipeline,
oit_accum_texture,
oit_accum_view,
oit_reveal_texture,
oit_reveal_view,
oit_composite_bind_group,
oit_composite_bind_group_layout: composite_bind_group_layout,
oit_sampler: sampler,
overlay_depth_texture,
overlay_depth_view,
oit_texture_size: window_size,
uniform_buffer,
uniform_bind_group,
overlay_uniform_buffer,
overlay_uniform_bind_group,
instance_bind_group_layout,
texture_bind_group_layout,
scene_bind_group,
scene_bind_group_layout: scene_bind_group_layout.clone(),
shadow_sampler,
spotlight_shadow_buffer,
point_shadow_buffer,
point_shadow_sampler,
ibl_sampler,
dummy_white_view,
dummy_black_view,
dummy_normal_view,
dummy_sampler,
vertex_buffer,
index_buffer,
vertex_buffer_size,
index_buffer_size,
meshes: HashMap::new(),
mesh_data: Vec::new(),
registered_textures: HashMap::new(),
newly_registered_textures: HashSet::new(),
current_vertex_offset: 0,
current_index_offset: 0,
compaction_frame_counter: 0,
last_vertex_utilization: 1.0,
last_index_utilization: 1.0,
culling_pipeline,
culling_bind_group_layout,
culling_uniform_buffer,
mesh_bounds_buffer,
mesh_bounds_buffer_size,
mesh_bounds_data: Vec::new(),
mesh_aabbs_buffer,
mesh_aabbs_buffer_size,
mesh_aabbs_data: Vec::new(),
mesh_lod_buffer,
mesh_lod_buffer_size,
mesh_lod_data: Vec::new(),
mesh_lod_mesh_ids: HashMap::new(),
hiz_pass,
depth_prepass_pipeline,
phase1_culling_uniform_buffer,
morph_displacement_buffer,
morph_displacement_buffer_size,
current_morph_displacement_offset: 0,
brdf_lut_view: dummy_brdf_lut_view,
irradiance_view: dummy_cubemap_view.clone(),
prefiltered_view: dummy_cubemap_view.clone(),
irradiance_b_view: dummy_cubemap_view.clone(),
prefiltered_b_view: dummy_cubemap_view,
point_shadow_cubemap_view: dummy_point_shadow_view,
last_prepared_world_id: None,
current_world_id: 0,
world_states: HashMap::new(),
frame_counter: 0,
cluster_bounds_buffer,
cluster_bounds_pipeline,
cluster_assign_pipeline,
_cluster_bounds_bind_group_layout: cluster_bounds_bind_group_layout,
cluster_assign_bind_group_layout,
view_matrix_buffer,
transform_compute_pipeline,
transform_compute_bind_group_layout,
transform_compute_uniforms_buffer,
transform_compute_staging_buffer,
instanced_compute_pipeline,
instanced_compute_bind_group_layout,
instanced_compute_uniforms_buffer,
instanced_compute_staging_buffer,
preserve_hiz: false,
cached_oit_states: HashMap::new(),
frame_dirty: None,
dirty_mesh_names: HashSet::new(),
};
pass.add_mesh(device, queue, "Cube", create_cube_mesh());
pass.add_mesh(device, queue, "Sphere", create_sphere_mesh(1.0, 16));
pass.add_mesh(device, queue, "Sphere_LOD1", create_sphere_mesh(1.0, 8));
pass.add_mesh(device, queue, "Sphere_LOD2", create_sphere_mesh(1.0, 4));
pass.add_mesh(device, queue, "Plane", create_plane_mesh(2.0));
pass.add_mesh(
device,
queue,
"SubdividedPlane",
create_subdivided_plane_mesh(2.0, 20),
);
pass.add_mesh(
device,
queue,
"Cylinder",
create_cylinder_mesh(0.5, 1.0, 16),
);
pass.add_mesh(device, queue, "Cone", create_cone_mesh(0.5, 1.0, 16));
pass.add_mesh(device, queue, "Torus", create_torus_mesh(1.0, 0.3, 16, 16));
pass
}
/// Returns the render state for the currently active world.
///
/// # Panics
/// Panics if `current_world_id` has no entry in `world_states`; callers are
/// expected to register a world before rendering it, so a miss is a logic bug.
#[inline]
pub(super) fn state(&self) -> &WorldRenderState {
    self.world_states
        .get(&self.current_world_id)
        .expect("MeshPass::state: no WorldRenderState registered for current_world_id")
}
/// Returns a mutable reference to the currently active world's render state.
///
/// # Panics
/// Panics if `current_world_id` has no entry in `world_states`; callers are
/// expected to register a world before rendering it, so a miss is a logic bug.
#[inline]
pub(super) fn state_mut(&mut self) -> &mut WorldRenderState {
    self.world_states
        .get_mut(&self.current_world_id)
        .expect("MeshPass::state_mut: no WorldRenderState registered for current_world_id")
}
/// Returns the GPU buffer set for the currently active world.
///
/// # Panics
/// Panics if the current world is unregistered, or if its `gpu_buffers`
/// have not been created yet (i.e. the world was never prepared).
#[inline]
pub(super) fn gpu(&self) -> &WorldGpuBuffers {
    self.world_states
        .get(&self.current_world_id)
        .expect("MeshPass::gpu: no WorldRenderState registered for current_world_id")
        .gpu_buffers
        .as_ref()
        .expect("MeshPass::gpu: gpu_buffers not initialized for current world")
}
/// Returns a mutable reference to the currently active world's GPU buffers.
///
/// # Panics
/// Panics if the current world is unregistered, or if its `gpu_buffers`
/// have not been created yet (i.e. the world was never prepared).
#[inline]
pub(super) fn gpu_mut(&mut self) -> &mut WorldGpuBuffers {
    self.world_states
        .get_mut(&self.current_world_id)
        .expect("MeshPass::gpu_mut: no WorldRenderState registered for current_world_id")
        .gpu_buffers
        .as_mut()
        .expect("MeshPass::gpu_mut: gpu_buffers not initialized for current world")
}
/// Resizes the OIT accumulation/reveal targets and the overlay depth buffer
/// to the new surface size.
///
/// Outgoing targets are stashed in `cached_oit_states` keyed by their size,
/// so toggling between two sizes (e.g. viewport <-> fullscreen) restores the
/// previous textures instead of reallocating. Both the restore path and the
/// fresh-allocation path invalidate the per-world culling bind groups and,
/// unless `preserve_hiz` is set, resize the Hi-Z pass.
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
// No-op for an unchanged size or a minimized (zero-area) surface.
if self.oit_texture_size == (width, height) || width == 0 || height == 0 {
return;
}
// Stash the outgoing size's resources. wgpu textures/views/bind groups are
// internally ref-counted, so these clones are cheap handle copies, not
// GPU-memory copies.
let old_size = self.oit_texture_size;
self.cached_oit_states.insert(
old_size,
CachedOitState {
oit_accum_texture: self.oit_accum_texture.clone(),
oit_accum_view: self.oit_accum_view.clone(),
oit_reveal_texture: self.oit_reveal_texture.clone(),
oit_reveal_view: self.oit_reveal_view.clone(),
oit_composite_bind_group: self.oit_composite_bind_group.clone(),
overlay_depth_texture: self.overlay_depth_texture.clone(),
overlay_depth_view: self.overlay_depth_view.clone(),
},
);
self.oit_texture_size = (width, height);
// Fast path: we already have targets cached for the requested size —
// restore them and skip all texture creation below.
if let Some(cached) = self.cached_oit_states.remove(&(width, height)) {
self.oit_accum_texture = cached.oit_accum_texture;
self.oit_accum_view = cached.oit_accum_view;
self.oit_reveal_texture = cached.oit_reveal_texture;
self.oit_reveal_view = cached.oit_reveal_view;
self.oit_composite_bind_group = cached.oit_composite_bind_group;
self.overlay_depth_texture = cached.overlay_depth_texture;
self.overlay_depth_view = cached.overlay_depth_view;
// Culling bind groups reference size-dependent resources; drop them so
// they are rebuilt lazily for the restored size.
if let Some(world_state) = self.world_states.get_mut(&self.current_world_id)
&& let Some(gpu) = world_state.gpu_buffers.as_mut()
{
gpu.culling_bind_group = None;
gpu.phase1_culling_bind_group = None;
}
if !self.preserve_hiz {
self.hiz_pass.resize(device, width, height);
}
return;
}
// Slow path: allocate fresh targets at the new size.
// Accumulation target: RGBA16F, written additively by the OIT pass and
// sampled by the composite pass.
self.oit_accum_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("OIT Accumulation Texture"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba16Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
// Revealage target: single-channel R8Unorm, multiplicatively blended.
self.oit_reveal_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("OIT Reveal Texture"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::R8Unorm,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
self.oit_accum_view = self
.oit_accum_texture
.create_view(&wgpu::TextureViewDescriptor::default());
self.oit_reveal_view = self
.oit_reveal_texture
.create_view(&wgpu::TextureViewDescriptor::default());
// Separate depth buffer for the overlay pipelines (render-attachment
// only; never sampled).
self.overlay_depth_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Overlay Depth Texture"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Depth32Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
self.overlay_depth_view = self
.overlay_depth_texture
.create_view(&wgpu::TextureViewDescriptor::default());
// The composite bind group samples the accum/reveal views, so it must be
// rebuilt whenever those views change.
self.oit_composite_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("OIT Composite Bind Group"),
layout: &self.oit_composite_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&self.oit_accum_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.oit_sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&self.oit_reveal_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(&self.oit_sampler),
},
],
});
// Same invalidation as the restore path: force culling bind groups to be
// recreated against the new size-dependent resources.
if let Some(world_state) = self.world_states.get_mut(&self.current_world_id)
&& let Some(gpu) = world_state.gpu_buffers.as_mut()
{
gpu.culling_bind_group = None;
gpu.phase1_culling_bind_group = None;
}
if !self.preserve_hiz {
self.hiz_pass.resize(device, width, height);
}
}
/// Installs a new IBL texture set (BRDF LUT, irradiance, prefiltered env map)
/// for the currently active world and marks its IBL state dirty so bind
/// groups get rebuilt.
pub fn update_ibl_textures(
    &mut self,
    brdf_lut_view: wgpu::TextureView,
    irradiance_view: wgpu::TextureView,
    prefiltered_view: wgpu::TextureView,
) {
    // Keep pass-level copies of the views for bind-group (re)creation.
    self.brdf_lut_view = brdf_lut_view.clone();
    self.irradiance_view = irradiance_view.clone();
    self.prefiltered_view = prefiltered_view.clone();
    // Hand the owned views to the active world's state; if no world is
    // registered yet there is nothing further to update.
    let Some(state) = self.world_states.get_mut(&self.current_world_id) else {
        return;
    };
    state.ibl_brdf_lut_view = Some(brdf_lut_view);
    state.ibl_irradiance_view = Some(irradiance_view);
    state.ibl_prefiltered_view = Some(prefiltered_view);
    state.ibl_dirty = true;
}
/// Installs two IBL environment sets (A and B) plus a blend factor for the
/// currently active world, enabling cross-fading between environments, and
/// marks the world's IBL state dirty.
pub fn update_ibl_textures_blended(
    &mut self,
    brdf_lut_view: wgpu::TextureView,
    irradiance_a: wgpu::TextureView,
    prefiltered_a: wgpu::TextureView,
    irradiance_b: wgpu::TextureView,
    prefiltered_b: wgpu::TextureView,
    blend_factor: f32,
) {
    // Pass-level mirrors: set A fills the primary slots, set B the "_b" slots.
    self.brdf_lut_view = brdf_lut_view.clone();
    self.irradiance_view = irradiance_a.clone();
    self.prefiltered_view = prefiltered_a.clone();
    self.irradiance_b_view = irradiance_b.clone();
    self.prefiltered_b_view = prefiltered_b.clone();
    // Move the owned views into the active world's state; bail out if no
    // world is registered yet.
    let Some(state) = self.world_states.get_mut(&self.current_world_id) else {
        return;
    };
    state.ibl_brdf_lut_view = Some(brdf_lut_view);
    state.ibl_irradiance_view = Some(irradiance_a);
    state.ibl_prefiltered_view = Some(prefiltered_a);
    state.ibl_irradiance_b_view = Some(irradiance_b);
    state.ibl_prefiltered_b_view = Some(prefiltered_b);
    state.ibl_blend_factor = blend_factor;
    state.ibl_dirty = true;
}
/// Installs an IBL texture set on an explicitly identified world.
///
/// Unlike `update_ibl_textures`, this does not touch the pass-level fallback
/// views; it only updates the given world's state (if registered) and marks
/// it dirty.
pub fn update_ibl_textures_for_world(
    &mut self,
    world_id: u64,
    brdf_lut_view: wgpu::TextureView,
    irradiance_view: wgpu::TextureView,
    prefiltered_view: wgpu::TextureView,
) {
    let Some(state) = self.world_states.get_mut(&world_id) else {
        return;
    };
    state.ibl_brdf_lut_view = Some(brdf_lut_view);
    state.ibl_irradiance_view = Some(irradiance_view);
    state.ibl_prefiltered_view = Some(prefiltered_view);
    state.ibl_dirty = true;
}
/// Replaces the texture view used for sampling point-light shadow cubemaps.
pub fn update_point_shadow_cubemap(&mut self, view: wgpu::TextureView) {
self.point_shadow_cubemap_view = view;
}
/// Registers (or replaces) a named texture and records it as newly arrived
/// so dependent bind groups are refreshed on the next prepare.
pub fn register_texture(
    &mut self,
    name: String,
    view: wgpu::TextureView,
    sampler: wgpu::Sampler,
) {
    // Both maps own the name, so one clone is unavoidable.
    let pending_key = name.clone();
    self.registered_textures.insert(name, (view, sampler));
    self.newly_registered_textures.insert(pending_key);
}
/// Registers a named texture, accepting (and currently ignoring) the raw
/// RGBA pixel data and dimensions; behaves exactly like `register_texture`.
pub fn register_texture_with_data(
    &mut self,
    name: String,
    view: wgpu::TextureView,
    sampler: wgpu::Sampler,
    _rgba_data: Vec<u8>,
    _width: u32,
    _height: u32,
) {
    self.register_texture(name, view, sampler);
}
/// Removes a texture previously added via `register_texture` /
/// `register_texture_with_data`.
///
/// Also clears any pending "newly registered" marker, so a texture that is
/// registered and unregistered before the next prepare is not reported as
/// new and the pending set does not accumulate stale names. (Previously only
/// `registered_textures` was cleaned, leaving the marker behind.)
pub fn unregister_texture(&mut self, name: &str) {
    self.newly_registered_textures.remove(name);
    self.registered_textures.remove(name);
}
/// Drops registered textures that the texture cache no longer knows about.
///
/// An empty cache registry is treated as "not yet populated" rather than
/// "everything was removed", so in that case all current registrations are
/// kept untouched.
pub fn sync_textures(
    &mut self,
    texture_cache: &crate::render::wgpu::texture_cache::TextureCache,
) {
    let live = &texture_cache.registry.name_to_index;
    if !live.is_empty() {
        self.registered_textures
            .retain(|name, _| live.contains_key(name));
    }
}
    /// Synchronizes this pass's GPU mesh data with `mesh_cache`.
    ///
    /// Two phases:
    /// 1. Every mesh flagged in `dirty_mesh_names` is re-uploaded in place
    ///    when its vertex count is unchanged (including morph-target
    ///    displacement data); otherwise its registration is dropped so it is
    ///    re-added with fresh offsets in phase 2.
    /// 2. Any cache mesh not yet registered here is appended via `add_mesh`.
    pub(super) fn sync_meshes_from_cache(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        mesh_cache: &crate::ecs::prefab::resources::MeshCache,
    ) {
        // Take the dirty set so flags raised while we work start a new batch.
        for dirty_name in std::mem::take(&mut self.dirty_mesh_names) {
            if let Some(&mesh_id) = self.meshes.get(&dirty_name)
                && let Some(mesh) = registry_entry_by_name(&mesh_cache.registry, &dirty_name)
            {
                let mesh_data = &mut self.mesh_data[mesh_id as usize];
                // In-place overwrite is only valid when the new vertex data
                // fits exactly in the slot originally reserved for this mesh.
                if mesh.vertices.len() as u32 == mesh_data.vertex_count {
                    let vertex_data = bytemuck::cast_slice(&mesh.vertices);
                    let vertex_offset_bytes =
                        (mesh_data.vertex_offset as usize * std::mem::size_of::<Vertex>()) as u64;
                    queue.write_buffer(&self.vertex_buffer, vertex_offset_bytes, vertex_data);
                    if let Some(morph_targets) = &mesh.morph_targets {
                        let vertex_count = mesh.vertices.len();
                        // Morph targets are capped at 8 per mesh.
                        let target_count = morph_targets.targets.len().min(8);
                        let total_displacements = vertex_count * target_count;
                        if total_displacements > 0 {
                            // First time this mesh gains morph targets:
                            // reserve a displacement range at the end of the
                            // buffer.
                            if mesh_data.morph_target_count == 0 {
                                mesh_data.morph_displacement_offset =
                                    self.current_morph_displacement_offset;
                                mesh_data.morph_target_count = target_count as u32;
                                self.current_morph_displacement_offset +=
                                    total_displacements as u32;
                            }
                            // Flatten displacements target-major: all vertices
                            // of target 0, then target 1, etc. Missing
                            // normal/tangent streams default to zero.
                            let mut displacements = Vec::with_capacity(total_displacements);
                            for target_index in 0..target_count {
                                let target = &morph_targets.targets[target_index];
                                for vertex_index in 0..vertex_count {
                                    let position = target
                                        .position_displacements
                                        .get(vertex_index)
                                        .copied()
                                        .unwrap_or([0.0, 0.0, 0.0]);
                                    let normal = target
                                        .normal_displacements
                                        .as_ref()
                                        .and_then(|n| n.get(vertex_index))
                                        .copied()
                                        .unwrap_or([0.0, 0.0, 0.0]);
                                    let tangent = target
                                        .tangent_displacements
                                        .as_ref()
                                        .and_then(|t| t.get(vertex_index))
                                        .copied()
                                        .unwrap_or([0.0, 0.0, 0.0]);
                                    displacements.push(MorphDisplacement {
                                        position,
                                        _pad0: 0.0,
                                        normal,
                                        _pad1: 0.0,
                                        tangent,
                                        _pad2: 0.0,
                                    });
                                }
                            }
                            let displacement_data = bytemuck::cast_slice(&displacements);
                            let displacement_offset_bytes = mesh_data.morph_displacement_offset
                                as u64
                                * std::mem::size_of::<MorphDisplacement>() as u64;
                            let required_size =
                                displacement_offset_bytes + displacement_data.len() as u64;
                            // Grow the displacement buffer with headroom,
                            // preserving existing contents via a GPU copy.
                            if required_size > self.morph_displacement_buffer_size {
                                let new_size = (required_size as f64 * BUFFER_GROWTH_FACTOR as f64)
                                    .ceil() as u64;
                                let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                                    label: Some("Mesh Morph Displacement Buffer (Resized)"),
                                    size: new_size,
                                    usage: wgpu::BufferUsages::STORAGE
                                        | wgpu::BufferUsages::COPY_DST
                                        | wgpu::BufferUsages::COPY_SRC,
                                    mapped_at_creation: false,
                                });
                                if displacement_offset_bytes > 0 {
                                    let mut encoder = device.create_command_encoder(
                                        &wgpu::CommandEncoderDescriptor {
                                            label: Some("Morph Displacement Buffer Reallocation"),
                                        },
                                    );
                                    encoder.copy_buffer_to_buffer(
                                        &self.morph_displacement_buffer,
                                        0,
                                        &new_buffer,
                                        0,
                                        displacement_offset_bytes,
                                    );
                                    queue.submit(std::iter::once(encoder.finish()));
                                }
                                self.morph_displacement_buffer = new_buffer;
                                self.morph_displacement_buffer_size = new_size;
                                // Bind groups reference the old buffer; force
                                // a rebuild for every world.
                                self.invalidate_all_instance_bind_groups(device);
                            }
                            queue.write_buffer(
                                &self.morph_displacement_buffer,
                                displacement_offset_bytes,
                                displacement_data,
                            );
                        }
                    }
                    continue;
                }
            }
            // Vertex count changed (or the mesh vanished from the cache):
            // drop the registration so the mesh is re-added below.
            self.meshes.remove(&dirty_name);
        }
        // Phase 2: append any cache mesh this pass does not know yet.
        // Collect first because `add_mesh` needs `&mut self`.
        let mut meshes_to_add = Vec::new();
        for (name, mesh) in mesh_cache_iter(mesh_cache) {
            if !self.meshes.contains_key(name) {
                meshes_to_add.push((name.clone(), mesh.clone()));
            }
        }
        for (name, mesh) in meshes_to_add {
            self.add_mesh(device, queue, &name, mesh);
        }
    }
pub(super) fn compute_vertex_utilization(&self) -> f32 {
let actual_vertex_count: u64 = self.mesh_data.iter().map(|m| m.vertex_count as u64).sum();
let actual_vertex_bytes = actual_vertex_count * std::mem::size_of::<Vertex>() as u64;
if self.vertex_buffer_size > 0 {
actual_vertex_bytes as f32 / self.vertex_buffer_size as f32
} else {
1.0
}
}
pub(super) fn compute_index_utilization(&self) -> f32 {
let actual_index_count: u64 = self.mesh_data.iter().map(|m| m.index_count as u64).sum();
let actual_index_bytes = actual_index_count * std::mem::size_of::<u32>() as u64;
if self.index_buffer_size > 0 {
actual_index_bytes as f32 / self.index_buffer_size as f32
} else {
1.0
}
}
pub(super) fn check_and_compact_buffers(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
mesh_cache: &crate::ecs::prefab::resources::MeshCache,
) {
let actual_vertex_count: u64 = self.mesh_data.iter().map(|m| m.vertex_count as u64).sum();
let actual_index_count: u64 = self.mesh_data.iter().map(|m| m.index_count as u64).sum();
let actual_vertex_bytes = actual_vertex_count * std::mem::size_of::<Vertex>() as u64;
let actual_index_bytes = actual_index_count * std::mem::size_of::<u32>() as u64;
let vertex_utilization = if self.vertex_buffer_size > 0 {
actual_vertex_bytes as f32 / self.vertex_buffer_size as f32
} else {
1.0
};
let index_utilization = if self.index_buffer_size > 0 {
actual_index_bytes as f32 / self.index_buffer_size as f32
} else {
1.0
};
let should_compact = vertex_utilization < BUFFER_SHRINK_THRESHOLD
|| index_utilization < BUFFER_SHRINK_THRESHOLD;
if should_compact && actual_vertex_bytes > 0 && actual_index_bytes > 0 {
self.meshes.clear();
self.mesh_data.clear();
self.current_vertex_offset = 0;
self.current_index_offset = 0;
self.current_morph_displacement_offset = 0;
self.add_mesh(device, queue, "Cube", create_cube_mesh());
self.add_mesh(device, queue, "Sphere", create_sphere_mesh(1.0, 16));
self.add_mesh(device, queue, "Sphere_LOD1", create_sphere_mesh(1.0, 8));
self.add_mesh(device, queue, "Sphere_LOD2", create_sphere_mesh(1.0, 4));
self.add_mesh(device, queue, "Plane", create_plane_mesh(2.0));
self.add_mesh(
device,
queue,
"SubdividedPlane",
create_subdivided_plane_mesh(2.0, 20),
);
self.add_mesh(
device,
queue,
"Cylinder",
create_cylinder_mesh(0.5, 1.0, 16),
);
self.add_mesh(device, queue, "Cone", create_cone_mesh(0.5, 1.0, 16));
self.add_mesh(device, queue, "Torus", create_torus_mesh(1.0, 0.3, 16, 16));
for (name, mesh) in mesh_cache_iter(mesh_cache) {
if !self.meshes.contains_key(name) {
self.add_mesh(device, queue, name, mesh.clone());
}
}
let new_vertex_size = (actual_vertex_bytes as f32 * BUFFER_GROWTH_FACTOR).ceil() as u64;
let new_index_size = (actual_index_bytes as f32 * BUFFER_GROWTH_FACTOR).ceil() as u64;
if new_vertex_size < self.vertex_buffer_size {
let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Vertex Buffer (Compacted)"),
size: new_vertex_size,
usage: wgpu::BufferUsages::VERTEX
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Mesh Vertex Buffer Compaction"),
});
encoder.copy_buffer_to_buffer(
&self.vertex_buffer,
0,
&new_buffer,
0,
actual_vertex_bytes,
);
queue.submit(std::iter::once(encoder.finish()));
self.vertex_buffer = new_buffer;
self.vertex_buffer_size = new_vertex_size;
}
if new_index_size < self.index_buffer_size {
let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Index Buffer (Compacted)"),
size: new_index_size,
usage: wgpu::BufferUsages::INDEX
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Mesh Index Buffer Compaction"),
});
encoder.copy_buffer_to_buffer(
&self.index_buffer,
0,
&new_buffer,
0,
actual_index_bytes,
);
queue.submit(std::iter::once(encoder.finish()));
self.index_buffer = new_buffer;
self.index_buffer_size = new_index_size;
}
}
}
    /// Registers `mesh` under `name`: appends its vertex/index data to the
    /// shared GPU buffers (growing them by `BUFFER_GROWTH_FACTOR` when
    /// needed), computes a bounding sphere and AABB for culling, writes the
    /// per-mesh bounds/AABB/LOD table entries, and uploads morph-target
    /// displacement data when present.
    ///
    /// Every buffer reallocation preserves existing contents via a GPU-side
    /// copy and invalidates the bind groups that reference the old buffer.
    pub(super) fn add_mesh(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        name: &str,
        mesh: Mesh,
    ) {
        // Mesh ids are dense indices into `mesh_data` / the GPU-side tables.
        let mesh_id = self.mesh_data.len() as u32;
        let vertex_data = bytemuck::cast_slice(&mesh.vertices);
        let vertex_offset_bytes =
            (self.current_vertex_offset as usize * std::mem::size_of::<Vertex>()) as u64;
        let required_vertex_size = vertex_offset_bytes + vertex_data.len() as u64;
        // Grow the shared vertex buffer (with headroom) and copy the
        // already-used prefix into the new allocation.
        if required_vertex_size > self.vertex_buffer_size {
            let new_size = (required_vertex_size as f32 * BUFFER_GROWTH_FACTOR).ceil() as u64;
            let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Mesh Vertex Buffer (Resized)"),
                size: new_size,
                usage: wgpu::BufferUsages::VERTEX
                    | wgpu::BufferUsages::COPY_DST
                    | wgpu::BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            });
            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Mesh Vertex Buffer Reallocation"),
            });
            let used_size =
                (self.current_vertex_offset as usize * std::mem::size_of::<Vertex>()) as u64;
            if used_size > 0 {
                encoder.copy_buffer_to_buffer(&self.vertex_buffer, 0, &new_buffer, 0, used_size);
            }
            queue.submit(std::iter::once(encoder.finish()));
            self.vertex_buffer = new_buffer;
            self.vertex_buffer_size = new_size;
        }
        queue.write_buffer(&self.vertex_buffer, vertex_offset_bytes, vertex_data);
        let index_data = bytemuck::cast_slice(&mesh.indices);
        let index_offset_bytes =
            (self.current_index_offset as usize * std::mem::size_of::<u32>()) as u64;
        let required_index_size = index_offset_bytes + index_data.len() as u64;
        // Same grow-and-copy strategy for the shared index buffer.
        if required_index_size > self.index_buffer_size {
            let new_size = (required_index_size as f32 * BUFFER_GROWTH_FACTOR).ceil() as u64;
            let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Mesh Index Buffer (Resized)"),
                size: new_size,
                usage: wgpu::BufferUsages::INDEX
                    | wgpu::BufferUsages::COPY_DST
                    | wgpu::BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            });
            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Mesh Index Buffer Reallocation"),
            });
            let used_size =
                (self.current_index_offset as usize * std::mem::size_of::<u32>()) as u64;
            if used_size > 0 {
                encoder.copy_buffer_to_buffer(&self.index_buffer, 0, &new_buffer, 0, used_size);
            }
            queue.submit(std::iter::once(encoder.finish()));
            self.index_buffer = new_buffer;
            self.index_buffer_size = new_size;
        }
        queue.write_buffer(&self.index_buffer, index_offset_bytes, index_data);
        // Derive culling bounds: axis-aligned box from the position extremes,
        // then a bounding sphere centered on the box center with radius equal
        // to the farthest vertex distance (second pass).
        let (bounding_center, bounding_radius, aabb_min, aabb_max) = {
            let mut min_x = f32::MAX;
            let mut min_y = f32::MAX;
            let mut min_z = f32::MAX;
            let mut max_x = f32::MIN;
            let mut max_y = f32::MIN;
            let mut max_z = f32::MIN;
            for vertex in &mesh.vertices {
                min_x = min_x.min(vertex.position[0]);
                min_y = min_y.min(vertex.position[1]);
                min_z = min_z.min(vertex.position[2]);
                max_x = max_x.max(vertex.position[0]);
                max_y = max_y.max(vertex.position[1]);
                max_z = max_z.max(vertex.position[2]);
            }
            let center_x = (min_x + max_x) * 0.5;
            let center_y = (min_y + max_y) * 0.5;
            let center_z = (min_z + max_z) * 0.5;
            let mut max_dist_sq = 0.0f32;
            for vertex in &mesh.vertices {
                let dx = vertex.position[0] - center_x;
                let dy = vertex.position[1] - center_y;
                let dz = vertex.position[2] - center_z;
                let dist_sq = dx * dx + dy * dy + dz * dz;
                max_dist_sq = max_dist_sq.max(dist_sq);
            }
            (
                [center_x, center_y, center_z],
                max_dist_sq.sqrt(),
                [min_x, min_y, min_z],
                [max_x, max_y, max_z],
            )
        };
        // Reserve the morph displacement range now; the global offset itself
        // is only advanced after the successful upload further below.
        let (morph_displacement_offset, morph_target_count) =
            if let Some(morph_targets) = &mesh.morph_targets {
                // Morph targets are capped at 8 per mesh.
                let target_count = morph_targets.targets.len().min(8) as u32;
                let offset = self.current_morph_displacement_offset;
                (offset, target_count)
            } else {
                (0, 0)
            };
        let mesh_data = MeshData {
            vertex_offset: self.current_vertex_offset,
            vertex_count: mesh.vertices.len() as u32,
            index_offset: self.current_index_offset,
            index_count: mesh.indices.len() as u32,
            morph_displacement_offset,
            morph_target_count,
        };
        self.meshes.insert(name.to_string(), mesh_id);
        self.mesh_data.push(mesh_data);
        // Grow the per-mesh bounds table on the GPU if this id doesn't fit;
        // culling bind groups referencing the old buffer must be rebuilt.
        if mesh_id as usize >= self.mesh_bounds_buffer_size {
            let new_size = ((mesh_id as usize + 1) as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
            let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Mesh Bounds Buffer (Resized)"),
                size: (std::mem::size_of::<MeshBoundsData>() * new_size) as u64,
                usage: wgpu::BufferUsages::STORAGE
                    | wgpu::BufferUsages::COPY_DST
                    | wgpu::BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            });
            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Mesh Bounds Buffer Reallocation"),
            });
            let used_size = (mesh_id as usize * std::mem::size_of::<MeshBoundsData>()) as u64;
            if used_size > 0 {
                encoder.copy_buffer_to_buffer(
                    &self.mesh_bounds_buffer,
                    0,
                    &new_buffer,
                    0,
                    used_size,
                );
            }
            queue.submit(std::iter::once(encoder.finish()));
            self.mesh_bounds_buffer = new_buffer;
            self.mesh_bounds_buffer_size = new_size;
            if let Some(world_state) = self.world_states.get_mut(&self.current_world_id)
                && let Some(gpu) = world_state.gpu_buffers.as_mut()
            {
                gpu.culling_bind_group = None;
                gpu.phase1_culling_bind_group = None;
            }
        }
        let bounds_data = MeshBoundsData {
            center: bounding_center,
            radius: bounding_radius,
        };
        queue.write_buffer(
            &self.mesh_bounds_buffer,
            (mesh_id as usize * std::mem::size_of::<MeshBoundsData>()) as u64,
            bytemuck::bytes_of(&bounds_data),
        );
        self.mesh_bounds_data.push(bounds_data);
        // Same grow-and-invalidate handling for the AABB table.
        if mesh_id as usize >= self.mesh_aabbs_buffer_size {
            let new_size = ((mesh_id as usize + 1) as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
            let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Mesh AABBs Buffer (Resized)"),
                size: (std::mem::size_of::<MeshBoundsAABB>() * new_size) as u64,
                usage: wgpu::BufferUsages::STORAGE
                    | wgpu::BufferUsages::COPY_DST
                    | wgpu::BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            });
            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Mesh AABBs Buffer Reallocation"),
            });
            let used_size = (mesh_id as usize * std::mem::size_of::<MeshBoundsAABB>()) as u64;
            if used_size > 0 {
                encoder.copy_buffer_to_buffer(
                    &self.mesh_aabbs_buffer,
                    0,
                    &new_buffer,
                    0,
                    used_size,
                );
            }
            queue.submit(std::iter::once(encoder.finish()));
            self.mesh_aabbs_buffer = new_buffer;
            self.mesh_aabbs_buffer_size = new_size;
            if let Some(world_state) = self.world_states.get_mut(&self.current_world_id)
                && let Some(gpu) = world_state.gpu_buffers.as_mut()
            {
                gpu.culling_bind_group = None;
                gpu.phase1_culling_bind_group = None;
            }
        }
        let aabb_data = MeshBoundsAABB {
            min: aabb_min,
            _pad0: 0.0,
            max: aabb_max,
            _pad1: 0.0,
        };
        queue.write_buffer(
            &self.mesh_aabbs_buffer,
            (mesh_id as usize * std::mem::size_of::<MeshBoundsAABB>()) as u64,
            bytemuck::bytes_of(&aabb_data),
        );
        self.mesh_aabbs_data.push(aabb_data);
        // Same grow-and-invalidate handling for the LOD table.
        if mesh_id as usize >= self.mesh_lod_buffer_size {
            let new_size = ((mesh_id as usize + 1) as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
            let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Mesh LOD Buffer (Resized)"),
                size: (std::mem::size_of::<MeshLodInfo>() * new_size) as u64,
                usage: wgpu::BufferUsages::STORAGE
                    | wgpu::BufferUsages::COPY_DST
                    | wgpu::BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            });
            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Mesh LOD Buffer Reallocation"),
            });
            let used_size = (mesh_id as usize * std::mem::size_of::<MeshLodInfo>()) as u64;
            if used_size > 0 {
                encoder.copy_buffer_to_buffer(&self.mesh_lod_buffer, 0, &new_buffer, 0, used_size);
            }
            queue.submit(std::iter::once(encoder.finish()));
            self.mesh_lod_buffer = new_buffer;
            self.mesh_lod_buffer_size = new_size;
            if let Some(world_state) = self.world_states.get_mut(&self.current_world_id)
                && let Some(gpu) = world_state.gpu_buffers.as_mut()
            {
                gpu.culling_bind_group = None;
                gpu.phase1_culling_bind_group = None;
            }
        }
        // New meshes start with a single LOD; thresholds unused until LODs
        // are registered.
        let lod_data = MeshLodInfo {
            lod_count: 1,
            thresholds: [0.0, 0.0, 0.0],
        };
        queue.write_buffer(
            &self.mesh_lod_buffer,
            (mesh_id as usize * std::mem::size_of::<MeshLodInfo>()) as u64,
            bytemuck::bytes_of(&lod_data),
        );
        self.mesh_lod_data.push(lod_data);
        // Upload morph displacement data, flattened target-major; missing
        // normal/tangent streams default to zero.
        if let Some(morph_targets) = &mesh.morph_targets {
            let vertex_count = mesh.vertices.len();
            let target_count = morph_targets.targets.len().min(8);
            let total_displacements = vertex_count * target_count;
            if total_displacements > 0 {
                let mut displacements = Vec::with_capacity(total_displacements);
                for target_index in 0..target_count {
                    let target = &morph_targets.targets[target_index];
                    for vertex_index in 0..vertex_count {
                        let position = target
                            .position_displacements
                            .get(vertex_index)
                            .copied()
                            .unwrap_or([0.0, 0.0, 0.0]);
                        let normal = target
                            .normal_displacements
                            .as_ref()
                            .and_then(|n| n.get(vertex_index))
                            .copied()
                            .unwrap_or([0.0, 0.0, 0.0]);
                        let tangent = target
                            .tangent_displacements
                            .as_ref()
                            .and_then(|t| t.get(vertex_index))
                            .copied()
                            .unwrap_or([0.0, 0.0, 0.0]);
                        displacements.push(MorphDisplacement {
                            position,
                            _pad0: 0.0,
                            normal,
                            _pad1: 0.0,
                            tangent,
                            _pad2: 0.0,
                        });
                    }
                }
                let displacement_data = bytemuck::cast_slice(&displacements);
                let displacement_offset_bytes = self.current_morph_displacement_offset as u64
                    * std::mem::size_of::<MorphDisplacement>() as u64;
                let required_size = displacement_offset_bytes + displacement_data.len() as u64;
                // Grow the displacement buffer with headroom, copying the
                // used prefix; instance bind groups reference this buffer,
                // so force them to rebuild.
                if required_size > self.morph_displacement_buffer_size {
                    let new_size =
                        (required_size as f64 * BUFFER_GROWTH_FACTOR as f64).ceil() as u64;
                    let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                        label: Some("Mesh Morph Displacement Buffer (Resized)"),
                        size: new_size,
                        usage: wgpu::BufferUsages::STORAGE
                            | wgpu::BufferUsages::COPY_DST
                            | wgpu::BufferUsages::COPY_SRC,
                        mapped_at_creation: false,
                    });
                    if displacement_offset_bytes > 0 {
                        let mut encoder =
                            device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                                label: Some("Morph Displacement Buffer Reallocation"),
                            });
                        encoder.copy_buffer_to_buffer(
                            &self.morph_displacement_buffer,
                            0,
                            &new_buffer,
                            0,
                            displacement_offset_bytes,
                        );
                        queue.submit(std::iter::once(encoder.finish()));
                    }
                    self.morph_displacement_buffer = new_buffer;
                    self.morph_displacement_buffer_size = new_size;
                    self.invalidate_all_instance_bind_groups(device);
                }
                queue.write_buffer(
                    &self.morph_displacement_buffer,
                    displacement_offset_bytes,
                    displacement_data,
                );
                // Advance the shared offset only after the data is written.
                self.current_morph_displacement_offset += total_displacements as u32;
            }
        }
        self.current_vertex_offset += mesh.vertices.len() as u32;
        self.current_index_offset += mesh.indices.len() as u32;
    }
pub(super) fn draw_batches<'a>(
pass: &mut wgpu::RenderPass<'a>,
instances: &[(u32, u32, u32, u32)],
batch_offset: usize,
material_bind_groups: &'a std::collections::HashMap<u32, wgpu::BindGroup>,
indirect_buffer: &'a wgpu::Buffer,
) {
let mut current_material: Option<u32> = None;
let mut batch_start_index: usize = 0;
for (batch_index, &(_mesh_id, material_id, _start, _end)) in instances.iter().enumerate() {
if current_material != Some(material_id) {
if let Some(prev_material) = current_material {
let count = batch_index - batch_start_index;
if count > 0
&& let Some(bind_group) = material_bind_groups.get(&prev_material)
{
pass.set_bind_group(2, bind_group, &[]);
let indirect_offset = ((batch_offset + batch_start_index)
* std::mem::size_of::<DrawIndexedIndirect>())
as u64;
pass.multi_draw_indexed_indirect(
indirect_buffer,
indirect_offset,
count as u32,
);
}
}
current_material = Some(material_id);
batch_start_index = batch_index;
}
}
if let Some(material_id) = current_material {
let count = instances.len() - batch_start_index;
if count > 0
&& let Some(bind_group) = material_bind_groups.get(&material_id)
{
pass.set_bind_group(2, bind_group, &[]);
let indirect_offset = ((batch_offset + batch_start_index)
* std::mem::size_of::<DrawIndexedIndirect>())
as u64;
pass.multi_draw_indexed_indirect(indirect_buffer, indirect_offset, count as u32);
}
}
}
pub(super) fn create_material_bind_group(
device: &wgpu::Device,
material_id: u32,
cache_key: &MaterialTextures,
ctx: &MaterialBindGroupContext,
) -> (wgpu::BindGroup, MaterialTextures) {
let mut found_textures = MaterialTextures::default();
let (base_texture_view, base_sampler) = if let Some(ref name) = cache_key.base_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.base_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
let (emissive_texture_view, emissive_sampler) = if let Some(ref name) =
cache_key.emissive_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.emissive_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_black_view, ctx.dummy_sampler)
};
let (normal_texture_view, normal_sampler) = if let Some(ref name) = cache_key.normal_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.normal_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_normal_view, ctx.dummy_sampler)
};
let (metallic_roughness_texture_view, metallic_roughness_sampler) = if let Some(ref name) =
cache_key.metallic_roughness_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.metallic_roughness_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
let (occlusion_texture_view, occlusion_sampler) = if let Some(ref name) =
cache_key.occlusion_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.occlusion_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
let (transmission_texture_view, transmission_sampler) = if let Some(ref name) =
cache_key.transmission_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.transmission_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
let (thickness_texture_view, thickness_sampler) = if let Some(ref name) =
cache_key.thickness_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.thickness_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
let (specular_texture_view, specular_sampler) = if let Some(ref name) =
cache_key.specular_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.specular_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
#[cfg(not(target_arch = "wasm32"))]
let (specular_color_texture_view, specular_color_sampler) = if let Some(ref name) =
cache_key.specular_color_texture
&& let Some((view, sampler)) = ctx.registered_textures.get(name)
{
found_textures.specular_color_texture = Some(name.clone());
(view, sampler)
} else {
(ctx.dummy_white_view, ctx.dummy_sampler)
};
#[allow(unused_mut)]
let mut bind_group_entries = vec![
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(base_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(base_sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(emissive_texture_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(emissive_sampler),
},
wgpu::BindGroupEntry {
binding: 4,
resource: wgpu::BindingResource::TextureView(normal_texture_view),
},
wgpu::BindGroupEntry {
binding: 5,
resource: wgpu::BindingResource::Sampler(normal_sampler),
},
wgpu::BindGroupEntry {
binding: 6,
resource: wgpu::BindingResource::TextureView(metallic_roughness_texture_view),
},
wgpu::BindGroupEntry {
binding: 7,
resource: wgpu::BindingResource::Sampler(metallic_roughness_sampler),
},
wgpu::BindGroupEntry {
binding: 8,
resource: wgpu::BindingResource::TextureView(occlusion_texture_view),
},
wgpu::BindGroupEntry {
binding: 9,
resource: wgpu::BindingResource::Sampler(occlusion_sampler),
},
wgpu::BindGroupEntry {
binding: 10,
resource: wgpu::BindingResource::TextureView(transmission_texture_view),
},
wgpu::BindGroupEntry {
binding: 11,
resource: wgpu::BindingResource::Sampler(transmission_sampler),
},
wgpu::BindGroupEntry {
binding: 12,
resource: wgpu::BindingResource::TextureView(thickness_texture_view),
},
wgpu::BindGroupEntry {
binding: 13,
resource: wgpu::BindingResource::Sampler(thickness_sampler),
},
wgpu::BindGroupEntry {
binding: 14,
resource: wgpu::BindingResource::TextureView(specular_texture_view),
},
wgpu::BindGroupEntry {
binding: 15,
resource: wgpu::BindingResource::Sampler(specular_sampler),
},
];
#[cfg(not(target_arch = "wasm32"))]
{
bind_group_entries.push(wgpu::BindGroupEntry {
binding: 16,
resource: wgpu::BindingResource::TextureView(specular_color_texture_view),
});
bind_group_entries.push(wgpu::BindGroupEntry {
binding: 17,
resource: wgpu::BindingResource::Sampler(specular_color_sampler),
});
}
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some(&format!("Material {} Texture Bind Group", material_id)),
layout: ctx.texture_bind_group_layout,
entries: &bind_group_entries,
});
(bind_group, found_textures)
}
    /// Recreates material bind groups in the current world whose cache key
    /// references textures that have become available since the bind group
    /// was last built.
    pub(super) fn refresh_material_bind_groups_for_world(&mut self, device: &wgpu::Device) {
        let world_state = match self.world_states.get(&self.current_world_id) {
            Some(state) => state,
            None => return,
        };
        if world_state.material_bind_group_cache_key.is_empty() {
            return;
        }
        // Collect ids first: the immutable borrow of `world_state` must end
        // before the `get_mut` below can rebuild the bind groups.
        let material_ids_to_refresh: Vec<u32> = world_state
            .material_bind_group_cache_key
            .iter()
            .filter(|&(&material_id, cache_key)| {
                // Compare against the texture set resolved when the bind
                // group was built; refresh only on newly-available textures.
                let prev_found = world_state
                    .material_bind_group_found_textures
                    .get(&material_id)
                    .cloned()
                    .unwrap_or_default();
                cache_key.has_newly_available_textures(&prev_found, &self.registered_textures)
            })
            .map(|(&id, _)| id)
            .collect();
        if material_ids_to_refresh.is_empty() {
            return;
        }
        // Borrows only disjoint fields of `self`, so it can coexist with the
        // mutable borrow of `self.world_states` below.
        let ctx = MaterialBindGroupContext {
            registered_textures: &self.registered_textures,
            dummy_white_view: &self.dummy_white_view,
            dummy_black_view: &self.dummy_black_view,
            dummy_normal_view: &self.dummy_normal_view,
            dummy_sampler: &self.dummy_sampler,
            texture_bind_group_layout: &self.texture_bind_group_layout,
        };
        let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
        for material_id in material_ids_to_refresh {
            let cache_key = world_state
                .material_bind_group_cache_key
                .get(&material_id)
                .cloned()
                .unwrap();
            let (bind_group, found_textures) =
                Self::create_material_bind_group(device, material_id, &cache_key, &ctx);
            world_state
                .material_bind_groups
                .insert(material_id, bind_group);
            // Remember what was resolved so the next refresh can detect
            // newly-available textures.
            world_state
                .material_bind_group_found_textures
                .insert(material_id, found_textures);
        }
    }
pub(super) fn rebuild_instance_bind_group(&mut self, device: &wgpu::Device) {
let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
let gpu = world_state.gpu_buffers.as_mut().unwrap();
gpu.instance_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Mesh Instance Bind Group"),
layout: &self.instance_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: gpu.transform_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: gpu.materials_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 2,
resource: gpu.object_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 3,
resource: gpu.lights_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 4,
resource: gpu.visible_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 5,
resource: gpu.custom_data_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 6,
resource: self.morph_displacement_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 7,
resource: gpu.light_grid_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 8,
resource: gpu.light_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 9,
resource: gpu.cluster_uniforms_buffer.as_entire_binding(),
},
],
});
gpu.phase1_instance_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Phase 1 Instance Bind Group"),
layout: &self.instance_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: gpu.transform_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: gpu.materials_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 2,
resource: gpu.object_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 3,
resource: gpu.lights_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 4,
resource: gpu.phase1_visible_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 5,
resource: gpu.custom_data_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 6,
resource: self.morph_displacement_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 7,
resource: gpu.light_grid_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 8,
resource: gpu.light_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 9,
resource: gpu.cluster_uniforms_buffer.as_entire_binding(),
},
],
});
gpu.cluster_bounds_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Cluster Bounds Bind Group (Per-World)"),
layout: &self._cluster_bounds_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: gpu.cluster_uniforms_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: self.cluster_bounds_buffer.as_entire_binding(),
},
],
});
}
pub(super) fn invalidate_all_instance_bind_groups(&mut self, device: &wgpu::Device) {
let world_ids: Vec<u64> = self.world_states.keys().copied().collect();
let saved_world_id = self.current_world_id;
for world_id in world_ids {
if self
.world_states
.get(&world_id)
.unwrap()
.gpu_buffers
.is_some()
{
self.current_world_id = world_id;
self.rebuild_instance_bind_group(device);
}
}
self.current_world_id = saved_world_id;
}
pub(super) fn update_dirty_transforms(
&mut self,
world: &crate::ecs::world::World,
queue: &wgpu::Queue,
) -> bool {
let dirty_entities = self
.frame_dirty
.as_mut()
.map(|fd| std::mem::take(&mut fd.transform_dirty))
.unwrap_or_default();
let dirty_count = dirty_entities.len();
let local_matrices_dirty = self
.frame_dirty
.as_mut()
.map(|fd| std::mem::take(&mut fd.instanced_local_matrices_dirty))
.unwrap_or_default();
let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
if !local_matrices_dirty.is_empty() {
for &entity in &local_matrices_dirty {
if let Some(&(start, count)) = world_state.instanced_transform_ranges.get(&entity)
&& let Some(instanced_mesh) = world.get_instanced_mesh(entity)
{
let local_matrices = instanced_mesh.cached_local_matrices();
let upload_count = local_matrices.len().min(count as usize);
if upload_count > 0 {
let raw: Vec<[[f32; 4]; 4]> = local_matrices[..upload_count]
.iter()
.map(|mat| (*mat).into())
.collect();
if let Some(gpu) = world_state.gpu_buffers.as_ref() {
let offset_bytes =
(start as u64) * std::mem::size_of::<[[f32; 4]; 4]>() as u64;
queue.write_buffer(
&gpu.instanced_local_matrix_buffer,
offset_bytes,
bytemuck::cast_slice(&raw),
);
}
world_state.instanced_compute_dirty = true;
}
}
}
}
let total = world_state.gpu_registry.len();
if dirty_count > 0 && total > 0 {
let threshold = (total as f32 * 0.25).ceil() as usize;
if dirty_count > threshold {
let regular_count = world_state.regular_object_count as usize;
let slot_limit = regular_count.min(world_state.cached_transforms.len());
let all_dirty = dirty_count >= regular_count;
let dirty_set: Option<&HashSet<crate::ecs::world::Entity>> = if all_dirty {
None
} else {
Some(&dirty_entities)
};
let mut any_mesh_dirty = false;
for slot in 0..slot_limit {
if let Some(entity) = world_state
.gpu_registry
.slot_to_entity
.get(slot)
.copied()
.flatten()
&& (all_dirty || dirty_set.is_some_and(|s| s.contains(&entity)))
&& let Some(transform) = world.get_global_transform(entity)
{
any_mesh_dirty = true;
world_state.cached_transforms[slot] = ModelMatrix {
model: transform.0.into(),
normal_matrix: compute_normal_matrix(&transform.0),
};
}
}
for (&entity, &(start, count)) in &world_state.instanced_transform_ranges {
if !dirty_entities.contains(&entity) {
continue;
}
world_state.instanced_compute_dirty = true;
let Some(instanced_mesh) = world.get_instanced_mesh(entity) else {
continue;
};
let custom_data_slice = instanced_mesh.custom_data_slice();
let start_idx = start as usize;
let end_idx = start_idx + count as usize;
if end_idx <= world_state.cached_custom_data.len() {
for (offset, data) in custom_data_slice.iter().enumerate() {
world_state.cached_custom_data[start_idx + offset] = data.tint;
}
}
}
let gpu = world_state.gpu_buffers.as_ref().unwrap();
let regular_count_for_upload = world_state.regular_object_count as usize;
if any_mesh_dirty
&& regular_count_for_upload > 0
&& regular_count_for_upload <= world_state.cached_transforms.len()
{
queue.write_buffer(
&gpu.transform_buffer,
0,
bytemuck::cast_slice(
&world_state.cached_transforms[..regular_count_for_upload],
),
);
}
let used_count = world_state.object_count as usize;
if used_count > 0 && used_count <= world_state.cached_custom_data.len() {
queue.write_buffer(
&gpu.custom_data_buffer,
0,
bytemuck::cast_slice(&world_state.cached_custom_data[..used_count]),
);
}
} else {
let mut mesh_updates: Vec<(u32, ModelMatrix)> = Vec::new();
let mut instanced_dirty: Vec<crate::ecs::world::Entity> = Vec::new();
for &entity in &dirty_entities {
if let Some(&slot) = world_state.gpu_registry.entity_to_slot.get(&entity)
&& let Some(transform) = world.get_global_transform(entity)
{
let model_matrix = ModelMatrix {
model: transform.0.into(),
normal_matrix: compute_normal_matrix(&transform.0),
};
if (slot as usize) < world_state.cached_transforms.len() {
world_state.cached_transforms[slot as usize] = model_matrix;
}
mesh_updates.push((slot, model_matrix));
} else if world_state.instanced_transform_ranges.contains_key(&entity) {
instanced_dirty.push(entity);
} else if world.entity_has_components(entity, crate::ecs::world::INSTANCED_MESH)
{
return false;
}
}
let gpu = world_state.gpu_buffers.as_ref().unwrap();
if !mesh_updates.is_empty() {
mesh_updates.sort_by_key(|(slot, _)| *slot);
let element_size = std::mem::size_of::<ModelMatrix>() as u64;
let mut range_start = mesh_updates[0].0;
let mut range_data: Vec<ModelMatrix> = vec![mesh_updates[0].1];
for &(slot, matrix) in mesh_updates.iter().skip(1) {
if slot == range_start + range_data.len() as u32 {
range_data.push(matrix);
} else {
queue.write_buffer(
&gpu.transform_buffer,
(range_start as u64) * element_size,
bytemuck::cast_slice(&range_data),
);
range_start = slot;
range_data.clear();
range_data.push(matrix);
}
}
queue.write_buffer(
&gpu.transform_buffer,
(range_start as u64) * element_size,
bytemuck::cast_slice(&range_data),
);
}
if !instanced_dirty.is_empty() {
world_state.instanced_compute_dirty = true;
for entity in instanced_dirty {
let &(start, count) =
world_state.instanced_transform_ranges.get(&entity).unwrap();
let Some(instanced_mesh) = world.get_instanced_mesh(entity) else {
continue;
};
let custom_data_slice = instanced_mesh.custom_data_slice();
let updated_custom: Vec<[f32; 4]> =
custom_data_slice.iter().map(|data| data.tint).collect();
let start_idx = start as usize;
let end_idx = start_idx + count as usize;
if end_idx <= world_state.cached_custom_data.len() {
world_state.cached_custom_data[start_idx..end_idx]
.copy_from_slice(&updated_custom);
}
let custom_element_size = std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
(start as u64) * custom_element_size,
bytemuck::cast_slice(&updated_custom),
);
}
}
}
} else if dirty_count > 0 {
for &entity in &dirty_entities {
if !world_state.instanced_transform_ranges.contains_key(&entity)
&& world.entity_has_components(entity, crate::ecs::world::INSTANCED_MESH)
{
return false;
}
}
world_state.instanced_compute_dirty = true;
if let Some(gpu) = world_state.gpu_buffers.as_ref() {
for &entity in &dirty_entities {
let Some(&(start, count)) = world_state.instanced_transform_ranges.get(&entity)
else {
continue;
};
let Some(instanced_mesh) = world.get_instanced_mesh(entity) else {
continue;
};
let custom_data_slice = instanced_mesh.custom_data_slice();
let updated_custom: Vec<[f32; 4]> =
custom_data_slice.iter().map(|data| data.tint).collect();
let start_idx = start as usize;
let end_idx = start_idx + count as usize;
if end_idx <= world_state.cached_custom_data.len() {
world_state.cached_custom_data[start_idx..end_idx]
.copy_from_slice(&updated_custom);
}
let custom_element_size = std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
(start as u64) * custom_element_size,
bytemuck::cast_slice(&updated_custom),
);
}
}
}
true
}
/// Returns `true` when this frame's dirty state permits the cheap
/// transform-only incremental path: the frame is initialized, nothing
/// structural changed (no rebuild, no batch invalidation, no instanced-mesh
/// changes, no entity adds/removes, no material edits), and at least one
/// transform is actually dirty.
pub(super) fn can_do_incremental_update(&self) -> bool {
    self.frame_dirty.as_ref().is_some_and(|fd| {
        // Frame must be set up and free of any structural invalidation.
        let structurally_clean = fd.frame_initialized
            && !fd.full_rebuild_needed
            && !fd.batches_invalidated
            && !fd.instanced_meshes_changed
            && fd.entities_added.is_empty()
            && fd.entities_removed.is_empty()
            && fd.material_dirty.is_empty();
        // There must be real transform work, otherwise there is nothing to do.
        structurally_clean && !fd.transform_dirty.is_empty()
    })
}
/// Returns `true` when the added/removed entities this frame can be applied
/// incrementally (without a full GPU rebuild).
///
/// Conditions:
/// - dirty tracking exists, the frame is initialized, and no full rebuild,
///   instanced-mesh change, or material edit is pending;
/// - there is at least one entity addition or removal to process;
/// - the current world's render state and GPU buffers exist;
/// - every added entity has a mesh already registered in `self.meshes` and a
///   material already present in the cached name→id map (so no new GPU
///   resources are required).
pub(super) fn can_do_incremental_entity_update(
    &self,
    world: &crate::ecs::world::World,
) -> bool {
    let Some(fd) = self.frame_dirty.as_ref() else {
        return false;
    };
    // Any structural invalidation forces the full path.
    let structurally_invalidated = !fd.frame_initialized
        || fd.full_rebuild_needed
        || fd.instanced_meshes_changed
        || !fd.material_dirty.is_empty();
    if structurally_invalidated {
        return false;
    }
    // Nothing to add or remove means this path does not apply.
    if fd.entities_added.is_empty() && fd.entities_removed.is_empty() {
        return false;
    }
    let Some(world_state) = self.world_states.get(&self.current_world_id) else {
        return false;
    };
    if world_state.gpu_buffers.is_none() {
        return false;
    }
    // Removals alone are always safe to apply incrementally.
    if fd.entities_added.is_empty() {
        return true;
    }
    // Additions require an already-populated material cache.
    if world_state.cached_name_to_material_id.is_empty() {
        return false;
    }
    // Every added entity must resolve to a known mesh and a cached material.
    fd.entities_added.iter().all(|entity| {
        let mesh_known = world
            .get_render_mesh(*entity)
            .is_some_and(|mesh| self.meshes.contains_key(&mesh.name));
        let material_cached = world.get_material_ref(*entity).is_some_and(|mat_ref| {
            world_state
                .cached_name_to_material_id
                .contains_key(&mat_ref.name)
        });
        mesh_known && material_cached
    })
}
pub(super) fn incremental_update_entities(
&mut self,
world: &crate::ecs::world::World,
device: &wgpu::Device,
queue: &wgpu::Queue,
) {
use crate::ecs::generational_registry::{registry_entry, registry_entry_by_name};
let fd = self.frame_dirty.as_mut().unwrap();
let entities_added = std::mem::take(&mut fd.entities_added);
let entities_removed = std::mem::take(&mut fd.entities_removed);
let dirty_transforms = std::mem::take(&mut fd.transform_dirty);
let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
if !entities_removed.is_empty() {
let gpu = world_state.gpu_buffers.as_ref().unwrap();
let far_away_transform = ModelMatrix {
model: [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[1e10, 1e10, 1e10, 1.0],
],
normal_matrix: [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
],
};
for &entity in &entities_removed {
if let Some(slot) = world_state.gpu_registry.entity_to_slot.remove(&entity) {
if (slot as usize) < world_state.gpu_registry.slot_to_entity.len() {
world_state.gpu_registry.slot_to_entity[slot as usize] = None;
}
world_state.entity_to_transform_index.remove(&entity);
world_state.cached_material_map.remove(&entity);
if (slot as usize) < world_state.cached_objects.len() {
let mesh_id = world_state.cached_objects[slot as usize].mesh_id;
let material_id = world_state.cached_objects[slot as usize].material_id;
world_state
.free_slots_by_group
.entry((mesh_id, material_id))
.or_default()
.push(slot);
}
let transform_offset =
(slot as u64) * std::mem::size_of::<ModelMatrix>() as u64;
queue.write_buffer(
&gpu.transform_buffer,
transform_offset,
bytemuck::cast_slice(&[far_away_transform]),
);
if (slot as usize) < world_state.cached_transforms.len() {
world_state.cached_transforms[slot as usize] = far_away_transform;
}
if (slot as usize) < world_state.cached_objects.len() {
world_state.cached_objects[slot as usize].batch_id = u32::MAX;
let object_offset =
(slot as u64) * std::mem::size_of::<ObjectData>() as u64;
queue.write_buffer(
&gpu.object_buffer,
object_offset,
bytemuck::cast_slice(&[world_state.cached_objects[slot as usize]]),
);
}
if (slot as usize) < world_state.cached_custom_data.len() {
world_state.cached_custom_data[slot as usize] = [0.0, 0.0, 0.0, 0.0];
let custom_data_offset =
(slot as u64) * std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
custom_data_offset,
bytemuck::cast_slice(&[[0.0f32, 0.0, 0.0, 0.0]]),
);
}
}
}
}
let current_object_count = world_state.object_count as usize;
struct ClassifiedEntity {
entity: crate::ecs::world::Entity,
mesh_id: u32,
material_id: u32,
is_transparent: bool,
transform: nalgebra_glm::Mat4,
custom_data: [f32; 4],
}
let mut classified = Vec::with_capacity(entities_added.len());
for &entity in &entities_added {
if world.entity_has_components(entity, crate::ecs::world::SPRITE) {
continue;
}
let Some(mesh) = world.get_render_mesh(entity) else {
continue;
};
let Some(&mesh_id) = self.meshes.get(&mesh.name) else {
continue;
};
let material_id = if let Some(mat_ref) = world.get_material_ref(entity) {
if let Some(&cached_id) = world_state.cached_name_to_material_id.get(&mat_ref.name)
{
cached_id
} else {
0
}
} else {
0
};
let Some(transform) = world.get_global_transform(entity) else {
continue;
};
let material = world.get_material_ref(entity).and_then(|mat_ref| {
if let Some(id) = mat_ref.id {
registry_entry(
&world.resources.material_registry.registry,
id.index,
id.generation,
)
} else {
registry_entry_by_name(
&world.resources.material_registry.registry,
&mat_ref.name,
)
}
});
let material_is_transparent = material.is_some_and(|m| {
m.alpha_mode == crate::ecs::material::components::AlphaMode::Blend
|| m.transmission_factor > 0.0
});
let custom_alpha = 1.0_f32;
let is_transparent = material_is_transparent || custom_alpha < 1.0;
classified.push(ClassifiedEntity {
entity,
mesh_id,
material_id,
is_transparent,
transform: transform.0,
custom_data: [1.0, 1.0, 1.0, 1.0],
});
world_state.cached_material_map.insert(entity, material_id);
}
classified.sort_by(|a, b| {
a.is_transparent
.cmp(&b.is_transparent)
.then(a.mesh_id.cmp(&b.mesh_id))
.then(a.material_id.cmp(&b.material_id))
});
let gpu = world_state.gpu_buffers.as_ref().unwrap();
let mut reused_indices: Vec<(usize, u32)> = Vec::new();
let mut remaining_indices: Vec<usize> = Vec::new();
for (index, info) in classified.iter().enumerate() {
let group_key = (info.mesh_id, info.material_id);
if let Some(free_slot) = world_state
.free_slots_by_group
.get_mut(&group_key)
.and_then(|slots| slots.pop())
{
reused_indices.push((index, free_slot));
} else {
remaining_indices.push(index);
}
}
for &(classified_index, slot) in &reused_indices {
let info = &classified[classified_index];
let old_batch_id = world_state.cached_objects[slot as usize].batch_id;
let mesh_data = &self.mesh_data[info.mesh_id as usize];
let model_matrix = ModelMatrix {
model: info.transform.into(),
normal_matrix: compute_normal_matrix(&info.transform),
};
let object_data = ObjectData {
transform_index: slot,
mesh_id: info.mesh_id,
material_id: info.material_id,
batch_id: old_batch_id,
morph_weights: [0.0f32; 8],
morph_target_count: mesh_data.morph_target_count,
morph_displacement_offset: mesh_data.morph_displacement_offset,
mesh_vertex_offset: mesh_data.vertex_offset,
mesh_vertex_count: mesh_data.vertex_count,
entity_id: info.entity.id,
is_overlay: 0,
skip_occlusion: u32::from(info.is_transparent),
_padding0: 0,
};
let transform_offset = (slot as u64) * std::mem::size_of::<ModelMatrix>() as u64;
queue.write_buffer(
&gpu.transform_buffer,
transform_offset,
bytemuck::cast_slice(&[model_matrix]),
);
let object_offset = (slot as u64) * std::mem::size_of::<ObjectData>() as u64;
queue.write_buffer(
&gpu.object_buffer,
object_offset,
bytemuck::cast_slice(&[object_data]),
);
let custom_offset = (slot as u64) * std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
custom_offset,
bytemuck::cast_slice(&[info.custom_data]),
);
world_state.cached_transforms[slot as usize] = model_matrix;
world_state.cached_objects[slot as usize] = object_data;
world_state.cached_custom_data[slot as usize] = info.custom_data;
world_state.cached_entities[slot as usize] = info.entity;
world_state
.gpu_registry
.entity_to_slot
.insert(info.entity, slot);
world_state.gpu_registry.slot_to_entity[slot as usize] = Some(info.entity);
world_state
.entity_to_transform_index
.insert(info.entity, slot);
world_state
.cached_material_map
.insert(info.entity, info.material_id);
}
let mut generic_free_slots: Vec<u32> = Vec::new();
for slots in world_state.free_slots_by_group.values_mut() {
generic_free_slots.append(slots);
}
world_state.free_slots_by_group.clear();
let mut generic_reused_indices: Vec<(usize, u32)> = Vec::new();
let mut truly_appended_indices: Vec<usize> = Vec::new();
for &index in &remaining_indices {
if let Some(slot) = generic_free_slots.pop() {
generic_reused_indices.push((index, slot));
} else {
truly_appended_indices.push(index);
}
}
for slot in generic_free_slots {
if (slot as usize) < world_state.cached_objects.len() {
let obj = &world_state.cached_objects[slot as usize];
world_state
.free_slots_by_group
.entry((obj.mesh_id, obj.material_id))
.or_default()
.push(slot);
}
}
for &(classified_index, slot) in &generic_reused_indices {
let info = &classified[classified_index];
let mesh_data = &self.mesh_data[info.mesh_id as usize];
let model_matrix = ModelMatrix {
model: info.transform.into(),
normal_matrix: compute_normal_matrix(&info.transform),
};
let object_data = ObjectData {
transform_index: slot,
mesh_id: info.mesh_id,
material_id: info.material_id,
batch_id: 0,
morph_weights: [0.0f32; 8],
morph_target_count: mesh_data.morph_target_count,
morph_displacement_offset: mesh_data.morph_displacement_offset,
mesh_vertex_offset: mesh_data.vertex_offset,
mesh_vertex_count: mesh_data.vertex_count,
entity_id: info.entity.id,
is_overlay: 0,
skip_occlusion: u32::from(info.is_transparent),
_padding0: 0,
};
let transform_offset = (slot as u64) * std::mem::size_of::<ModelMatrix>() as u64;
queue.write_buffer(
&gpu.transform_buffer,
transform_offset,
bytemuck::cast_slice(&[model_matrix]),
);
let object_offset = (slot as u64) * std::mem::size_of::<ObjectData>() as u64;
queue.write_buffer(
&gpu.object_buffer,
object_offset,
bytemuck::cast_slice(&[object_data]),
);
let custom_offset = (slot as u64) * std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
custom_offset,
bytemuck::cast_slice(&[info.custom_data]),
);
world_state.cached_transforms[slot as usize] = model_matrix;
world_state.cached_objects[slot as usize] = object_data;
world_state.cached_custom_data[slot as usize] = info.custom_data;
world_state.cached_entities[slot as usize] = info.entity;
world_state
.gpu_registry
.entity_to_slot
.insert(info.entity, slot);
world_state.gpu_registry.slot_to_entity[slot as usize] = Some(info.entity);
world_state
.entity_to_transform_index
.insert(info.entity, slot);
world_state
.cached_material_map
.insert(info.entity, info.material_id);
}
if !dirty_transforms.is_empty() {
let dirty_count = dirty_transforms.len();
let total = world_state.gpu_registry.len();
let threshold = (total as f32 * 0.25).ceil() as usize;
if dirty_count > threshold {
let regular_count = world_state.regular_object_count as usize;
let slot_limit = regular_count.min(world_state.cached_transforms.len());
let all_dirty = dirty_count >= regular_count;
let dirty_set: Option<&HashSet<crate::ecs::world::Entity>> = if all_dirty {
None
} else {
Some(&dirty_transforms)
};
let mut any_mesh_dirty = false;
for slot in 0..slot_limit {
if let Some(entity) = world_state
.gpu_registry
.slot_to_entity
.get(slot)
.copied()
.flatten()
&& (all_dirty || dirty_set.is_some_and(|s| s.contains(&entity)))
&& let Some(transform) = world.get_global_transform(entity)
{
any_mesh_dirty = true;
world_state.cached_transforms[slot] = ModelMatrix {
model: transform.0.into(),
normal_matrix: compute_normal_matrix(&transform.0),
};
}
}
let used_count = world_state.object_count as usize;
if any_mesh_dirty
&& used_count > 0
&& used_count <= world_state.cached_transforms.len()
{
queue.write_buffer(
&gpu.transform_buffer,
0,
bytemuck::cast_slice(&world_state.cached_transforms[..used_count]),
);
}
} else {
let mut mesh_updates: Vec<(u32, ModelMatrix)> = Vec::new();
for entity in dirty_transforms {
if let Some(&slot) = world_state.gpu_registry.entity_to_slot.get(&entity)
&& let Some(transform) = world.get_global_transform(entity)
{
let model_matrix = ModelMatrix {
model: transform.0.into(),
normal_matrix: compute_normal_matrix(&transform.0),
};
if (slot as usize) < world_state.cached_transforms.len() {
world_state.cached_transforms[slot as usize] = model_matrix;
}
mesh_updates.push((slot, model_matrix));
}
}
if !mesh_updates.is_empty() {
mesh_updates.sort_by_key(|(slot, _)| *slot);
let element_size = std::mem::size_of::<ModelMatrix>() as u64;
let mut range_start = mesh_updates[0].0;
let mut range_data: Vec<ModelMatrix> = vec![mesh_updates[0].1];
for &(slot, matrix) in mesh_updates.iter().skip(1) {
if slot == range_start + range_data.len() as u32 {
range_data.push(matrix);
} else {
queue.write_buffer(
&gpu.transform_buffer,
(range_start as u64) * element_size,
bytemuck::cast_slice(&range_data),
);
range_start = slot;
range_data.clear();
range_data.push(matrix);
}
}
queue.write_buffer(
&gpu.transform_buffer,
(range_start as u64) * element_size,
bytemuck::cast_slice(&range_data),
);
}
}
}
if !truly_appended_indices.is_empty() {
let new_total = current_object_count + truly_appended_indices.len();
let needs_grow = new_total
> world_state
.gpu_buffers
.as_ref()
.unwrap()
.transform_buffer_size
|| new_total > world_state.gpu_buffers.as_ref().unwrap().object_buffer_size
|| new_total
> world_state
.gpu_buffers
.as_ref()
.unwrap()
.custom_data_buffer_size;
if needs_grow {
let new_size = std::cmp::min(
(new_total as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize,
MAX_INSTANCES,
);
if new_size
> world_state
.gpu_buffers
.as_ref()
.unwrap()
.transform_buffer_size
{
let new_transform = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Transform Buffer (Incremental Resize)"),
size: (std::mem::size_of::<ModelMatrix>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
if !world_state.cached_transforms.is_empty() {
queue.write_buffer(
&new_transform,
0,
bytemuck::cast_slice(&world_state.cached_transforms),
);
}
let new_custom = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Custom Data Buffer (Incremental Resize)"),
size: (std::mem::size_of::<[f32; 4]>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
if !world_state.cached_custom_data.is_empty() {
queue.write_buffer(
&new_custom,
0,
bytemuck::cast_slice(&world_state.cached_custom_data),
);
}
let gpu_mut = world_state.gpu_buffers.as_mut().unwrap();
gpu_mut.transform_buffer = new_transform;
gpu_mut.transform_buffer_size = new_size;
gpu_mut.custom_data_buffer = new_custom;
gpu_mut.custom_data_buffer_size = new_size;
}
if new_size > world_state.gpu_buffers.as_ref().unwrap().object_buffer_size {
let new_object = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Object Buffer (Incremental Resize)"),
size: (std::mem::size_of::<ObjectData>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
if !world_state.cached_objects.is_empty() {
queue.write_buffer(
&new_object,
0,
bytemuck::cast_slice(&world_state.cached_objects),
);
}
let gpu_mut = world_state.gpu_buffers.as_mut().unwrap();
gpu_mut.object_buffer = new_object;
gpu_mut.object_buffer_size = new_size;
}
let visible_size = (world_state
.gpu_buffers
.as_ref()
.unwrap()
.visible_indices_buffer
.size()
/ std::mem::size_of::<u32>() as u64)
as usize;
if new_size > visible_size {
let gpu_mut = world_state.gpu_buffers.as_mut().unwrap();
gpu_mut.visible_indices_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Visible Indices Buffer (Incremental Resize)"),
size: (std::mem::size_of::<u32>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
gpu_mut.phase1_visible_indices_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Visible Indices Buffer (Incremental Resize)"),
size: (std::mem::size_of::<u32>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
}
{
let gpu_mut = world_state.gpu_buffers.as_mut().unwrap();
gpu_mut.instance_bind_group =
device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Mesh Instance Bind Group"),
layout: &self.instance_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: gpu_mut.transform_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: gpu_mut.materials_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 2,
resource: gpu_mut.object_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 3,
resource: gpu_mut.lights_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 4,
resource: gpu_mut.visible_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 5,
resource: gpu_mut.custom_data_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 6,
resource: self.morph_displacement_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 7,
resource: gpu_mut.light_grid_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 8,
resource: gpu_mut.light_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 9,
resource: gpu_mut.cluster_uniforms_buffer.as_entire_binding(),
},
],
});
gpu_mut.phase1_instance_bind_group =
device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Phase 1 Instance Bind Group"),
layout: &self.instance_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: gpu_mut.transform_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: gpu_mut.materials_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 2,
resource: gpu_mut.object_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 3,
resource: gpu_mut.lights_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 4,
resource: gpu_mut
.phase1_visible_indices_buffer
.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 5,
resource: gpu_mut.custom_data_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 6,
resource: self.morph_displacement_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 7,
resource: gpu_mut.light_grid_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 8,
resource: gpu_mut.light_indices_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 9,
resource: gpu_mut.cluster_uniforms_buffer.as_entire_binding(),
},
],
});
gpu_mut.culling_bind_group = None;
gpu_mut.phase1_culling_bind_group = None;
gpu_mut.transform_compute_bind_group = None;
gpu_mut.instanced_compute_bind_group = None;
}
}
}
let gpu = world_state.gpu_buffers.as_ref().unwrap();
let batches_invalidated = self
.frame_dirty
.as_ref()
.is_some_and(|fd| fd.batches_invalidated);
let needs_rebatch = batches_invalidated
|| !reused_indices.is_empty()
|| !generic_reused_indices.is_empty()
|| !truly_appended_indices.is_empty();
if !truly_appended_indices.is_empty() {
let insert_at = world_state.regular_object_count as usize;
let instanced_slot_count = current_object_count.saturating_sub(insert_at);
let appended_count = truly_appended_indices.len();
let new_total = current_object_count + appended_count;
if new_total > world_state.cached_transforms.len() {
world_state
.cached_transforms
.resize(new_total, bytemuck::Zeroable::zeroed());
world_state
.cached_objects
.resize(new_total, bytemuck::Zeroable::zeroed());
world_state
.cached_custom_data
.resize(new_total, [1.0, 1.0, 1.0, 1.0]);
world_state.cached_entities.resize(
new_total,
crate::ecs::world::Entity {
id: 0,
generation: 0,
},
);
}
if world_state.gpu_registry.slot_to_entity.len() < new_total {
world_state
.gpu_registry
.slot_to_entity
.resize(new_total, None);
}
if instanced_slot_count > 0 {
world_state.cached_transforms.copy_within(
insert_at..insert_at + instanced_slot_count,
insert_at + appended_count,
);
world_state.cached_objects.copy_within(
insert_at..insert_at + instanced_slot_count,
insert_at + appended_count,
);
world_state.cached_custom_data.copy_within(
insert_at..insert_at + instanced_slot_count,
insert_at + appended_count,
);
for slot_index in (insert_at + appended_count)
..(insert_at + appended_count + instanced_slot_count)
{
world_state.cached_objects[slot_index].transform_index = slot_index as u32;
}
let shift = appended_count as u32;
for batch in &mut world_state.instanced_opaque_batches {
batch.2 += shift;
batch.3 += shift;
}
for batch in &mut world_state.instanced_opaque_double_sided_batches {
batch.2 += shift;
batch.3 += shift;
}
for batch in &mut world_state.instanced_transparent_batches {
batch.2 += shift;
batch.3 += shift;
}
}
for (local_index, &classified_index) in truly_appended_indices.iter().enumerate() {
let info = &classified[classified_index];
let slot = (insert_at + local_index) as u32;
let mesh_data = &self.mesh_data[info.mesh_id as usize];
let model_matrix = ModelMatrix {
model: info.transform.into(),
normal_matrix: compute_normal_matrix(&info.transform),
};
let object_data = ObjectData {
transform_index: slot,
mesh_id: info.mesh_id,
material_id: info.material_id,
batch_id: 0,
morph_weights: [0.0f32; 8],
morph_target_count: mesh_data.morph_target_count,
morph_displacement_offset: mesh_data.morph_displacement_offset,
mesh_vertex_offset: mesh_data.vertex_offset,
mesh_vertex_count: mesh_data.vertex_count,
entity_id: info.entity.id,
is_overlay: 0,
skip_occlusion: u32::from(info.is_transparent),
_padding0: 0,
};
world_state.cached_transforms[slot as usize] = model_matrix;
world_state.cached_objects[slot as usize] = object_data;
world_state.cached_custom_data[slot as usize] = info.custom_data;
world_state.cached_entities[slot as usize] = info.entity;
world_state
.gpu_registry
.entity_to_slot
.insert(info.entity, slot);
world_state.gpu_registry.slot_to_entity[slot as usize] = Some(info.entity);
world_state.gpu_registry.slot_count =
world_state.gpu_registry.slot_count.max(slot + 1);
world_state
.entity_to_transform_index
.insert(info.entity, slot);
world_state
.cached_material_map
.insert(info.entity, info.material_id);
}
{
let append_end = insert_at + appended_count;
let transform_offset =
(insert_at as u64) * std::mem::size_of::<ModelMatrix>() as u64;
queue.write_buffer(
&gpu.transform_buffer,
transform_offset,
bytemuck::cast_slice(&world_state.cached_transforms[insert_at..append_end]),
);
let object_offset = (insert_at as u64) * std::mem::size_of::<ObjectData>() as u64;
queue.write_buffer(
&gpu.object_buffer,
object_offset,
bytemuck::cast_slice(&world_state.cached_objects[insert_at..append_end]),
);
let custom_offset = (insert_at as u64) * std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
custom_offset,
bytemuck::cast_slice(&world_state.cached_custom_data[insert_at..append_end]),
);
}
if instanced_slot_count > 0 {
let shifted_start = insert_at + appended_count;
let shifted_end = shifted_start + instanced_slot_count;
let transform_offset =
(shifted_start as u64) * std::mem::size_of::<ModelMatrix>() as u64;
queue.write_buffer(
&gpu.transform_buffer,
transform_offset,
bytemuck::cast_slice(
&world_state.cached_transforms[shifted_start..shifted_end],
),
);
let custom_offset = (shifted_start as u64) * std::mem::size_of::<[f32; 4]>() as u64;
queue.write_buffer(
&gpu.custom_data_buffer,
custom_offset,
bytemuck::cast_slice(
&world_state.cached_custom_data[shifted_start..shifted_end],
),
);
}
world_state.object_count += appended_count as u32;
world_state.regular_object_count += appended_count as u32;
}
if needs_rebatch {
let regular_count = world_state.regular_object_count as usize;
let overlay_opaque_instances =
std::mem::take(&mut world_state.overlay_opaque_instances);
let overlay_opaque_double_sided_instances =
std::mem::take(&mut world_state.overlay_opaque_double_sided_instances);
let overlay_transparent_instances =
std::mem::take(&mut world_state.overlay_transparent_instances);
world_state.opaque_instances.clear();
world_state.opaque_double_sided_instances.clear();
world_state.transparent_instances.clear();
let transparent_mat_ids = &world_state.cached_transparent_material_ids;
let double_sided_mat_ids = &world_state.cached_double_sided_material_ids;
for index in 0..regular_count.min(world_state.cached_objects.len()) {
if world_state
.gpu_registry
.slot_to_entity
.get(index)
.copied()
.flatten()
.is_none()
{
continue;
}
let material_id = world_state.cached_objects[index].material_id;
let is_transparent = transparent_mat_ids.contains(&material_id);
let is_double_sided = double_sided_mat_ids.contains(&material_id);
world_state.cached_objects[index].skip_occlusion = u32::from(is_transparent);
let object_index = index as u32;
let mesh_id = world_state.cached_objects[index].mesh_id;
let batch_list = if is_transparent {
&mut world_state.transparent_instances
} else if is_double_sided {
&mut world_state.opaque_double_sided_instances
} else {
&mut world_state.opaque_instances
};
if let Some(last) = batch_list.last_mut() {
if last.0 == mesh_id && last.1 == material_id && last.3 == object_index {
last.3 = object_index + 1;
} else {
batch_list.push((mesh_id, material_id, object_index, object_index + 1));
}
} else {
batch_list.push((mesh_id, material_id, object_index, object_index + 1));
}
}
world_state.overlay_opaque_instances = overlay_opaque_instances;
world_state.overlay_opaque_double_sided_instances =
overlay_opaque_double_sided_instances;
world_state.overlay_transparent_instances = overlay_transparent_instances;
let all_regular_batches: Vec<BatchRange> = world_state
.opaque_instances
.iter()
.chain(world_state.opaque_double_sided_instances.iter())
.chain(world_state.transparent_instances.iter())
.cloned()
.collect();
for (batch_index, &(_, _, start, end)) in all_regular_batches.iter().enumerate() {
let batch_id = batch_index as u32;
for slot in start..end {
if (slot as usize) < world_state.cached_objects.len() {
world_state.cached_objects[slot as usize].batch_id = batch_id;
}
}
}
let overlay_batch_start = all_regular_batches.len() as u32;
let all_overlay_batches: Vec<BatchRange> = world_state
.overlay_opaque_instances
.iter()
.chain(world_state.overlay_opaque_double_sided_instances.iter())
.chain(world_state.overlay_transparent_instances.iter())
.cloned()
.collect();
for (batch_index, &(_, _, start, end)) in all_overlay_batches.iter().enumerate() {
let batch_id = overlay_batch_start + batch_index as u32;
for slot in start..end {
if (slot as usize) < world_state.cached_objects.len() {
world_state.cached_objects[slot as usize].batch_id = batch_id;
}
}
}
let instanced_batch_start = overlay_batch_start + all_overlay_batches.len() as u32;
let all_instanced_batches: Vec<BatchRange> = world_state
.instanced_opaque_batches
.iter()
.chain(world_state.instanced_opaque_double_sided_batches.iter())
.chain(world_state.instanced_transparent_batches.iter())
.cloned()
.collect();
for (batch_index, &(_, _, start, end)) in all_instanced_batches.iter().enumerate() {
let batch_id = instanced_batch_start + batch_index as u32;
for slot in start..end {
if (slot as usize) < world_state.cached_objects.len() {
world_state.cached_objects[slot as usize].batch_id = batch_id;
}
}
}
{
let gpu = world_state.gpu_buffers.as_ref().unwrap();
queue.write_buffer(
&gpu.object_buffer,
0,
bytemuck::cast_slice(&world_state.cached_objects),
);
}
let all_instances: Vec<_> = world_state
.opaque_instances
.iter()
.chain(world_state.opaque_double_sided_instances.iter())
.chain(world_state.transparent_instances.iter())
.chain(world_state.overlay_opaque_instances.iter())
.chain(world_state.overlay_opaque_double_sided_instances.iter())
.chain(world_state.overlay_transparent_instances.iter())
.chain(world_state.instanced_opaque_batches.iter())
.chain(world_state.instanced_opaque_double_sided_batches.iter())
.chain(world_state.instanced_transparent_batches.iter())
.cloned()
.collect();
let total_batch_count = all_instances.len();
let total_object_count = world_state.object_count as usize;
if total_batch_count
> world_state
.gpu_buffers
.as_ref()
.unwrap()
.indirect_buffer_size
{
let new_size = std::cmp::min(
(total_batch_count as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize,
MAX_INSTANCES,
);
let gpu_mut = world_state.gpu_buffers.as_mut().unwrap();
gpu_mut.indirect_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Indirect Draw Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::INDIRECT
| wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
gpu_mut.indirect_buffer_size = new_size;
gpu_mut.phase1_indirect_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Indirect Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::INDIRECT
| wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
gpu_mut.indirect_reset_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Indirect Reset Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
gpu_mut.phase1_indirect_reset_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Indirect Reset Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
gpu_mut.culling_bind_group = None;
gpu_mut.phase1_culling_bind_group = None;
}
let gpu = world_state.gpu_buffers.as_ref().unwrap();
let mut indirect_commands = Vec::with_capacity(total_batch_count);
let mut indirect_reset_commands = Vec::with_capacity(total_batch_count);
let mut visible_indices = Vec::with_capacity(total_object_count);
for &(mesh_id, _material_id, start, end) in all_instances.iter() {
let mesh_data = &self.mesh_data[mesh_id as usize];
let instance_count = end - start;
let first_instance = visible_indices.len() as u32;
indirect_commands.push(DrawIndexedIndirect {
index_count: mesh_data.index_count,
instance_count,
first_index: mesh_data.index_offset,
base_vertex: mesh_data.vertex_offset as i32,
first_instance,
});
indirect_reset_commands.push(DrawIndexedIndirect {
index_count: mesh_data.index_count,
instance_count: 0,
first_index: mesh_data.index_offset,
base_vertex: mesh_data.vertex_offset as i32,
first_instance,
});
for object_index in start..end {
visible_indices.push(object_index);
}
}
queue.write_buffer(
&gpu.indirect_buffer,
0,
bytemuck::cast_slice(&indirect_commands),
);
queue.write_buffer(
&gpu.visible_indices_buffer,
0,
bytemuck::cast_slice(&visible_indices),
);
queue.write_buffer(
&gpu.phase1_visible_indices_buffer,
0,
bytemuck::cast_slice(&visible_indices),
);
queue.write_buffer(
&gpu.indirect_reset_buffer,
0,
bytemuck::cast_slice(&indirect_reset_commands),
);
queue.write_buffer(
&gpu.phase1_indirect_reset_buffer,
0,
bytemuck::cast_slice(&indirect_reset_commands),
);
world_state.indirect_reset_count = total_batch_count;
}
world_state.frames_since_full_rebuild += 1;
let _ = world_state;
}
/// Returns `true` when the cheap re-batching path can be taken instead of a
/// full rebuild: frame-dirty tracking must exist, only batch ordering may be
/// invalidated (no structural, entity, or material changes), and the current
/// world must already have cached entities with GPU buffers allocated.
pub(super) fn can_do_rebatch_only(&self) -> bool {
    let Some(dirty) = self.frame_dirty.as_ref() else {
        return false;
    };
    // Any structural change this frame forces the full rebuild path.
    let structurally_clean = dirty.frame_initialized
        && !dirty.full_rebuild_needed
        && !dirty.instanced_meshes_changed
        && dirty.entities_added.is_empty()
        && dirty.entities_removed.is_empty()
        && dirty.material_dirty.is_empty();
    if !structurally_clean || !dirty.batches_invalidated {
        return false;
    }
    // Rebatching only makes sense when there is cached state to rebatch.
    self.world_states
        .get(&self.current_world_id)
        .is_some_and(|state| !state.cached_entities.is_empty() && state.gpu_buffers.is_some())
}
/// Fast-path frame update: re-sorts already-cached objects into draw batches
/// and re-uploads the derived GPU data (object metadata, indirect draw
/// commands, visible-index lists) without recreating per-entity state.
/// Only call when `can_do_rebatch_only` returned true.
pub(super) fn rebatch_cached_entities(
    &mut self,
    world: &crate::ecs::world::World,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) {
    // Take (and clear) the set of entities whose transforms changed this frame.
    let dirty_transforms = self
        .frame_dirty
        .as_mut()
        .map(|fd| std::mem::take(&mut fd.transform_dirty))
        .unwrap_or_default();
    let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
    let gpu = world_state.gpu_buffers.as_ref().unwrap();
    // Patch only the dirty transforms, both in the CPU-side cache and in the
    // GPU transform buffer (one small write per dirty slot).
    for entity in &dirty_transforms {
        if let Some(&slot) = world_state.gpu_registry.entity_to_slot.get(entity)
            && let Some(transform) = world.get_global_transform(*entity)
        {
            let model_matrix = ModelMatrix {
                model: transform.0.into(),
                normal_matrix: compute_normal_matrix(&transform.0),
            };
            if (slot as usize) < world_state.cached_transforms.len() {
                world_state.cached_transforms[slot as usize] = model_matrix;
            }
            let offset = (slot as u64) * std::mem::size_of::<ModelMatrix>() as u64;
            queue.write_buffer(
                &gpu.transform_buffer,
                offset,
                bytemuck::cast_slice(&[model_matrix]),
            );
        }
    }
    let regular_count = world_state.regular_object_count as usize;
    // Overlay batch lists are preserved as-is: take them aside so only the
    // regular (non-overlay) lists are rebuilt below, then restore them.
    let overlay_opaque_instances = std::mem::take(&mut world_state.overlay_opaque_instances);
    let overlay_opaque_double_sided_instances =
        std::mem::take(&mut world_state.overlay_opaque_double_sided_instances);
    let overlay_transparent_instances =
        std::mem::take(&mut world_state.overlay_transparent_instances);
    world_state.opaque_instances.clear();
    world_state.opaque_double_sided_instances.clear();
    world_state.transparent_instances.clear();
    let transparent_mat_ids = &world_state.cached_transparent_material_ids;
    let double_sided_mat_ids = &world_state.cached_double_sided_material_ids;
    // Rebuild regular batches: adjacent objects with the same mesh+material
    // are merged into a single (mesh, material, start, end) range.
    for index in 0..regular_count.min(world_state.cached_objects.len()) {
        // Skip slots with no live entity (freed slots).
        if world_state
            .gpu_registry
            .slot_to_entity
            .get(index)
            .copied()
            .flatten()
            .is_none()
        {
            continue;
        }
        let obj = &world_state.cached_objects[index];
        let is_transparent = transparent_mat_ids.contains(&obj.material_id);
        let is_double_sided = double_sided_mat_ids.contains(&obj.material_id);
        let object_index = index as u32;
        let mesh_id = obj.mesh_id;
        let material_id = obj.material_id;
        // Transparency takes priority over double-sidedness for list choice.
        let batch_list = if is_transparent {
            &mut world_state.transparent_instances
        } else if is_double_sided {
            &mut world_state.opaque_double_sided_instances
        } else {
            &mut world_state.opaque_instances
        };
        if let Some(last) = batch_list.last_mut() {
            // Extend the previous range only if this object is contiguous
            // with it (last.3 == object_index) and shares mesh + material.
            if last.0 == mesh_id && last.1 == material_id && last.3 == object_index {
                last.3 = object_index + 1;
            } else {
                batch_list.push((mesh_id, material_id, object_index, object_index + 1));
            }
        } else {
            batch_list.push((mesh_id, material_id, object_index, object_index + 1));
        }
    }
    // Expand each batch into one batch per LOD level for meshes that have a
    // registered LOD chain (same object range, different mesh id).
    if !self.mesh_lod_mesh_ids.is_empty() {
        for batch_list in [
            &mut world_state.opaque_instances,
            &mut world_state.opaque_double_sided_instances,
            &mut world_state.transparent_instances,
        ] {
            let original = std::mem::take(batch_list);
            for (mesh_id, material_id, start, end) in original {
                if let Some(lod_ids) = self.mesh_lod_mesh_ids.get(&mesh_id) {
                    for &lod_mesh_id in lod_ids {
                        batch_list.push((lod_mesh_id, material_id, start, end));
                    }
                } else {
                    batch_list.push((mesh_id, material_id, start, end));
                }
            }
        }
    }
    // Restore the untouched overlay batch lists.
    world_state.overlay_opaque_instances = overlay_opaque_instances;
    world_state.overlay_opaque_double_sided_instances = overlay_opaque_double_sided_instances;
    world_state.overlay_transparent_instances = overlay_transparent_instances;
    // Mesh ids of non-primary LOD levels (every chain entry except the first);
    // their batches must not overwrite the batch_id of shared object slots.
    let lod_sub_mesh_ids: std::collections::HashSet<u32> = self
        .mesh_lod_mesh_ids
        .values()
        .flat_map(|ids| ids.iter().skip(1).copied())
        .collect();
    // Assign sequential batch ids across regular -> overlay -> instanced
    // batches, writing the id back into each covered object's metadata.
    let all_regular_batches: Vec<BatchRange> = world_state
        .opaque_instances
        .iter()
        .chain(world_state.opaque_double_sided_instances.iter())
        .chain(world_state.transparent_instances.iter())
        .cloned()
        .collect();
    for (batch_index, &(mesh_id, _, start, end)) in all_regular_batches.iter().enumerate() {
        if lod_sub_mesh_ids.contains(&mesh_id) {
            continue;
        }
        let batch_id = batch_index as u32;
        for slot in start..end {
            if (slot as usize) < world_state.cached_objects.len() {
                world_state.cached_objects[slot as usize].batch_id = batch_id;
            }
        }
    }
    let overlay_batch_start = all_regular_batches.len() as u32;
    let all_overlay_batches: Vec<BatchRange> = world_state
        .overlay_opaque_instances
        .iter()
        .chain(world_state.overlay_opaque_double_sided_instances.iter())
        .chain(world_state.overlay_transparent_instances.iter())
        .cloned()
        .collect();
    for (batch_index, &(mesh_id, _, start, end)) in all_overlay_batches.iter().enumerate() {
        if lod_sub_mesh_ids.contains(&mesh_id) {
            continue;
        }
        let batch_id = overlay_batch_start + batch_index as u32;
        for slot in start..end {
            if (slot as usize) < world_state.cached_objects.len() {
                world_state.cached_objects[slot as usize].batch_id = batch_id;
            }
        }
    }
    let instanced_batch_start = overlay_batch_start + all_overlay_batches.len() as u32;
    let all_instanced_batches: Vec<BatchRange> = world_state
        .instanced_opaque_batches
        .iter()
        .chain(world_state.instanced_opaque_double_sided_batches.iter())
        .chain(world_state.instanced_transparent_batches.iter())
        .cloned()
        .collect();
    for (batch_index, &(mesh_id, _, start, end)) in all_instanced_batches.iter().enumerate() {
        if lod_sub_mesh_ids.contains(&mesh_id) {
            continue;
        }
        let batch_id = instanced_batch_start + batch_index as u32;
        for slot in start..end {
            if (slot as usize) < world_state.cached_objects.len() {
                world_state.cached_objects[slot as usize].batch_id = batch_id;
            }
        }
    }
    // Upload the full object metadata array (batch ids just changed).
    {
        let gpu = world_state.gpu_buffers.as_ref().unwrap();
        queue.write_buffer(
            &gpu.object_buffer,
            0,
            bytemuck::cast_slice(&world_state.cached_objects),
        );
    }
    // Flatten every batch list in draw order; this order defines both the
    // indirect command index and the visible-index layout below.
    let all_instances: Vec<_> = world_state
        .opaque_instances
        .iter()
        .chain(world_state.opaque_double_sided_instances.iter())
        .chain(world_state.transparent_instances.iter())
        .chain(world_state.overlay_opaque_instances.iter())
        .chain(world_state.overlay_opaque_double_sided_instances.iter())
        .chain(world_state.overlay_transparent_instances.iter())
        .chain(world_state.instanced_opaque_batches.iter())
        .chain(world_state.instanced_opaque_double_sided_batches.iter())
        .chain(world_state.instanced_transparent_batches.iter())
        .cloned()
        .collect();
    let total_batch_count = all_instances.len();
    let total_visible_slots: usize = all_instances
        .iter()
        .map(|&(_, _, start, end)| (end - start) as usize)
        .sum();
    // Grow the indirect buffers (and their reset copies) when the batch count
    // exceeds capacity; invalidated bind groups are lazily rebuilt elsewhere.
    if total_batch_count
        > world_state
            .gpu_buffers
            .as_ref()
            .unwrap()
            .indirect_buffer_size
    {
        let new_size = std::cmp::min(
            (total_batch_count as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize,
            MAX_INSTANCES,
        );
        let gpu_mut = world_state.gpu_buffers.as_mut().unwrap();
        gpu_mut.indirect_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Indirect Draw Buffer (Resized)"),
            size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
            usage: wgpu::BufferUsages::INDIRECT
                | wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        gpu_mut.indirect_buffer_size = new_size;
        gpu_mut.phase1_indirect_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Phase 1 Indirect Buffer (Resized)"),
            size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
            usage: wgpu::BufferUsages::INDIRECT
                | wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        gpu_mut.indirect_reset_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Indirect Reset Buffer (Resized)"),
            size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
            usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        gpu_mut.phase1_indirect_reset_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Phase 1 Indirect Reset Buffer (Resized)"),
            size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
            usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Bind groups reference the replaced buffers; drop them so they are
        // recreated against the new buffers.
        gpu_mut.culling_bind_group = None;
        gpu_mut.phase1_culling_bind_group = None;
    }
    let gpu = world_state.gpu_buffers.as_ref().unwrap();
    let mut indirect_commands = Vec::with_capacity(total_batch_count);
    let mut indirect_reset_commands = Vec::with_capacity(total_batch_count);
    let mut visible_indices = Vec::with_capacity(total_visible_slots);
    // Build one indexed-indirect command per batch. LOD sub-mesh batches start
    // with instance_count 0 (the GPU LOD-selection pass fills them in); reset
    // commands always start at 0 for the culling pass to accumulate into.
    for &(mesh_id, _material_id, start, end) in all_instances.iter() {
        let mesh_data = &self.mesh_data[mesh_id as usize];
        let instance_count = end - start;
        let first_instance = visible_indices.len() as u32;
        let is_lod_sub = lod_sub_mesh_ids.contains(&mesh_id);
        indirect_commands.push(DrawIndexedIndirect {
            index_count: mesh_data.index_count,
            instance_count: if is_lod_sub { 0 } else { instance_count },
            first_index: mesh_data.index_offset,
            base_vertex: mesh_data.vertex_offset as i32,
            first_instance,
        });
        indirect_reset_commands.push(DrawIndexedIndirect {
            index_count: mesh_data.index_count,
            instance_count: 0,
            first_index: mesh_data.index_offset,
            base_vertex: mesh_data.vertex_offset as i32,
            first_instance,
        });
        for object_index in start..end {
            visible_indices.push(object_index);
        }
    }
    queue.write_buffer(
        &gpu.indirect_buffer,
        0,
        bytemuck::cast_slice(&indirect_commands),
    );
    queue.write_buffer(
        &gpu.visible_indices_buffer,
        0,
        bytemuck::cast_slice(&visible_indices),
    );
    queue.write_buffer(
        &gpu.phase1_visible_indices_buffer,
        0,
        bytemuck::cast_slice(&visible_indices),
    );
    queue.write_buffer(
        &gpu.indirect_reset_buffer,
        0,
        bytemuck::cast_slice(&indirect_reset_commands),
    );
    queue.write_buffer(
        &gpu.phase1_indirect_reset_buffer,
        0,
        bytemuck::cast_slice(&indirect_reset_commands),
    );
    world_state.indirect_reset_count = total_batch_count;
}
/// Per-frame uniform upload: gathers scene lights and cascade-shadow data,
/// then writes every uniform/storage buffer read by the mesh, culling, and
/// clustered-lighting passes this frame. Also lazily (re)creates the culling
/// and cluster-assignment bind groups when their underlying buffers changed.
pub(super) fn prepare_uniforms_and_lights(
    &mut self,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    world: &crate::ecs::world::World,
) {
    use super::super::projection::*;
    // Collect up to MAX_LIGHTS lights plus the primary directional light.
    let light_result = collect_lights(world, MAX_LIGHTS);
    let mut lights_data = light_result.lights_data;
    let directional_light = light_result.directional_light;
    let entity_to_lights_index = light_result.entity_to_index;
    {
        let state = self.world_states.get_mut(&self.current_world_id).unwrap();
        state.num_directional_lights = light_result.num_directional_lights;
        state.num_total_lights = lights_data.len() as u32;
    }
    // Straight-down default direction when no directional light exists.
    let directional_light_direction = directional_light
        .as_ref()
        .map(|(_light, transform)| {
            let dir = transform.forward_vector();
            [dir.x, dir.y, dir.z, 0.0]
        })
        .unwrap_or([0.0, -1.0, 0.0, 0.0]);
    let cascade_result = calculate_cascade_shadows(world, directional_light.as_ref());
    let cascade_view_projections = cascade_result.cascade_view_projections;
    let cascade_diameters = cascade_result.cascade_diameters;
    let light_view_projection = cascade_result.light_view_projection;
    let shadow_bias = cascade_result.shadow_bias;
    let shadows_enabled = cascade_result.shadows_enabled;
    // Smaller shadow atlas on the web to stay within typical WebGL/WebGPU limits.
    let cascade_texture_resolution = if cfg!(target_arch = "wasm32") {
        2048.0
    } else {
        4096.0
    };
    // Four cascades packed into a 2x2 atlas: each entry is (u offset,
    // v offset, texel-to-world scale, unused).
    let cascade_atlas_offsets: [[f32; 4]; crate::render::wgpu::passes::NUM_SHADOW_CASCADES] = [
        [
            0.0,
            0.0,
            cascade_diameters[0] / cascade_texture_resolution,
            0.0,
        ],
        [
            0.5,
            0.0,
            cascade_diameters[1] / cascade_texture_resolution,
            0.0,
        ],
        [
            0.0,
            0.5,
            cascade_diameters[2] / cascade_texture_resolution,
            0.0,
        ],
        [
            0.5,
            0.5,
            cascade_diameters[3] / cascade_texture_resolution,
            0.0,
        ],
    ];
    // Everything camera-dependent is skipped when no active camera exists.
    if let Some(camera_matrices) =
        crate::ecs::camera::queries::query_active_camera_matrices(world)
    {
        let global_unlit = if world.resources.graphics.unlit_mode {
            1.0
        } else {
            0.0
        };
        // Retro vertex snapping: falls back to a 320x240 grid, disabled.
        let (snap_resolution, snap_enabled) =
            if let Some(ref vertex_snap) = world.resources.graphics.vertex_snap {
                (vertex_snap.resolution, 1)
            } else {
                ([320.0, 240.0], 0)
            };
        let affine_enabled = if world.resources.graphics.affine_texture_mapping {
            1
        } else {
            0
        };
        let (fog_color, fog_enabled, fog_start, fog_end) =
            if let Some(ref fog) = world.resources.graphics.fog {
                (fog.color, 1, fog.start, fog.end)
            } else {
                ([0.5, 0.5, 0.6], 0, 5.0, 30.0)
            };
        let time = world.resources.window.timing.uptime_milliseconds as f32 / 1000.0;
        let uniforms = MeshUniforms {
            view: camera_matrices.view.into(),
            projection: camera_matrices.projection.into(),
            camera_position: [
                camera_matrices.camera_position.x,
                camera_matrices.camera_position.y,
                camera_matrices.camera_position.z,
                1.0,
            ],
            num_lights: [lights_data.len() as u32, 0, 0, 0],
            ambient_light: world.resources.graphics.ambient_light,
            light_view_projection,
            shadow_bias,
            shadows_enabled,
            global_unlit,
            shadow_normal_bias: 1.8,
            snap_resolution,
            snap_enabled,
            affine_enabled,
            fog_color,
            fog_enabled,
            fog_start,
            fog_end,
            cascade_count: crate::render::wgpu::passes::NUM_SHADOW_CASCADES as u32,
            directional_light_size: 1.0,
            cascade_view_projections,
            cascade_split_distances: CASCADE_SPLIT_DISTANCES,
            cascade_atlas_offsets,
            cascade_atlas_scale: [0.5, 0.5, 0.0, 0.0],
            time,
            pbr_debug_mode: world.resources.graphics.pbr_debug_mode.as_u32(),
            texture_debug_stripes: world.resources.graphics.texture_debug_stripes as u32,
            texture_debug_stripes_speed: world.resources.graphics.texture_debug_stripes_speed,
            directional_light_direction,
            ibl_blend_factor: 0.0,
            _padding3: [0.0; 19],
        };
        queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[uniforms]));
        // Overlay pass uses identical uniforms except shadows are forced off
        // (shadows_enabled: 0.0) for overlay geometry.
        let overlay_uniforms = MeshUniforms {
            view: camera_matrices.view.into(),
            projection: camera_matrices.projection.into(),
            camera_position: [
                camera_matrices.camera_position.x,
                camera_matrices.camera_position.y,
                camera_matrices.camera_position.z,
                1.0,
            ],
            num_lights: [lights_data.len() as u32, 0, 0, 0],
            ambient_light: world.resources.graphics.ambient_light,
            light_view_projection,
            shadow_bias,
            shadows_enabled: 0.0,
            global_unlit,
            shadow_normal_bias: 1.8,
            snap_resolution,
            snap_enabled,
            affine_enabled,
            fog_color,
            fog_enabled,
            fog_start,
            fog_end,
            cascade_count: crate::render::wgpu::passes::NUM_SHADOW_CASCADES as u32,
            directional_light_size: 1.0,
            cascade_view_projections,
            cascade_split_distances: CASCADE_SPLIT_DISTANCES,
            cascade_atlas_offsets,
            cascade_atlas_scale: [0.5, 0.5, 0.0, 0.0],
            time,
            pbr_debug_mode: world.resources.graphics.pbr_debug_mode.as_u32(),
            texture_debug_stripes: world.resources.graphics.texture_debug_stripes as u32,
            texture_debug_stripes_speed: world.resources.graphics.texture_debug_stripes_speed,
            directional_light_direction,
            ibl_blend_factor: 0.0,
            _padding3: [0.0; 19],
        };
        queue.write_buffer(
            &self.overlay_uniform_buffer,
            0,
            bytemuck::cast_slice(&[overlay_uniforms]),
        );
        let view_proj = camera_matrices.projection * camera_matrices.view;
        // A debug override camera may drive culling while the main camera
        // drives rendering; fall back to the main camera otherwise.
        let (culling_frustum_planes, culling_view_proj) =
            if let Some(culling_camera) = world.resources.graphics.culling_camera_override {
                if let Some(culling_matrices) =
                    crate::ecs::camera::queries::query_camera_matrices(world, culling_camera)
                {
                    let culling_vp = culling_matrices.projection * culling_matrices.view;
                    (extract_frustum_planes(&culling_vp), culling_vp)
                } else {
                    (extract_frustum_planes(&view_proj), view_proj)
                }
            } else {
                (extract_frustum_planes(&view_proj), view_proj)
            };
        let (screen_width, screen_height) = self.hiz_pass.screen_size();
        let projection_scale_y = camera_matrices.projection[(1, 1)];
        let min_screen_pixel_size = world.resources.graphics.min_screen_pixel_size;
        let occlusion_enabled =
            screen_width > 0 && world.resources.graphics.occlusion_culling_enabled;
        let object_count = self
            .world_states
            .get(&self.current_world_id)
            .unwrap()
            .object_count;
        // Phase 1 always runs with occlusion disabled (it renders the
        // occluders the Hi-Z pyramid is built from).
        let phase1_culling_uniforms = CullingUniforms {
            frustum_planes: culling_frustum_planes.map(|v| [v.x, v.y, v.z, v.w]),
            view_projection: culling_view_proj.into(),
            screen_size: [screen_width as f32, screen_height as f32],
            hiz_mip_count: self.hiz_pass.mip_count() as f32,
            occlusion_enabled: 0,
            object_count,
            min_screen_pixel_size,
            projection_scale_y,
            _padding: 0,
        };
        queue.write_buffer(
            &self.phase1_culling_uniform_buffer,
            0,
            bytemuck::cast_slice(&[phase1_culling_uniforms]),
        );
        let culling_uniforms = CullingUniforms {
            frustum_planes: culling_frustum_planes.map(|v| [v.x, v.y, v.z, v.w]),
            view_projection: culling_view_proj.into(),
            screen_size: [screen_width as f32, screen_height as f32],
            hiz_mip_count: self.hiz_pass.mip_count() as f32,
            occlusion_enabled: if occlusion_enabled { 1 } else { 0 },
            object_count,
            min_screen_pixel_size,
            projection_scale_y,
            _padding: 0,
        };
        queue.write_buffer(
            &self.culling_uniform_buffer,
            0,
            bytemuck::cast_slice(&[culling_uniforms]),
        );
        let hiz_view = self.hiz_pass.hiz_view_or_dummy();
        let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
        let gpu = world_state.gpu_buffers.as_mut().unwrap();
        // Culling bind groups are only rebuilt when a buffer resize cleared
        // them (set to None elsewhere); both phases share one layout.
        if gpu.culling_bind_group.is_none() {
            gpu.culling_bind_group =
                Some(device.create_bind_group(&wgpu::BindGroupDescriptor {
                    label: Some("Mesh Culling Bind Group"),
                    layout: &self.culling_bind_group_layout,
                    entries: &[
                        wgpu::BindGroupEntry {
                            binding: 0,
                            resource: gpu.transform_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 1,
                            resource: gpu.object_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 2,
                            resource: self.culling_uniform_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 3,
                            resource: self.mesh_bounds_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 4,
                            resource: gpu.indirect_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 5,
                            resource: gpu.visible_indices_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 6,
                            resource: wgpu::BindingResource::TextureView(hiz_view),
                        },
                        wgpu::BindGroupEntry {
                            binding: 7,
                            resource: self.mesh_aabbs_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 8,
                            resource: self.mesh_lod_buffer.as_entire_binding(),
                        },
                    ],
                }));
        }
        // Phase 1 variant differs only in its uniform/indirect/visible buffers.
        if gpu.phase1_culling_bind_group.is_none() {
            gpu.phase1_culling_bind_group =
                Some(device.create_bind_group(&wgpu::BindGroupDescriptor {
                    label: Some("Phase 1 Culling Bind Group"),
                    layout: &self.culling_bind_group_layout,
                    entries: &[
                        wgpu::BindGroupEntry {
                            binding: 0,
                            resource: gpu.transform_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 1,
                            resource: gpu.object_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 2,
                            resource: self.phase1_culling_uniform_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 3,
                            resource: self.mesh_bounds_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 4,
                            resource: gpu.phase1_indirect_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 5,
                            resource: gpu.phase1_visible_indices_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 6,
                            resource: wgpu::BindingResource::TextureView(hiz_view),
                        },
                        wgpu::BindGroupEntry {
                            binding: 7,
                            resource: self.mesh_aabbs_buffer.as_entire_binding(),
                        },
                        wgpu::BindGroupEntry {
                            binding: 8,
                            resource: self.mesh_lod_buffer.as_entire_binding(),
                        },
                    ],
                }));
        }
        // Depth range for cluster slicing, from the active camera's projection.
        let (z_near, z_far) = world
            .resources
            .active_camera
            .and_then(|entity| world.get_camera(entity))
            .map(|camera| match &camera.projection {
                crate::ecs::camera::components::Projection::Perspective(persp) => {
                    (persp.z_near, persp.z_far.unwrap_or(1000.0))
                }
                crate::ecs::camera::components::Projection::Orthographic(ortho) => {
                    (ortho.z_near, ortho.z_far)
                }
            })
            .unwrap_or((0.1, 1000.0));
        let tile_size_x = (screen_width as f32) / (CLUSTER_GRID_X as f32);
        let tile_size_y = (screen_height as f32) / (CLUSTER_GRID_Y as f32);
        let inverse_projection: [[f32; 4]; 4] =
            nalgebra_glm::inverse(&camera_matrices.projection).into();
        let num_directional_lights = self
            .world_states
            .get(&self.current_world_id)
            .unwrap()
            .num_directional_lights;
        let cluster_uniforms = ClusterUniforms {
            inverse_projection,
            screen_size: [screen_width as f32, screen_height as f32],
            z_near,
            z_far,
            cluster_count: [CLUSTER_GRID_X, CLUSTER_GRID_Y, CLUSTER_GRID_Z, 0],
            tile_size: [tile_size_x, tile_size_y],
            num_lights: lights_data.len() as u32,
            num_directional_lights,
        };
        let cluster_uniforms_buffer = &self
            .world_states
            .get(&self.current_world_id)
            .unwrap()
            .gpu_buffers
            .as_ref()
            .unwrap()
            .cluster_uniforms_buffer;
        queue.write_buffer(
            cluster_uniforms_buffer,
            0,
            bytemuck::cast_slice(&[cluster_uniforms]),
        );
        let view_matrix: [[f32; 4]; 4] = camera_matrices.view.into();
        queue.write_buffer(
            &self.view_matrix_buffer,
            0,
            bytemuck::cast_slice(&[view_matrix]),
        );
        // Detect camera changes by hashing the inverse projection (bitwise,
        // via to_bits, so NaN/negative-zero are stable) plus screen size.
        let view_proj_hash = {
            use std::hash::{Hash, Hasher};
            let mut hasher = std::collections::hash_map::DefaultHasher::new();
            for row in &inverse_projection {
                for val in row {
                    val.to_bits().hash(&mut hasher);
                }
            }
            screen_width.hash(&mut hasher);
            screen_height.hash(&mut hasher);
            hasher.finish()
        };
        {
            let state = self.world_states.get_mut(&self.current_world_id).unwrap();
            state.camera_changed = view_proj_hash != state.last_camera_hash;
            if state.camera_changed {
                state.last_camera_hash = view_proj_hash;
            }
        }
        // Unlike the culling groups, this bind group is recreated every frame.
        let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
        let gpu = world_state.gpu_buffers.as_mut().unwrap();
        gpu.cluster_assign_bind_group =
            Some(device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("Cluster Light Assign Bind Group (Per-World)"),
                layout: &self.cluster_assign_bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: gpu.cluster_uniforms_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: self.cluster_bounds_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: gpu.light_grid_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 3,
                        resource: gpu.light_indices_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 4,
                        resource: gpu.lights_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 5,
                        resource: self.view_matrix_buffer.as_entire_binding(),
                    },
                ],
            }));
    }
    // Camera position (translation column of the global transform) is used to
    // prioritize which spot/point lights get shadow map slots.
    let camera_position = world
        .resources
        .active_camera
        .and_then(|cam| world.get_global_transform(cam))
        .map(|t| nalgebra_glm::vec3(t.0[(0, 3)], t.0[(1, 3)], t.0[(2, 3)]))
        .unwrap_or_else(|| nalgebra_glm::vec3(0.0, 0.0, 0.0));
    let spotlight_result = collect_spotlight_shadows(world, camera_position);
    // Patch shadow-map indices back into the already-collected light records.
    apply_spotlight_shadow_indices(
        &mut lights_data,
        &spotlight_result.entity_to_shadow_index,
        &entity_to_lights_index,
    );
    if !spotlight_result.shadow_data.is_empty() {
        queue.write_buffer(
            &self.spotlight_shadow_buffer,
            0,
            bytemuck::cast_slice(&spotlight_result.shadow_data),
        );
    }
    let point_shadow_result = collect_point_light_shadows(
        world,
        camera_position,
        &mut lights_data,
        &entity_to_lights_index,
    );
    if !point_shadow_result.is_empty() {
        queue.write_buffer(
            &self.point_shadow_buffer,
            0,
            bytemuck::cast_slice(&point_shadow_result),
        );
    }
    // Upload lights last (after shadow indices were patched in), growing the
    // per-world lights buffer first if needed.
    if !lights_data.is_empty() {
        let lights_buffer_size = self
            .world_states
            .get(&self.current_world_id)
            .unwrap()
            .gpu_buffers
            .as_ref()
            .unwrap()
            .lights_buffer_size;
        if lights_data.len() > lights_buffer_size {
            let new_size = (lights_data.len() as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
            let new_size = new_size.min(MAX_LIGHTS);
            let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Mesh Lights Buffer (Resized)"),
                size: (std::mem::size_of::<LightData>() * new_size) as u64,
                usage: wgpu::BufferUsages::STORAGE
                    | wgpu::BufferUsages::COPY_DST
                    | wgpu::BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            });
            {
                let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
                let gpu = world_state.gpu_buffers.as_mut().unwrap();
                gpu.lights_buffer = new_buffer;
                gpu.lights_buffer_size = new_size;
            }
            // Instance bind group references the lights buffer; rebuild it.
            self.rebuild_instance_bind_group(device);
        }
        let lights_buffer = &self
            .world_states
            .get(&self.current_world_id)
            .unwrap()
            .gpu_buffers
            .as_ref()
            .unwrap()
            .lights_buffer;
        queue.write_buffer(lights_buffer, 0, bytemuck::cast_slice(&lights_data));
    }
}
/// Appends a draw batch for `mesh_id`, expanding it into one batch per LOD
/// level when an LOD chain is registered for that mesh; otherwise the batch is
/// pushed unchanged. Tuple layout: (mesh_id, material_id, start, end).
pub(super) fn push_batch_with_lod(
    &self,
    instances: &mut Vec<(u32, u32, u32, u32)>,
    mesh_id: u32,
    material_id: u32,
    start: u32,
    end: u32,
) {
    match self.mesh_lod_mesh_ids.get(&mesh_id) {
        // LOD chain registered: emit one batch per level over the same range.
        Some(lod_ids) => {
            instances.extend(lod_ids.iter().map(|&lod| (lod, material_id, start, end)));
        }
        None => instances.push((mesh_id, material_id, start, end)),
    }
}
/// Rebuilds the GPU slot registry densely from `cached_entities`: slot N maps
/// to the N-th cached entity in both directions. Previously tracked free slots
/// are cleared because the mapping is now contiguous.
pub(super) fn populate_gpu_registry_from_instances(&mut self) {
    let state = self.world_states.get_mut(&self.current_world_id).unwrap();
    state.gpu_registry.clear();
    state.free_slots_by_group.clear();
    let entity_count = state.cached_entities.len();
    // Grow the slot table once up front instead of checking/resizing on every
    // iteration (never shrink — matches the previous grow-only behavior).
    if state.gpu_registry.slot_to_entity.len() < entity_count {
        state.gpu_registry.slot_to_entity.resize(entity_count, None);
    }
    for (slot, entity) in state.cached_entities.iter().enumerate() {
        state.gpu_registry.entity_to_slot.insert(*entity, slot as u32);
        state.gpu_registry.slot_to_entity[slot] = Some(*entity);
    }
    // Highest occupied slot + 1; keep any larger pre-existing count.
    state.gpu_registry.slot_count = state.gpu_registry.slot_count.max(entity_count as u32);
}
pub(super) fn resolve_lod_chains(
&mut self,
chains: &[crate::ecs::graphics::resources::MeshLodChain],
queue: &wgpu::Queue,
) {
self.mesh_lod_mesh_ids.clear();
for info in &mut self.mesh_lod_data {
info.lod_count = 1;
info.thresholds = [0.0, 0.0, 0.0];
}
for chain in chains {
let Some(&base_id) = self.meshes.get(&chain.base_mesh) else {
continue;
};
let mut lod_ids = Vec::new();
let mut thresholds = [0.0f32; 3];
for (index, level) in chain.levels.iter().enumerate() {
if let Some(&mesh_id) = self.meshes.get(&level.mesh_name) {
lod_ids.push(mesh_id);
if index < 3 {
thresholds[index] = level.min_screen_pixels;
}
}
}
if lod_ids.len() > 1 {
let lod_count = lod_ids.len() as u32;
if (base_id as usize) < self.mesh_lod_data.len() {
self.mesh_lod_data[base_id as usize] = MeshLodInfo {
lod_count,
thresholds,
};
}
self.mesh_lod_mesh_ids.insert(base_id, lod_ids);
}
}
if !self.mesh_lod_data.is_empty() {
queue.write_buffer(
&self.mesh_lod_buffer,
0,
bytemuck::cast_slice(&self.mesh_lod_data),
);
}
}
/// Recreates the transform-compute bind group for the current world, binding
/// the local-transform input, the global-transform output, and the shared
/// compute uniforms. Called whenever any of those buffers is replaced.
pub(super) fn rebuild_transform_compute_bind_group(&mut self, device: &wgpu::Device) {
    let state = self.world_states.get_mut(&self.current_world_id).unwrap();
    let gpu = state.gpu_buffers.as_mut().unwrap();
    let entries = [
        wgpu::BindGroupEntry {
            binding: 0,
            resource: gpu.local_transform_buffer.as_entire_binding(),
        },
        wgpu::BindGroupEntry {
            binding: 1,
            resource: gpu.transform_buffer.as_entire_binding(),
        },
        wgpu::BindGroupEntry {
            binding: 2,
            resource: self.transform_compute_uniforms_buffer.as_entire_binding(),
        },
    ];
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Transform Compute Bind Group"),
        layout: &self.transform_compute_bind_group_layout,
        entries: &entries,
    });
    gpu.transform_compute_bind_group = Some(bind_group);
}
/// Resolves the entity transform hierarchy on the GPU: runs one compute pass
/// per hierarchy depth level, each multiplying children by their (already
/// resolved) parents. Uniforms for every depth are staged up front at distinct
/// offsets and copied into the live uniform buffer between passes, since queue
/// writes all land before the encoder's commands execute.
pub(super) fn dispatch_transform_compute(
    &self,
    encoder: &mut wgpu::CommandEncoder,
    queue: &wgpu::Queue,
    entity_count: u32,
) {
    let state = self.world_states.get(&self.current_world_id).unwrap();
    let depth_limit = state.max_hierarchy_depth;
    if entity_count == 0 || depth_limit == 0 {
        return;
    }
    let gpu = state.gpu_buffers.as_ref().unwrap();
    let Some(bind_group) = gpu.transform_compute_bind_group.as_ref() else {
        return;
    };
    let workgroups = entity_count.div_ceil(256);
    let uniform_size = std::mem::size_of::<super::types::TransformComputeUniforms>();
    // One uniform record per depth level, packed back-to-back.
    let mut staged = Vec::with_capacity(uniform_size * (depth_limit as usize + 1));
    for depth in 0..=depth_limit {
        staged.extend_from_slice(bytemuck::bytes_of(
            &super::types::TransformComputeUniforms {
                entity_count,
                current_depth: depth,
                _pad: [0; 2],
            },
        ));
    }
    queue.write_buffer(&self.transform_compute_staging_buffer, 0, &staged);
    for depth in 0..=depth_limit {
        // Copy this depth's record into the live uniform buffer, then run
        // the pass; the pass is scoped so the encoder is free for the copy.
        encoder.copy_buffer_to_buffer(
            &self.transform_compute_staging_buffer,
            (depth as usize * uniform_size) as u64,
            &self.transform_compute_uniforms_buffer,
            0,
            uniform_size as u64,
        );
        let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Transform Compute Pass"),
            timestamp_writes: None,
        });
        pass.set_pipeline(&self.transform_compute_pipeline);
        pass.set_bind_group(0, bind_group, &[]);
        pass.dispatch_workgroups(workgroups, 1, 1);
    }
}
/// Recreates the instanced-transform compute bind group for the current world,
/// binding the per-instance local matrices, the global-transform output, and
/// the instanced compute uniforms. Called whenever any bound buffer changes.
pub(super) fn rebuild_instanced_compute_bind_group(&mut self, device: &wgpu::Device) {
    let state = self.world_states.get_mut(&self.current_world_id).unwrap();
    let gpu = state.gpu_buffers.as_mut().unwrap();
    let entries = [
        wgpu::BindGroupEntry {
            binding: 0,
            resource: gpu.instanced_local_matrix_buffer.as_entire_binding(),
        },
        wgpu::BindGroupEntry {
            binding: 1,
            resource: gpu.transform_buffer.as_entire_binding(),
        },
        wgpu::BindGroupEntry {
            binding: 2,
            resource: self.instanced_compute_uniforms_buffer.as_entire_binding(),
        },
    ];
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Instanced Transform Compute Bind Group"),
        layout: &self.instanced_compute_bind_group_layout,
        entries: &entries,
    });
    gpu.instanced_compute_bind_group = Some(bind_group);
}
/// Computes world-space transforms for GPU-instanced meshes: for each entity
/// owning a range of instance-local matrices, dispatches a compute pass that
/// multiplies them by the entity's current global transform.
///
/// BUG FIX: the previous implementation called `queue.write_buffer` at offset
/// 0 inside the per-range loop. `Queue::write_buffer` uploads all land before
/// the encoder's commands execute at submit, so every recorded
/// `copy_buffer_to_buffer` observed only the LAST record written — all ranges
/// used the final entity's parent transform and output offset. All per-range
/// uniforms are now staged at distinct offsets in a single write, mirroring
/// `dispatch_transform_compute`.
/// NOTE(review): this assumes `instanced_compute_staging_buffer` is sized for
/// `ranges.len()` uniform records (as the transform-compute staging buffer is
/// for its depth records) — confirm at buffer-creation time.
pub(super) fn dispatch_instanced_transform_compute(
    &self,
    encoder: &mut wgpu::CommandEncoder,
    queue: &wgpu::Queue,
    world: &crate::ecs::world::World,
) {
    let world_state = self.world_states.get(&self.current_world_id).unwrap();
    let gpu = world_state.gpu_buffers.as_ref().unwrap();
    let Some(ref bind_group) = gpu.instanced_compute_bind_group else {
        return;
    };
    let ranges: Vec<_> = world_state
        .instanced_transform_ranges
        .iter()
        .map(|(&entity, &(start, count))| (entity, start, count))
        .collect();
    if ranges.is_empty() {
        return;
    }
    let uniform_size = std::mem::size_of::<InstancedTransformComputeUniforms>();
    // Stage every range's uniforms up front, each at its own offset.
    let mut staged = Vec::with_capacity(uniform_size * ranges.len());
    for &(entity, start, count) in &ranges {
        // Entities without a global transform fall back to identity.
        let parent_transform = world
            .get_global_transform(entity)
            .map(|t| t.0)
            .unwrap_or_else(nalgebra_glm::Mat4::identity);
        staged.extend_from_slice(bytemuck::bytes_of(
            &InstancedTransformComputeUniforms {
                parent_transform: parent_transform.into(),
                instance_count: count,
                output_offset: start,
                _pad: [0; 2],
            },
        ));
    }
    queue.write_buffer(&self.instanced_compute_staging_buffer, 0, &staged);
    for (range_index, &(_, _, count)) in ranges.iter().enumerate() {
        // Copy this range's record into the live uniform buffer, then dispatch.
        encoder.copy_buffer_to_buffer(
            &self.instanced_compute_staging_buffer,
            (range_index * uniform_size) as u64,
            &self.instanced_compute_uniforms_buffer,
            0,
            uniform_size as u64,
        );
        let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Instanced Transform Compute Pass"),
            timestamp_writes: None,
        });
        compute_pass.set_pipeline(&self.instanced_compute_pipeline);
        compute_pass.set_bind_group(0, bind_group, &[]);
        compute_pass.dispatch_workgroups(count.div_ceil(256), 1, 1);
    }
}
pub(super) fn upload_local_transforms(
    &mut self,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    local_transforms: &[super::types::GpuLocalTransform],
) {
    // Upload CPU-side local transforms into the current world's storage
    // buffer, growing the buffer (and rebuilding the dependent compute bind
    // group) when the data no longer fits.
    if local_transforms.is_empty() {
        return;
    }
    let needed = local_transforms.len();
    let capacity = self
        .world_states
        .get(&self.current_world_id)
        .unwrap()
        .gpu_buffers
        .as_ref()
        .unwrap()
        .local_transform_buffer_size;
    if needed > capacity {
        // Grow geometrically, capped at the global instance limit.
        let new_size = ((needed as f32 * super::types::BUFFER_GROWTH_FACTOR).ceil() as usize)
            .min(super::types::MAX_INSTANCES);
        let replacement = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Local Transform Buffer (Resized)"),
            size: (std::mem::size_of::<super::types::GpuLocalTransform>() * new_size) as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        {
            let gpu = self
                .world_states
                .get_mut(&self.current_world_id)
                .unwrap()
                .gpu_buffers
                .as_mut()
                .unwrap();
            gpu.local_transform_buffer = replacement;
            gpu.local_transform_buffer_size = new_size;
        }
        // The old buffer is dropped above; the transform compute bind group
        // must be repointed at the new allocation.
        self.rebuild_transform_compute_bind_group(device);
    }
    let gpu = self
        .world_states
        .get(&self.current_world_id)
        .unwrap()
        .gpu_buffers
        .as_ref()
        .unwrap();
    queue.write_buffer(
        &gpu.local_transform_buffer,
        0,
        bytemuck::cast_slice(local_transforms),
    );
}
pub(super) fn upload_instanced_local_matrices(
    &mut self,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    local_matrices: &[[[f32; 4]; 4]],
) {
    // Upload per-instance local matrices for instanced rendering into the
    // current world's storage buffer, growing it when the data no longer fits.
    if local_matrices.is_empty() {
        return;
    }
    let needed = local_matrices.len();
    let capacity = self
        .world_states
        .get(&self.current_world_id)
        .unwrap()
        .gpu_buffers
        .as_ref()
        .unwrap()
        .instanced_local_matrix_buffer_size;
    if needed > capacity {
        // Grow geometrically, capped at the global instance limit.
        let new_size = ((needed as f32 * super::types::BUFFER_GROWTH_FACTOR).ceil() as usize)
            .min(super::types::MAX_INSTANCES);
        let replacement = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Instanced Local Matrix Buffer (Resized)"),
            size: (std::mem::size_of::<[[f32; 4]; 4]>() * new_size) as u64,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST
                | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });
        let gpu = self
            .world_states
            .get_mut(&self.current_world_id)
            .unwrap()
            .gpu_buffers
            .as_mut()
            .unwrap();
        gpu.instanced_local_matrix_buffer = replacement;
        gpu.instanced_local_matrix_buffer_size = new_size;
    }
    {
        let gpu = self
            .world_states
            .get(&self.current_world_id)
            .unwrap()
            .gpu_buffers
            .as_ref()
            .unwrap();
        queue.write_buffer(
            &gpu.instanced_local_matrix_buffer,
            0,
            bytemuck::cast_slice(local_matrices),
        );
    }
    // Rebuilt on every upload, not just on resize — presumably this also
    // serves as the initial bind-group creation. NOTE(review): if the bind
    // group is guaranteed to exist by the time this runs, this could be
    // limited to the resize path; confirm before changing.
    self.rebuild_instanced_compute_bind_group(device);
}
pub(super) fn ensure_world_gpu_buffers(&mut self, device: &wgpu::Device, world_id: u64) {
    // Lazily create the per-world GPU buffer set the first time a world is
    // rendered. Uses the entry API's returned reference directly instead of
    // `or_default()` followed by a second `get_mut` lookup
    // (clippy::map_entry — hashes the key once instead of twice).
    let state = self.world_states.entry(world_id).or_default();
    if state.gpu_buffers.is_none() {
        state.gpu_buffers = Some(WorldGpuBuffers::new(
            device,
            &self.instance_bind_group_layout,
            &self._cluster_bounds_bind_group_layout,
            &self.cluster_bounds_buffer,
            &self.morph_displacement_buffer,
        ));
    }
}
pub fn cleanup_world_state(&mut self, world_id: u64) {
    // Drop all cached render state for a destroyed world.
    self.world_states.remove(&world_id);
    // Clear the prepared-world marker if it pointed at the removed world.
    if self.last_prepared_world_id == Some(world_id) {
        self.last_prepared_world_id = None;
    }
}
pub fn cleanup_stale_world_states(&mut self, max_age_frames: u64) {
    // Evict world states unused for more than `max_age_frames` frames, always
    // keeping the most recently prepared world alive.
    //
    // Fixes two issues with the previous collect-then-remove version:
    // * `HashMap::retain` removes in place, avoiding the temporary Vec of
    //   keys and the second round of lookups;
    // * the old `last_prepared_world_id.unwrap_or(u64::MAX)` sentinel would
    //   wrongly protect a world whose id happened to equal `u64::MAX` when
    //   no world was prepared. Comparing `Option`s directly has no sentinel.
    let current_frame = self.frame_counter;
    let keep = self.last_prepared_world_id;
    self.world_states.retain(|&world_id, state| {
        keep == Some(world_id)
            || current_frame.saturating_sub(state.last_used_frame) <= max_age_frames
    });
}
}