use crate::ecs::generational_registry::{registry_entry, registry_entry_by_name};
use std::collections::HashMap;
use super::super::super::projection::*;
use super::super::pass::{MaterialBindGroupContext, MeshPass};
use super::super::types::{
BUFFER_GROWTH_FACTOR, BUFFER_SHRINK_THRESHOLD, COMPACTION_MIN_CHECK_INTERVAL,
COMPACTION_UTILIZATION_DROP_THRESHOLD, CullingUniforms, DrawIndexedIndirect, GpuLocalTransform,
MAX_HIERARCHY_DEPTH, MAX_INSTANCES, MAX_LIGHTS, MaterialData, MaterialTextures, MeshBoundsData,
MeshUniforms, ModelMatrix, NORMAL_MAP_FLIP_Y, NORMAL_MAP_TWO_COMPONENT, ObjectData,
compute_normal_matrix, pack_uv_set_indices,
};
/// Per-frame aggregation of material state prepared for GPU upload.
struct MaterialCollection {
/// Flat array of GPU material records; slot 0 always holds the fallback
/// material from `default_material_data`.
materials_data: Vec<MaterialData>,
/// Maps each mesh entity to its index into `materials_data` (0 = fallback).
material_map: HashMap<crate::ecs::world::Entity, u32>,
/// Texture handles keyed by material id; consumed later when building
/// material bind groups.
material_texture_map: HashMap<u32, MaterialTextures>,
/// Material name -> material id; de-duplicates materials shared by name.
name_to_material_id: HashMap<String, u32>,
/// Material ids classified as transparent; starts empty and is populated by
/// `classify_mesh_entities`.
transparent_material_ids: std::collections::HashSet<u32>,
/// Material ids flagged double-sided; starts empty and is populated by
/// `classify_mesh_entities`.
double_sided_material_ids: std::collections::HashSet<u32>,
}
/// Visible mesh entities grouped by mesh name, split by render layer
/// (world vs. overlay) and pipeline variant (opaque, opaque double-sided,
/// transparent).
struct EntityBuckets {
world_opaque: HashMap<String, Vec<crate::ecs::world::Entity>>,
world_opaque_double_sided: HashMap<String, Vec<crate::ecs::world::Entity>>,
world_transparent: HashMap<String, Vec<crate::ecs::world::Entity>>,
overlay_opaque: HashMap<String, Vec<crate::ecs::world::Entity>>,
overlay_opaque_double_sided: HashMap<String, Vec<crate::ecs::world::Entity>>,
overlay_transparent: HashMap<String, Vec<crate::ecs::world::Entity>>,
}
/// Output buffers filled while building batches; the vectors are kept
/// index-aligned with `objects` (one entry per accumulated entity).
struct BatchAccumulator {
/// Per-object model + normal matrices; `ObjectData::transform_index`
/// indexes into this vector.
transforms: Vec<ModelMatrix>,
/// Per-object custom shader data; `build_batch_objects` currently always
/// pushes `[1.0; 4]`.
custom_data: Vec<[f32; 4]>,
/// Per-object GPU records, in batch-sorted order.
objects: Vec<ObjectData>,
/// Source entities in the same order as `objects`.
entities: Vec<crate::ecs::world::Entity>,
}
/// Flags and offsets applied uniformly to every object emitted for one
/// bucket of batches (see `build_batch_objects`).
struct BatchConfig {
/// Copied into `ObjectData::is_overlay` for every object in the bucket.
is_overlay: u32,
/// Copied into `ObjectData::skip_occlusion` for every object in the bucket.
skip_occlusion: u32,
/// Added to the local batch index (`instances.len()`) to form the global
/// `batch_id`.
batch_offset: usize,
}
/// Converts an ECS material component into its GPU-side representation:
/// a `MaterialData` record for the material storage buffer plus the set of
/// texture handles needed to build its bind group later.
fn convert_material_to_gpu_data(
    material: &crate::ecs::material::components::Material,
) -> (MaterialData, MaterialTextures) {
    use crate::ecs::material::components::AlphaMode;
    // Shader-side integer encoding of the alpha mode.
    let alpha_mode = match material.alpha_mode {
        AlphaMode::Opaque => 0,
        AlphaMode::Mask => 1,
        AlphaMode::Blend => 2,
    };
    // Fold the two normal-map options into a single bitfield.
    let mut normal_map_flags = 0;
    if material.normal_map_flip_y {
        normal_map_flags |= NORMAL_MAP_FLIP_Y;
    }
    if material.normal_map_two_component {
        normal_map_flags |= NORMAL_MAP_TWO_COMPONENT;
    }
    // The `has_*` flags tell the shader which texture slots carry real data
    // (slots without one are bound to dummy views elsewhere).
    let data = MaterialData {
        base_color: material.base_color,
        emissive_factor: material.emissive_factor,
        alpha_mode,
        alpha_cutoff: material.alpha_cutoff,
        has_base_texture: u32::from(material.base_texture.is_some()),
        has_emissive_texture: u32::from(material.emissive_texture.is_some()),
        has_normal_texture: u32::from(material.normal_texture.is_some()),
        has_metallic_roughness_texture: u32::from(material.metallic_roughness_texture.is_some()),
        has_occlusion_texture: u32::from(material.occlusion_texture.is_some()),
        normal_scale: material.normal_scale,
        occlusion_strength: material.occlusion_strength,
        roughness: material.roughness,
        metallic: material.metallic,
        unlit: u32::from(material.unlit),
        normal_map_flags,
        uv_scale: material.uv_scale,
        transmission_factor: material.transmission_factor,
        has_transmission_texture: u32::from(material.transmission_texture.is_some()),
        thickness: material.thickness,
        has_thickness_texture: u32::from(material.thickness_texture.is_some()),
        _align_attenuation: [0, 0],
        attenuation_color: material.attenuation_color,
        attenuation_distance: material.attenuation_distance,
        ior: material.ior,
        specular_factor: material.specular_factor,
        _align_specular: [0, 0],
        specular_color_factor: material.specular_color_factor,
        has_specular_texture: u32::from(material.specular_texture.is_some()),
        has_specular_color_texture: u32::from(material.specular_color_texture.is_some()),
        emissive_strength: material.emissive_strength,
        uv_set_indices: pack_uv_set_indices(material),
        _pad_end: 0.0,
    };
    // Texture handles are cloned so the bind-group builder can outlive the
    // borrow of the material component.
    let textures = MaterialTextures {
        base_texture: material.base_texture.clone(),
        emissive_texture: material.emissive_texture.clone(),
        normal_texture: material.normal_texture.clone(),
        metallic_roughness_texture: material.metallic_roughness_texture.clone(),
        occlusion_texture: material.occlusion_texture.clone(),
        transmission_texture: material.transmission_texture.clone(),
        thickness_texture: material.thickness_texture.clone(),
        specular_texture: material.specular_texture.clone(),
        specular_color_texture: material.specular_color_texture.clone(),
    };
    (data, textures)
}
/// Returns the fallback material stored in slot 0 of the material buffer:
/// a matte light-grey dielectric with no textures and standard PBR defaults.
fn default_material_data() -> MaterialData {
    let no_tint = [1.0, 1.0, 1.0];
    MaterialData {
        // Base surface appearance.
        base_color: [0.7, 0.7, 0.7, 1.0],
        roughness: 0.5,
        metallic: 0.0,
        unlit: 0,
        uv_scale: [1.0, 1.0],
        uv_set_indices: 0,
        // Opaque, with the conventional 0.5 mask cutoff should Mask mode apply.
        alpha_mode: 0,
        alpha_cutoff: 0.5,
        // No textures bound.
        has_base_texture: 0,
        has_emissive_texture: 0,
        has_normal_texture: 0,
        has_metallic_roughness_texture: 0,
        has_occlusion_texture: 0,
        has_transmission_texture: 0,
        has_thickness_texture: 0,
        has_specular_texture: 0,
        has_specular_color_texture: 0,
        // Normal / occlusion neutral defaults.
        normal_scale: 1.0,
        normal_map_flags: 0,
        occlusion_strength: 1.0,
        // Emission disabled (black factor at unit strength).
        emissive_factor: [0.0, 0.0, 0.0],
        emissive_strength: 1.0,
        // Transmission / volume effects disabled.
        transmission_factor: 0.0,
        thickness: 0.0,
        attenuation_color: no_tint,
        attenuation_distance: f32::INFINITY,
        // Dielectric specular defaults (IOR 1.5, as for common glass/plastic).
        ior: 1.5,
        specular_factor: 1.0,
        specular_color_factor: no_tint,
        // Explicit padding to match the GPU struct layout.
        _align_attenuation: [0, 0],
        _align_specular: [0, 0],
        _pad_end: 0.0,
    }
}
/// Resolves the material referenced by `entity`, preferring a direct
/// generational-id lookup over a name lookup when the reference carries an id.
/// Returns `None` if the entity has no material reference or the reference
/// cannot be resolved against the registry.
fn resolve_material_for_entity(
    world: &crate::ecs::world::World,
    entity: crate::ecs::world::Entity,
) -> Option<&crate::ecs::material::components::Material> {
    let mat_ref = world.core.get_material_ref(entity)?;
    let registry = &world.resources.material_registry.registry;
    match mat_ref.id {
        // Id lookups validate the generation, so stale ids resolve to None.
        Some(id) => registry_entry(registry, id.index, id.generation),
        None => registry_entry_by_name(registry, &mat_ref.name),
    }
}
/// Registers the material of every entity in `entities`, de-duplicating by
/// material name, and records each entity's material id in `material_map`.
/// Entities whose material cannot be resolved fall back to id 0 (the default
/// material slot).
fn collect_entity_materials(
    world: &crate::ecs::world::World,
    entities: &[crate::ecs::world::Entity],
    materials_data: &mut Vec<MaterialData>,
    material_texture_map: &mut HashMap<u32, MaterialTextures>,
    name_to_material_id: &mut HashMap<String, u32>,
    material_map: &mut HashMap<crate::ecs::world::Entity, u32>,
) {
    for &entity in entities {
        let Some(material_ref) = world.core.get_material_ref(entity) else {
            continue;
        };
        let material_opt = resolve_material_for_entity(world, entity);
        let material_name = &material_ref.name;
        // Reuse the id if this material name was already registered; otherwise
        // append a fresh record (or fall back to slot 0 when unresolvable).
        let material_id = match name_to_material_id.get(material_name) {
            Some(&existing_id) => existing_id,
            None => match material_opt {
                Some(material) => {
                    let new_id = materials_data.len() as u32;
                    let (data, textures) = convert_material_to_gpu_data(material);
                    materials_data.push(data);
                    material_texture_map.insert(new_id, textures);
                    name_to_material_id.insert(material_name.clone(), new_id);
                    new_id
                }
                None => 0,
            },
        };
        material_map.insert(entity, material_id);
    }
}
/// Appends every registry material that was not already registered by entity
/// collection, so materials exist on the GPU even when nothing currently
/// references them.
///
/// Fix: the registry lookup previously used direct indexing
/// (`entries[index as usize]`), which panics if `name_to_index` ever holds an
/// index past the end of `entries`; a checked `.get()` skips such stale
/// entries instead.
fn collect_remaining_registry_materials(
    world: &crate::ecs::world::World,
    materials_data: &mut Vec<MaterialData>,
    material_texture_map: &mut HashMap<u32, MaterialTextures>,
    name_to_material_id: &mut HashMap<String, u32>,
) {
    let registry = &world.resources.material_registry.registry;
    for (name, &index) in &registry.name_to_index {
        // Already registered while collecting entity materials.
        if name_to_material_id.contains_key(name) {
            continue;
        }
        // Skip vacated slots and (defensively) out-of-range indices.
        let Some(material) = registry
            .entries
            .get(index as usize)
            .and_then(Option::as_ref)
        else {
            continue;
        };
        let new_id = materials_data.len() as u32;
        let (data, textures) = convert_material_to_gpu_data(material);
        materials_data.push(data);
        material_texture_map.insert(new_id, textures);
        name_to_material_id.insert(name.clone(), new_id);
    }
}
impl MeshPass {
/// Builds the frame's `MaterialCollection`: slot 0 is the default material,
/// followed by materials referenced by regular and instanced mesh entities,
/// then any remaining registry materials. The transparent / double-sided id
/// sets start empty and are filled by `classify_mesh_entities`.
fn collect_materials(
    &self,
    world: &crate::ecs::world::World,
    mesh_entities: &[crate::ecs::world::Entity],
    instanced_mesh_entities: &[crate::ecs::world::Entity],
) -> MaterialCollection {
    // Slot 0 is the fallback for entities without a resolvable material.
    let mut materials_data = vec![default_material_data()];
    let mut material_map: HashMap<crate::ecs::world::Entity, u32> = HashMap::new();
    let mut material_texture_map: HashMap<u32, MaterialTextures> = HashMap::new();
    let mut name_to_material_id: HashMap<String, u32> = HashMap::new();
    // Regular and instanced entities contribute materials identically.
    for entity_set in [mesh_entities, instanced_mesh_entities] {
        collect_entity_materials(
            world,
            entity_set,
            &mut materials_data,
            &mut material_texture_map,
            &mut name_to_material_id,
            &mut material_map,
        );
    }
    // Include registry materials no entity referenced this frame.
    collect_remaining_registry_materials(
        world,
        &mut materials_data,
        &mut material_texture_map,
        &mut name_to_material_id,
    );
    MaterialCollection {
        materials_data,
        material_map,
        material_texture_map,
        name_to_material_id,
        transparent_material_ids: std::collections::HashSet::new(),
        double_sided_material_ids: std::collections::HashSet::new(),
    }
}
/// Sorts renderable mesh entities into `EntityBuckets` (world vs. overlay ×
/// opaque / opaque double-sided / transparent, keyed by mesh name) and
/// records transparent / double-sided material ids in `materials`.
///
/// An entity is skipped when it is a sprite, its `Visibility` is off, or its
/// render layer is disabled in graphics settings; it is only bucketed when it
/// has a global transform and its mesh name is known to this pass.
fn classify_mesh_entities(
&self,
world: &crate::ecs::world::World,
mesh_entities: &[crate::ecs::world::Entity],
materials: &mut MaterialCollection,
) -> EntityBuckets {
let mut buckets = EntityBuckets {
world_opaque: HashMap::new(),
world_opaque_double_sided: HashMap::new(),
world_transparent: HashMap::new(),
overlay_opaque: HashMap::new(),
overlay_opaque_double_sided: HashMap::new(),
overlay_transparent: HashMap::new(),
};
for &entity in mesh_entities {
// Sprites are excluded from the mesh pass entirely.
if world
.sprite2d
.entity_has_components(entity, crate::ecs::world::SPRITE)
{
continue;
}
// Entities with an explicit Visibility component that is off are skipped;
// entities without one are treated as visible.
if let Some(visibility) = world.core.get_visibility(entity)
&& !visibility.visible
{
continue;
}
// Missing RenderLayer defaults to WORLD.
let render_layer = world
.core
.get_render_layer(entity)
.map(|layer| layer.0)
.unwrap_or(crate::ecs::render_layer::components::RenderLayer::WORLD);
// WORLD and OVERLAY layers can each be toggled off in graphics settings;
// any other layer value always renders.
let should_render = match render_layer {
crate::ecs::render_layer::components::RenderLayer::WORLD => {
world.resources.graphics.render_layer_world_enabled
}
crate::ecs::render_layer::components::RenderLayer::OVERLAY => {
world.resources.graphics.render_layer_overlay_enabled
}
_ => true,
};
if !should_render {
continue;
}
// Only bucket entities whose mesh is registered with this pass and which
// have a global transform to render with.
if let Some(mesh) = world.core.get_render_mesh(entity)
&& world.core.get_global_transform(entity).is_some()
&& self.meshes.contains_key(&mesh.name)
{
let material = resolve_material_for_entity(world, entity);
// Blend alpha mode or any transmission routes to the transparent queue.
let material_is_transparent = material.is_some_and(|m| {
m.alpha_mode == crate::ecs::material::components::AlphaMode::Blend
|| m.transmission_factor > 0.0
});
// NOTE(review): `custom_alpha` is hard-coded to 1.0, so the
// `custom_alpha < 1.0` test below can never fire — this looks like a
// hook for future per-entity alpha; confirm intent before removing.
let custom_alpha = 1.0_f32;
let is_transparent = material_is_transparent || custom_alpha < 1.0;
let is_double_sided = material.is_some_and(|m| m.double_sided);
// Record classification per material id so later pipeline selection can
// key off the material alone.
if let Some(&mat_id) = materials.material_map.get(&entity) {
if material_is_transparent {
materials.transparent_material_ids.insert(mat_id);
}
if is_double_sided {
materials.double_sided_material_ids.insert(mat_id);
}
}
let is_overlay =
render_layer == crate::ecs::render_layer::components::RenderLayer::OVERLAY;
// Pick the bucket matching layer × transparency × sidedness.
let target = if is_overlay {
if is_transparent {
&mut buckets.overlay_transparent
} else if is_double_sided {
&mut buckets.overlay_opaque_double_sided
} else {
&mut buckets.overlay_opaque
}
} else if is_transparent {
&mut buckets.world_transparent
} else if is_double_sided {
&mut buckets.world_opaque_double_sided
} else {
&mut buckets.world_opaque
};
target.entry(mesh.name.clone()).or_default().push(entity);
}
}
buckets
}
/// Turns one bucket of entities (grouped by mesh name) into draw batches.
///
/// Entities are regrouped by `(mesh_id, material_id)`, sorted for determinism
/// (material-major, then mesh; entities by id/generation within a batch), and
/// appended to `accum`. For every non-empty batch a `(mesh_id, material_id,
/// start, end)` record is pushed via `push_batch_with_lod`.
fn build_batch_objects(
    &self,
    world: &crate::ecs::world::World,
    entities_by_mesh: HashMap<String, Vec<crate::ecs::world::Entity>>,
    material_map: &HashMap<crate::ecs::world::Entity, u32>,
    config: &BatchConfig,
    accum: &mut BatchAccumulator,
    instances: &mut Vec<(u32, u32, u32, u32)>,
) {
    // Regroup by (mesh id, material id): each pair becomes one batch.
    let mut grouped: HashMap<(u32, u32), Vec<crate::ecs::world::Entity>> = HashMap::new();
    for (mesh_name, bucket) in entities_by_mesh {
        let Some(&mesh_id) = self.meshes.get(&mesh_name) else {
            continue;
        };
        for entity in bucket {
            let material_id = material_map.get(&entity).copied().unwrap_or(0);
            grouped
                .entry((mesh_id, material_id))
                .or_default()
                .push(entity);
        }
    }
    // Deterministic batch order: material-major, then mesh.
    let mut sorted_batches: Vec<_> = grouped.into_iter().collect();
    sorted_batches.sort_by_key(|&((mesh_id, material_id), _)| (material_id, mesh_id));
    for ((mesh_id, material_id), mut batch_entities) in sorted_batches {
        // Stable per-batch entity order keeps instance indices reproducible.
        batch_entities.sort_by_key(|e| (e.id, e.generation));
        let start = accum.objects.len() as u32;
        let batch_id = (config.batch_offset + instances.len()) as u32;
        let mesh_data = &self.mesh_data[mesh_id as usize];
        for entity in batch_entities {
            let Some(transform) = world.core.get_global_transform(entity) else {
                continue;
            };
            let transform_index = accum.transforms.len() as u32;
            accum.transforms.push(ModelMatrix {
                model: transform.0.into(),
                normal_matrix: compute_normal_matrix(&transform.0),
            });
            accum.custom_data.push([1.0, 1.0, 1.0, 1.0]);
            // Copy up to eight morph weights; entities without the component
            // get all zeros.
            let morph_weights = world
                .core
                .get_morph_weights(entity)
                .map_or([0.0f32; 8], |mw| {
                    let mut weights = [0.0f32; 8];
                    for (slot, value) in weights.iter_mut().zip(mw.weights.iter()) {
                        *slot = *value;
                    }
                    weights
                });
            accum.objects.push(ObjectData {
                transform_index,
                mesh_id,
                material_id,
                batch_id,
                morph_weights,
                morph_target_count: mesh_data.morph_target_count,
                morph_displacement_offset: mesh_data.morph_displacement_offset,
                mesh_vertex_offset: mesh_data.vertex_offset,
                mesh_vertex_count: mesh_data.vertex_count,
                entity_id: entity.id,
                is_overlay: config.is_overlay,
                skip_occlusion: config.skip_occlusion,
                _padding0: 0,
            });
            accum.entities.push(entity);
        }
        let end = accum.objects.len() as u32;
        // Emit the batch only if at least one entity had a global transform.
        if start < end {
            self.push_batch_with_lod(instances, mesh_id, material_id, start, end);
        }
    }
}
/// Grows the transform/custom-data and object storage buffers when the
/// frame's data no longer fits, rebuilding the instance bind group after any
/// resize. Growth applies `BUFFER_GROWTH_FACTOR` headroom (to amortize
/// reallocations) and is capped at `MAX_INSTANCES`.
///
/// Improvement: the previously duplicated buffer-descriptor boilerplate is
/// factored into `grown_size` and a shared `make_storage_buffer` closure.
fn resize_gpu_buffers_if_needed(
    &mut self,
    device: &wgpu::Device,
    all_transforms: &[ModelMatrix],
    all_objects: &[ObjectData],
) {
    // Target element count after growth: needed * growth factor, capped.
    fn grown_size(needed: usize) -> usize {
        std::cmp::min(
            (needed as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize,
            MAX_INSTANCES,
        )
    }
    // All three buffers share the same usage flags; only label and byte size differ.
    let make_storage_buffer = |label: &str, size: u64| {
        device.create_buffer(&wgpu::BufferDescriptor {
            label: Some(label),
            size,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST
                | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        })
    };
    if all_transforms.len() > self.gpu().transform_buffer_size {
        let new_size = grown_size(all_transforms.len());
        // The cap can leave new_size at the current size; only resize on growth.
        if new_size > self.gpu().transform_buffer_size {
            // The custom-data buffer is kept in lock-step with the transform
            // buffer so per-instance indices stay valid for both.
            let transform_buffer = make_storage_buffer(
                "Mesh Transform Buffer (Resized)",
                (std::mem::size_of::<ModelMatrix>() * new_size) as u64,
            );
            let custom_data_buffer = make_storage_buffer(
                "Mesh Custom Data Buffer (Resized)",
                (std::mem::size_of::<[f32; 4]>() * new_size) as u64,
            );
            self.gpu_mut().transform_buffer = transform_buffer;
            self.gpu_mut().transform_buffer_size = new_size;
            self.gpu_mut().custom_data_buffer = custom_data_buffer;
            self.gpu_mut().custom_data_buffer_size = new_size;
            // Bind groups hold references to the old buffers; rebuild them.
            self.rebuild_instance_bind_group(device);
        }
    }
    if all_objects.len() > self.gpu().object_buffer_size {
        let new_size = grown_size(all_objects.len());
        if new_size > self.gpu().object_buffer_size {
            let object_buffer = make_storage_buffer(
                "Mesh Object Buffer (Resized)",
                (std::mem::size_of::<ObjectData>() * new_size) as u64,
            );
            self.gpu_mut().object_buffer = object_buffer;
            self.gpu_mut().object_buffer_size = new_size;
            self.rebuild_instance_bind_group(device);
        }
    }
}
/// Uploads the frame's transform, custom-data, and object arrays to their GPU
/// buffers. Each upload is guarded by a capacity check so oversized data is
/// dropped for the frame rather than triggering a wgpu validation error
/// (buffers are grown separately by `resize_gpu_buffers_if_needed`).
fn write_buffer_data(
    &self,
    queue: &wgpu::Queue,
    all_transforms: &[ModelMatrix],
    all_custom_data: &[[f32; 4]],
    all_objects: &[ObjectData],
) {
    let gpu = self.gpu();
    if all_transforms.len() <= gpu.transform_buffer_size {
        queue.write_buffer(
            &gpu.transform_buffer,
            0,
            bytemuck::cast_slice(all_transforms),
        );
    }
    if all_custom_data.len() <= gpu.custom_data_buffer_size {
        queue.write_buffer(
            &gpu.custom_data_buffer,
            0,
            bytemuck::cast_slice(all_custom_data),
        );
    }
    if all_objects.len() <= gpu.object_buffer_size {
        queue.write_buffer(&gpu.object_buffer, 0, bytemuck::cast_slice(all_objects));
    }
}
/// Computes per-entity hierarchy depth and direct-parent transform indices,
/// then uploads GPU local transforms when any hierarchy exists so transforms
/// can be composed on the GPU.
///
/// Improvements over the previous version: the per-world state is borrowed
/// once per phase instead of through repeated `state_mut()` calls, the
/// already-computed depth is reused instead of being re-read from state, and
/// the two identical identity-transform fallback branches are merged.
fn build_hierarchy_data(
    &mut self,
    world: &crate::ecs::world::World,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    all_entities: &[crate::ecs::world::Entity],
) {
    let entity_count = all_entities.len();
    let mut max_depth = 0u32;
    {
        // Single mutable borrow of the per-world state for the whole scan.
        let state = self.state_mut();
        state.entity_depths.clear();
        state.entity_parent_indices.clear();
        state.entity_depths.resize(entity_count, 0);
        state.entity_parent_indices.resize(entity_count, -1);
        for (index, entity) in all_entities.iter().enumerate() {
            let mut depth = 0u32;
            let mut current = *entity;
            // Walk the parent chain to measure depth; only the direct parent
            // (depth 0) contributes a transform index for GPU composition.
            while let Some(parent) = world.core.get_parent(current) {
                let Some(parent_entity) = parent.0 else { break };
                if depth == 0
                    && let Some(&parent_transform_index) =
                        state.entity_to_transform_index.get(&parent_entity)
                {
                    state.entity_parent_indices[index] = parent_transform_index as i32;
                }
                depth += 1;
                current = parent_entity;
            }
            state.entity_depths[index] = depth;
            max_depth = max_depth.max(depth);
        }
        state.max_hierarchy_depth = max_depth.min(MAX_HIERARCHY_DEPTH);
    }
    // Flat scenes need no GPU-side hierarchy composition.
    if max_depth > 0 {
        let state = self.state();
        let mut local_transforms = Vec::with_capacity(entity_count);
        for (index, entity) in all_entities.iter().enumerate() {
            let parent_index = state.entity_parent_indices[index];
            let depth = state.entity_depths[index];
            // An entity with ancestors but no known parent transform index
            // cannot be composed on the GPU. It gets the same identity
            // transform with depth u32::MAX as entities lacking a local
            // transform — NOTE(review): presumably the hierarchy compute
            // shader treats u32::MAX depth as "skip"; confirm there.
            let local = if depth > 0 && parent_index < 0 {
                None
            } else {
                world.core.get_local_transform(*entity)
            };
            let (translation, rotation, scale, effective_depth) = match local {
                Some(local) => (
                    [
                        local.translation.x,
                        local.translation.y,
                        local.translation.z,
                    ],
                    [
                        local.rotation.i,
                        local.rotation.j,
                        local.rotation.k,
                        local.rotation.w,
                    ],
                    [local.scale.x, local.scale.y, local.scale.z],
                    depth,
                ),
                None => (
                    [0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                    [1.0, 1.0, 1.0],
                    u32::MAX,
                ),
            };
            local_transforms.push(GpuLocalTransform {
                translation,
                parent_index,
                rotation,
                scale,
                depth: effective_depth,
            });
        }
        self.upload_local_transforms(device, queue, &local_transforms);
        self.state_mut().hierarchy_dirty = true;
    }
}
/// Packs every instanced mesh's cached local matrices into one flat buffer
/// (laid out according to the recorded per-entity ranges) and uploads it,
/// marking the instanced compute pass dirty. No-op when there are no ranges.
fn upload_instanced_local_data(
    &mut self,
    world: &crate::ecs::world::World,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) {
    if self.state().instanced_transform_ranges.is_empty() {
        return;
    }
    // Snapshot the ranges so the state borrow ends before we touch `self` again.
    let ranges: Vec<(crate::ecs::world::Entity, u32, u32)> = self
        .state()
        .instanced_transform_ranges
        .iter()
        .map(|(&entity, &(start, count))| (entity, start, count))
        .collect();
    // The packed buffer must span up to the furthest range end.
    let total_len = ranges
        .iter()
        .map(|&(_, start, count)| start as usize + count as usize)
        .max()
        .unwrap_or(0);
    let mut instanced_local_data: Vec<[[f32; 4]; 4]> = vec![[[0.0; 4]; 4]; total_len];
    for &(entity, start, count) in &ranges {
        let Some(instanced_mesh) = world.core.get_instanced_mesh(entity) else {
            continue;
        };
        let base = start as usize;
        // Copy at most `count` matrices into this entity's slot range.
        for (offset, matrix) in instanced_mesh
            .cached_local_matrices()
            .iter()
            .take(count as usize)
            .enumerate()
        {
            let raw: [[f32; 4]; 4] = (*matrix).into();
            instanced_local_data[base + offset] = raw;
        }
    }
    self.upload_instanced_local_matrices(device, queue, &instanced_local_data);
    self.state_mut().instanced_compute_dirty = true;
}
/// (Re)creates the culling bind groups for the current world. Both phases
/// share the same layout and bindings 0-8; they differ only in which uniform,
/// indirect, and visible-indices buffers are bound.
///
/// Improvement: the two previously duplicated 9-entry lists are produced by
/// one shared helper, so binding indices cannot drift between the phases.
fn build_culling_bind_groups(&mut self, device: &wgpu::Device) {
    // Entry list shared by both phases; only three buffers vary per phase.
    fn culling_entries<'a>(
        transforms: &'a wgpu::Buffer,
        objects: &'a wgpu::Buffer,
        uniforms: &'a wgpu::Buffer,
        bounds: &'a wgpu::Buffer,
        indirect: &'a wgpu::Buffer,
        visible: &'a wgpu::Buffer,
        hiz: &'a wgpu::TextureView,
        aabbs: &'a wgpu::Buffer,
        lods: &'a wgpu::Buffer,
    ) -> [wgpu::BindGroupEntry<'a>; 9] {
        [
            wgpu::BindGroupEntry {
                binding: 0,
                resource: transforms.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: objects.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 2,
                resource: uniforms.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 3,
                resource: bounds.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 4,
                resource: indirect.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 5,
                resource: visible.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 6,
                resource: wgpu::BindingResource::TextureView(hiz),
            },
            wgpu::BindGroupEntry {
                binding: 7,
                resource: aabbs.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 8,
                resource: lods.as_entire_binding(),
            },
        ]
    }
    // A dummy Hi-Z view keeps the layout satisfied before the pyramid exists.
    let hiz_view = self.hiz_pass.hiz_view_or_dummy();
    let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
    let gpu = world_state.gpu_buffers.as_mut().unwrap();
    let culling_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Mesh Culling Bind Group"),
        layout: &self.culling_bind_group_layout,
        entries: &culling_entries(
            &gpu.transform_buffer,
            &gpu.object_buffer,
            &self.culling_uniform_buffer,
            &self.mesh_bounds_buffer,
            &gpu.indirect_buffer,
            &gpu.visible_indices_buffer,
            hiz_view,
            &self.mesh_aabbs_buffer,
            &self.mesh_lod_buffer,
        ),
    });
    gpu.culling_bind_group = Some(culling_bind_group);
    let phase1_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Phase 1 Culling Bind Group"),
        layout: &self.culling_bind_group_layout,
        entries: &culling_entries(
            &gpu.transform_buffer,
            &gpu.object_buffer,
            &self.phase1_culling_uniform_buffer,
            &self.mesh_bounds_buffer,
            &gpu.phase1_indirect_buffer,
            &gpu.phase1_visible_indices_buffer,
            hiz_view,
            &self.mesh_aabbs_buffer,
            &self.mesh_lod_buffer,
        ),
    });
    gpu.phase1_culling_bind_group = Some(phase1_bind_group);
}
/// Writes the culling uniforms for both culling phases from the active
/// camera. No-op when there is no active camera.
///
/// Improvement: the two uniform structs differed only in `occlusion_enabled`;
/// the second is now derived from the first with struct-update syntax so the
/// shared fields cannot diverge.
fn write_culling_uniforms(&self, world: &crate::ecs::world::World, queue: &wgpu::Queue) {
    // Nothing to cull against without an active camera.
    let Some(camera_matrices) =
        crate::ecs::camera::queries::query_active_camera_matrices(world)
    else {
        return;
    };
    let view_proj = camera_matrices.projection * camera_matrices.view;
    let frustum_planes = extract_frustum_planes(&view_proj);
    let (screen_width, screen_height) = self.hiz_pass.screen_size();
    let projection_scale_y = camera_matrices.projection[(1, 1)];
    let min_screen_pixel_size = world.resources.graphics.min_screen_pixel_size;
    // Occlusion culling additionally requires a valid screen size.
    let occlusion_enabled =
        screen_width > 0 && world.resources.graphics.occlusion_culling_enabled;
    // Phase 1 always runs with occlusion disabled. NOTE(review): presumably
    // because the Hi-Z pyramid used for occlusion is produced from phase 1's
    // own results — confirm against the culling shader/dispatch code.
    let phase1_culling_uniforms = CullingUniforms {
        frustum_planes: frustum_planes.map(|v| [v.x, v.y, v.z, v.w]),
        view_projection: view_proj.into(),
        screen_size: [screen_width as f32, screen_height as f32],
        hiz_mip_count: self.hiz_pass.mip_count() as f32,
        occlusion_enabled: 0,
        object_count: self.state().object_count,
        min_screen_pixel_size,
        projection_scale_y,
        _padding: 0,
    };
    queue.write_buffer(
        &self.phase1_culling_uniform_buffer,
        0,
        bytemuck::cast_slice(&[phase1_culling_uniforms]),
    );
    // The main pass uses identical uniforms except for the occlusion toggle.
    let culling_uniforms = CullingUniforms {
        occlusion_enabled: u32::from(occlusion_enabled),
        ..phase1_culling_uniforms
    };
    queue.write_buffer(
        &self.culling_uniform_buffer,
        0,
        bytemuck::cast_slice(&[culling_uniforms]),
    );
}
/// Grows (if needed) and fills the indirect-draw, visible-index, and
/// indirect-reset buffers for both culling phases from the frame's batch
/// list.
///
/// `all_instances` entries are `(mesh_id, material_id, start, end)` where
/// `start..end` is the object range of the batch (see `build_batch_objects`);
/// `start <= end` is assumed — `end - start` would underflow otherwise.
/// Resizing a buffer clears the cached culling bind groups so they get
/// rebuilt against the new buffer handles.
fn resize_and_write_indirect_buffers(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
all_instances: &[(u32, u32, u32, u32)],
) {
let total_batch_count = all_instances.len();
// Grow the indirect buffers (both phases plus the phase-1 reset copy)
// together, with growth-factor headroom capped at MAX_INSTANCES.
if total_batch_count > self.gpu().indirect_buffer_size {
let new_size = std::cmp::min(
(total_batch_count as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize,
MAX_INSTANCES,
);
self.gpu_mut().indirect_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Indirect Draw Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::INDIRECT
| wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
self.gpu_mut().indirect_buffer_size = new_size;
self.gpu_mut().phase1_indirect_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Indirect Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::INDIRECT
| wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
// Reset buffers are copy-only sources (no STORAGE/INDIRECT usage).
self.gpu_mut().phase1_indirect_reset_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Indirect Reset Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>() * new_size) as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
// Bind groups reference the old buffers; drop them so they are rebuilt.
self.gpu_mut().culling_bind_group = None;
self.gpu_mut().phase1_culling_bind_group = None;
}
// The visible-indices buffers need one u32 slot per instance across all
// batches.
let total_visible_slots: usize = all_instances
.iter()
.map(|&(_, _, start, end)| (end - start) as usize)
.sum();
let visible_indices_buffer_size =
(self.gpu().visible_indices_buffer.size() / std::mem::size_of::<u32>() as u64) as usize;
if total_visible_slots > visible_indices_buffer_size {
let new_size = (total_visible_slots as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
self.gpu_mut().visible_indices_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Visible Indices Buffer (Resized)"),
size: (std::mem::size_of::<u32>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
self.gpu_mut().phase1_visible_indices_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Visible Indices Buffer (Resized)"),
size: (std::mem::size_of::<u32>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
self.rebuild_instance_bind_group(device);
self.gpu_mut().culling_bind_group = None;
self.gpu_mut().phase1_culling_bind_group = None;
}
if !all_instances.is_empty() {
// Mesh ids that are non-primary LOD levels: every id after the first in
// each LOD chain.
let lod_sub_mesh_ids: std::collections::HashSet<u32> = self
.mesh_lod_mesh_ids
.values()
.flat_map(|ids| ids.iter().skip(1).copied())
.collect();
let mut indirect_commands = Vec::with_capacity(total_batch_count);
let mut indirect_reset_commands = Vec::with_capacity(total_batch_count);
let mut visible_indices = Vec::with_capacity(total_visible_slots);
for &(mesh_id, _material_id, start, end) in all_instances.iter() {
let mesh_data = &self.mesh_data[mesh_id as usize];
let instance_count = end - start;
let first_instance = visible_indices.len() as u32;
let is_lod_sub = lod_sub_mesh_ids.contains(&mesh_id);
// LOD sub-meshes start with zero instances. NOTE(review): presumably
// the GPU culling/LOD-selection pass raises the count when that LOD is
// chosen — confirm against the culling shader.
indirect_commands.push(DrawIndexedIndirect {
index_count: mesh_data.index_count,
instance_count: if is_lod_sub { 0 } else { instance_count },
first_index: mesh_data.index_offset,
base_vertex: mesh_data.vertex_offset as i32,
first_instance,
});
// Reset commands are identical but with instance_count forced to 0,
// for clearing GPU-written counts between dispatches.
indirect_reset_commands.push(DrawIndexedIndirect {
index_count: mesh_data.index_count,
instance_count: 0,
first_index: mesh_data.index_offset,
base_vertex: mesh_data.vertex_offset as i32,
first_instance,
});
// Initial visible list is simply every object index in batch order.
for object_index in start..end {
visible_indices.push(object_index);
}
}
queue.write_buffer(
&self.gpu().indirect_buffer,
0,
bytemuck::cast_slice(&indirect_commands),
);
queue.write_buffer(
&self.gpu().visible_indices_buffer,
0,
bytemuck::cast_slice(&visible_indices),
);
queue.write_buffer(
&self.gpu().phase1_visible_indices_buffer,
0,
bytemuck::cast_slice(&visible_indices),
);
if total_batch_count > 0 {
let reset_buffer_size = (self.gpu().indirect_reset_buffer.size()
/ std::mem::size_of::<DrawIndexedIndirect>() as u64)
as usize;
// Resize the reset buffers to match indirect_buffer_size (already
// grown above), keeping both in sync with the indirect buffers.
if total_batch_count > reset_buffer_size {
self.gpu_mut().indirect_reset_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Indirect Reset Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>()
* self.gpu().indirect_buffer_size)
as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
self.gpu_mut().phase1_indirect_reset_buffer =
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Phase 1 Indirect Reset Buffer (Resized)"),
size: (std::mem::size_of::<DrawIndexedIndirect>()
* self.gpu().indirect_buffer_size)
as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
}
queue.write_buffer(
&self.gpu().indirect_reset_buffer,
0,
bytemuck::cast_slice(&indirect_reset_commands),
);
queue.write_buffer(
&self.gpu().phase1_indirect_reset_buffer,
0,
bytemuck::cast_slice(&indirect_reset_commands),
);
self.state_mut().indirect_reset_count = total_batch_count;
}
}
}
/// Refreshes the per-world material bind-group cache: prunes entries for
/// material ids that no longer exist, then (re)creates a bind group for every
/// material whose cached entry is missing or stale. Clears the
/// newly-registered-texture set once processed.
///
/// A cached bind group is considered stale when its texture key changed, it
/// references a texture registered this frame, or a previously missing
/// texture has since become available.
fn update_material_bind_groups(
    &mut self,
    device: &wgpu::Device,
    materials_data: &[MaterialData],
    material_texture_map: &HashMap<u32, MaterialTextures>,
) {
    let material_count = materials_data.len() as u32;
    // Shared lookup context; dummy views stand in for absent textures.
    let ctx = MaterialBindGroupContext {
        registered_textures: &self.registered_textures,
        dummy_white_view: &self.dummy_white_view,
        dummy_black_view: &self.dummy_black_view,
        dummy_normal_view: &self.dummy_normal_view,
        dummy_sampler: &self.dummy_sampler,
        texture_bind_group_layout: &self.texture_bind_group_layout,
    };
    let world_state = self.world_states.get_mut(&self.current_world_id).unwrap();
    // Drop cache entries for material ids beyond this frame's material count.
    world_state
        .material_bind_groups
        .retain(|&id, _| id < material_count);
    world_state
        .material_bind_group_cache_key
        .retain(|&id, _| id < material_count);
    world_state
        .material_bind_group_found_textures
        .retain(|&id, _| id < material_count);
    for material_id in 0..material_count {
        let cache_key = material_texture_map
            .get(&material_id)
            .cloned()
            .unwrap_or_default();
        let prev_found = world_state
            .material_bind_group_found_textures
            .get(&material_id)
            .cloned()
            .unwrap_or_default();
        let textures_became_available =
            cache_key.has_newly_available_textures(&prev_found, ctx.registered_textures);
        let cached = world_state.material_bind_groups.contains_key(&material_id);
        let key_unchanged =
            world_state.material_bind_group_cache_key.get(&material_id) == Some(&cache_key);
        let touches_new_texture = cache_key.contains_any(&self.newly_registered_textures);
        // Keep the cached bind group only if nothing relevant changed.
        if cached && key_unchanged && !touches_new_texture && !textures_became_available {
            continue;
        }
        let (bind_group, found_textures) =
            MeshPass::create_material_bind_group(device, material_id, &cache_key, &ctx);
        world_state
            .material_bind_groups
            .insert(material_id, bind_group);
        world_state
            .material_bind_group_cache_key
            .insert(material_id, cache_key);
        world_state
            .material_bind_group_found_textures
            .insert(material_id, found_textures);
    }
    // All pending texture registrations have now been accounted for.
    self.newly_registered_textures.clear();
}
pub(in super::super) fn prepare_pass_node(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
world: &crate::ecs::world::World,
) {
self.current_world_id = world.resources.world_id;
self.frame_counter += 1;
if self.frame_counter.is_multiple_of(300) {
self.cleanup_stale_world_states(600);
}
self.ensure_world_gpu_buffers(device, self.current_world_id);
self.state_mut().last_used_frame = self.frame_counter;
self.last_prepared_world_id = Some(self.current_world_id);
self.refresh_material_bind_groups_for_world(device);
if let Some(fd) = self.frame_dirty.as_ref()
&& fd.instanced_compute_dispatch_needed
{
self.state_mut().instanced_compute_dirty = true;
}
let frame_initialized = self
.frame_dirty
.as_ref()
.is_some_and(|fd| fd.frame_initialized);
let has_any_changes = self.frame_dirty.as_ref().is_some_and(|fd| {
!fd.transform_dirty.is_empty()
|| !fd.material_dirty.is_empty()
|| !fd.entities_added.is_empty()
|| !fd.entities_removed.is_empty()
|| fd.batches_invalidated
|| fd.full_rebuild_needed
|| fd.instanced_meshes_changed
}) || self.resize_full_rebuild_pending;
if self.resize_full_rebuild_pending {
if let Some(fd) = self.frame_dirty.as_mut() {
fd.full_rebuild_needed = true;
}
self.resize_full_rebuild_pending = false;
}
if frame_initialized && !has_any_changes {
self.prepare_uniforms_and_lights(device, queue, world);
return;
}
if self.can_do_incremental_update() && self.update_dirty_transforms(world, queue) {
self.prepare_uniforms_and_lights(device, queue, world);
return;
}
self.sync_meshes_from_cache(device, queue, &world.resources.mesh_cache);
self.sync_textures(&world.resources.texture_cache);
if self.can_do_incremental_entity_update(world) {
self.incremental_update_entities(world, device, queue);
self.prepare_uniforms_and_lights(device, queue, world);
return;
}
if self.can_do_rebatch_only() {
self.rebatch_cached_entities(world, device, queue);
self.prepare_uniforms_and_lights(device, queue, world);
return;
}
self.compaction_frame_counter += 1;
if self.compaction_frame_counter >= COMPACTION_MIN_CHECK_INTERVAL {
let current_vertex_utilization = self.compute_vertex_utilization();
let current_index_utilization = self.compute_index_utilization();
let vertex_drop = self.last_vertex_utilization - current_vertex_utilization;
let index_drop = self.last_index_utilization - current_index_utilization;
let significant_drop = vertex_drop > COMPACTION_UTILIZATION_DROP_THRESHOLD
|| index_drop > COMPACTION_UTILIZATION_DROP_THRESHOLD;
let below_threshold = current_vertex_utilization < BUFFER_SHRINK_THRESHOLD
|| current_index_utilization < BUFFER_SHRINK_THRESHOLD;
if significant_drop || below_threshold {
self.compaction_frame_counter = 0;
self.last_vertex_utilization = current_vertex_utilization;
self.last_index_utilization = current_index_utilization;
self.check_and_compact_buffers(device, queue, &world.resources.mesh_cache);
} else {
self.last_vertex_utilization = current_vertex_utilization;
self.last_index_utilization = current_index_utilization;
}
}
let light_result = collect_lights(world, MAX_LIGHTS);
let mut lights_data = light_result.lights_data;
let directional_light = light_result.directional_light;
let entity_to_lights_index = light_result.entity_to_index;
let directional_light_direction = directional_light
.as_ref()
.map(|(_light, transform)| {
let dir = transform.forward_vector();
[dir.x, dir.y, dir.z, 0.0]
})
.unwrap_or([0.0, -1.0, 0.0, 0.0]);
let cascade_result = calculate_cascade_shadows(world, directional_light.as_ref());
let cascade_view_projections = cascade_result.cascade_view_projections;
let cascade_diameters = cascade_result.cascade_diameters;
let light_view_projection = cascade_result.light_view_projection;
let shadow_bias = cascade_result.shadow_bias;
let shadows_enabled = cascade_result.shadows_enabled;
let cascade_texture_resolution = if cfg!(target_arch = "wasm32") {
2048.0
} else {
4096.0
};
let cascade_atlas_offsets: [[f32; 4]; crate::render::wgpu::passes::NUM_SHADOW_CASCADES] = [
[
0.0,
0.0,
cascade_diameters[0] / cascade_texture_resolution,
0.0,
],
[
0.5,
0.0,
cascade_diameters[1] / cascade_texture_resolution,
0.0,
],
[
0.0,
0.5,
cascade_diameters[2] / cascade_texture_resolution,
0.0,
],
[
0.5,
0.5,
cascade_diameters[3] / cascade_texture_resolution,
0.0,
],
];
if let Some(camera_matrices) =
crate::ecs::camera::queries::query_active_camera_matrices(world)
{
let global_unlit = if world.resources.graphics.unlit_mode {
1.0
} else {
0.0
};
let (snap_resolution, snap_enabled) =
if let Some(ref vertex_snap) = world.resources.graphics.vertex_snap {
(vertex_snap.resolution, 1)
} else {
([320.0, 240.0], 0)
};
let affine_enabled = if world.resources.graphics.affine_texture_mapping {
1
} else {
0
};
let (fog_color, fog_enabled, fog_start, fog_end) =
if let Some(ref fog) = world.resources.graphics.fog {
(fog.color, 1, fog.start, fog.end)
} else {
([0.5, 0.5, 0.6], 0, 5.0, 30.0)
};
let time = world.resources.window.timing.uptime_milliseconds as f32 / 1000.0;
let uniforms = MeshUniforms {
view: camera_matrices.view.into(),
projection: camera_matrices.projection.into(),
camera_position: [
camera_matrices.camera_position.x,
camera_matrices.camera_position.y,
camera_matrices.camera_position.z,
1.0,
],
num_lights: [lights_data.len() as u32, 0, 0, 0],
ambient_light: world.resources.graphics.ambient_light,
light_view_projection,
shadow_bias,
shadows_enabled,
global_unlit,
shadow_normal_bias: 1.8,
snap_resolution,
snap_enabled,
affine_enabled,
fog_color,
fog_enabled,
fog_start,
fog_end,
cascade_count: crate::render::wgpu::passes::NUM_SHADOW_CASCADES as u32,
directional_light_size: 1.0,
cascade_view_projections,
cascade_split_distances: CASCADE_SPLIT_DISTANCES,
cascade_atlas_offsets,
cascade_atlas_scale: [0.5, 0.5, 0.0, 0.0],
time,
pbr_debug_mode: world.resources.graphics.pbr_debug_mode.as_u32(),
texture_debug_stripes: world.resources.graphics.texture_debug_stripes as u32,
texture_debug_stripes_speed: world.resources.graphics.texture_debug_stripes_speed,
directional_light_direction,
ibl_blend_factor: world.resources.graphics.ibl_blend_factor,
_padding3: [0.0; 19],
};
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[uniforms]));
let overlay_uniforms = MeshUniforms {
shadows_enabled: 0.0,
..uniforms
};
queue.write_buffer(
&self.overlay_uniform_buffer,
0,
bytemuck::cast_slice(&[overlay_uniforms]),
);
}
let camera_position = world
.resources
.active_camera
.and_then(|cam| world.core.get_global_transform(cam))
.map(|t| nalgebra_glm::vec3(t.0[(0, 3)], t.0[(1, 3)], t.0[(2, 3)]))
.unwrap_or_else(|| nalgebra_glm::vec3(0.0, 0.0, 0.0));
let spotlight_result = collect_spotlight_shadows(world, camera_position);
apply_spotlight_shadow_indices(
&mut lights_data,
&spotlight_result.entity_to_shadow_index,
&entity_to_lights_index,
);
if !spotlight_result.shadow_data.is_empty() {
queue.write_buffer(
&self.spotlight_shadow_buffer,
0,
bytemuck::cast_slice(&spotlight_result.shadow_data),
);
}
let point_shadow_result = collect_point_light_shadows(
world,
camera_position,
&mut lights_data,
&entity_to_lights_index,
);
if !point_shadow_result.is_empty() {
queue.write_buffer(
&self.point_shadow_buffer,
0,
bytemuck::cast_slice(&point_shadow_result),
);
}
if !lights_data.is_empty() {
if lights_data.len() > self.gpu().lights_buffer_size {
let new_size = (lights_data.len() as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
let new_size = new_size.min(MAX_LIGHTS);
let new_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Lights Buffer (Resized)"),
size: (std::mem::size_of::<LightData>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
self.gpu_mut().lights_buffer = new_buffer;
self.gpu_mut().lights_buffer_size = new_size;
self.rebuild_instance_bind_group(device);
}
queue.write_buffer(
&self.gpu().lights_buffer,
0,
bytemuck::cast_slice(&lights_data),
);
}
let mesh_entities: Vec<_> = world
.core
.query_entities(crate::ecs::world::RENDER_MESH)
.filter(|&entity| {
!world
.core
.entity_has_components(entity, crate::ecs::world::SKIN)
&& !world
.core
.entity_has_components(entity, crate::ecs::world::WATER)
})
.collect();
let instanced_mesh_entities_for_materials: Vec<_> = world
.core
.query_entities(crate::ecs::world::INSTANCED_MESH)
.collect();
let mut materials = self.collect_materials(
world,
&mesh_entities,
&instanced_mesh_entities_for_materials,
);
{
let gpu = self.gpu_mut();
if materials.materials_data.len() > gpu.materials_buffer_size {
let new_size =
(materials.materials_data.len() as f32 * BUFFER_GROWTH_FACTOR).ceil() as usize;
gpu.materials_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Mesh Materials Buffer (Per-World, Resized)"),
size: (std::mem::size_of::<MaterialData>() * new_size) as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
gpu.materials_buffer_size = new_size;
}
queue.write_buffer(
&gpu.materials_buffer,
0,
bytemuck::cast_slice(&materials.materials_data),
);
}
self.rebuild_instance_bind_group(device);
self.resolve_lod_chains(&world.resources.graphics.mesh_lod_chains, queue);
let mut buckets = self.classify_mesh_entities(world, &mesh_entities, &mut materials);
let instanced_mesh_entities: Vec<_> = world
.core
.query_entities(crate::ecs::world::INSTANCED_MESH)
.collect();
let mut accum = BatchAccumulator {
transforms: Vec::new(),
custom_data: Vec::new(),
objects: Vec::new(),
entities: Vec::new(),
};
let mut opaque_instances = Vec::new();
let mut opaque_double_sided_instances = Vec::new();
let mut transparent_instances = Vec::new();
let mut overlay_opaque_instances = Vec::new();
let mut overlay_opaque_double_sided_instances = Vec::new();
let mut overlay_transparent_instances = Vec::new();
self.build_batch_objects(
world,
std::mem::take(&mut buckets.world_opaque),
&materials.material_map,
&BatchConfig {
is_overlay: 0,
skip_occlusion: 0,
batch_offset: 0,
},
&mut accum,
&mut opaque_instances,
);
self.build_batch_objects(
world,
std::mem::take(&mut buckets.world_opaque_double_sided),
&materials.material_map,
&BatchConfig {
is_overlay: 0,
skip_occlusion: 0,
batch_offset: opaque_instances.len(),
},
&mut accum,
&mut opaque_double_sided_instances,
);
self.build_batch_objects(
world,
std::mem::take(&mut buckets.world_transparent),
&materials.material_map,
&BatchConfig {
is_overlay: 0,
skip_occlusion: 1,
batch_offset: opaque_instances.len() + opaque_double_sided_instances.len(),
},
&mut accum,
&mut transparent_instances,
);
self.build_batch_objects(
world,
std::mem::take(&mut buckets.overlay_opaque),
&materials.material_map,
&BatchConfig {
is_overlay: 1,
skip_occlusion: 0,
batch_offset: opaque_instances.len()
+ opaque_double_sided_instances.len()
+ transparent_instances.len(),
},
&mut accum,
&mut overlay_opaque_instances,
);
self.build_batch_objects(
world,
std::mem::take(&mut buckets.overlay_opaque_double_sided),
&materials.material_map,
&BatchConfig {
is_overlay: 1,
skip_occlusion: 0,
batch_offset: opaque_instances.len()
+ opaque_double_sided_instances.len()
+ transparent_instances.len()
+ overlay_opaque_instances.len(),
},
&mut accum,
&mut overlay_opaque_double_sided_instances,
);
self.build_batch_objects(
world,
std::mem::take(&mut buckets.overlay_transparent),
&materials.material_map,
&BatchConfig {
is_overlay: 1,
skip_occlusion: 0,
batch_offset: opaque_instances.len()
+ opaque_double_sided_instances.len()
+ transparent_instances.len()
+ overlay_opaque_instances.len()
+ overlay_opaque_double_sided_instances.len(),
},
&mut accum,
&mut overlay_transparent_instances,
);
let lod_sub_mesh_ids: std::collections::HashSet<u32> = self
.mesh_lod_mesh_ids
.values()
.flat_map(|ids| ids.iter().skip(1).copied())
.collect();
if !transparent_instances.is_empty() && !self.mesh_bounds_data.is_empty() {
let batch_offset =
(opaque_instances.len() + opaque_double_sided_instances.len()) as u32;
sort_transparent_batches_by_depth(
&mut transparent_instances,
&accum.transforms,
&self.mesh_bounds_data,
&camera_position,
batch_offset,
&mut accum.objects,
&lod_sub_mesh_ids,
);
}
if !overlay_transparent_instances.is_empty() && !self.mesh_bounds_data.is_empty() {
let batch_offset = (opaque_instances.len()
+ opaque_double_sided_instances.len()
+ transparent_instances.len()
+ overlay_opaque_instances.len()
+ overlay_opaque_double_sided_instances.len())
as u32;
sort_transparent_batches_by_depth(
&mut overlay_transparent_instances,
&accum.transforms,
&self.mesh_bounds_data,
&camera_position,
batch_offset,
&mut accum.objects,
&lod_sub_mesh_ids,
);
}
self.state_mut().regular_object_count = accum.transforms.len() as u32;
self.state_mut().instanced_transform_ranges.clear();
let mut instanced_opaque_batches = Vec::new();
let mut instanced_opaque_double_sided_batches = Vec::new();
let mut instanced_transparent_batches = Vec::new();
let instanced_batch_offset = opaque_instances.len()
+ opaque_double_sided_instances.len()
+ transparent_instances.len()
+ overlay_opaque_instances.len()
+ overlay_opaque_double_sided_instances.len()
+ overlay_transparent_instances.len();
for &entity in &instanced_mesh_entities {
if let Some(visibility) = world.core.get_visibility(entity)
&& !visibility.visible
{
continue;
}
let Some(instanced_mesh) = world.core.get_instanced_mesh(entity) else {
continue;
};
if instanced_mesh.instances.is_empty() {
continue;
}
let Some(&mesh_id) = self.meshes.get(&instanced_mesh.mesh_name) else {
continue;
};
let material_id = *materials.material_map.get(&entity).unwrap_or(&0);
let material = resolve_material_for_entity(world, entity);
let is_transparent = material.is_some_and(|m| {
m.alpha_mode == crate::ecs::material::components::AlphaMode::Blend
|| m.transmission_factor > 0.0
});
let is_double_sided = material.is_some_and(|m| m.double_sided);
let start = accum.objects.len() as u32;
let base_transform_index = accum.transforms.len() as u32;
let cached_model_matrices = instanced_mesh.cached_model_matrices();
let custom_data_slice = instanced_mesh.custom_data_slice();
let instance_count = cached_model_matrices.len();
self.state_mut()
.instanced_transform_ranges
.insert(entity, (base_transform_index, instance_count as u32));
accum
.transforms
.extend(
cached_model_matrices
.iter()
.map(|instance_matrix| ModelMatrix {
model: instance_matrix.model,
normal_matrix: instance_matrix.normal_matrix,
}),
);
accum
.custom_data
.extend(custom_data_slice.iter().map(|data| data.tint));
let batch_id = if is_transparent {
(instanced_batch_offset
+ instanced_opaque_batches.len()
+ instanced_opaque_double_sided_batches.len()
+ instanced_transparent_batches.len()) as u32
} else if is_double_sided {
(instanced_batch_offset
+ instanced_opaque_batches.len()
+ instanced_opaque_double_sided_batches.len()) as u32
} else {
(instanced_batch_offset + instanced_opaque_batches.len()) as u32
};
let instanced_mesh_data = &self.mesh_data[mesh_id as usize];
let instanced_entity_id = entity.id;
accum
.objects
.extend((0..instance_count as u32).map(|index| ObjectData {
transform_index: base_transform_index + index,
mesh_id,
material_id,
batch_id,
morph_weights: [0.0f32; 8],
morph_target_count: instanced_mesh_data.morph_target_count,
morph_displacement_offset: instanced_mesh_data.morph_displacement_offset,
mesh_vertex_offset: instanced_mesh_data.vertex_offset,
mesh_vertex_count: instanced_mesh_data.vertex_count,
entity_id: instanced_entity_id,
is_overlay: 0,
skip_occlusion: u32::from(is_transparent),
_padding0: 0,
}));
let end = accum.objects.len() as u32;
if start < end {
if is_transparent {
self.push_batch_with_lod(
&mut instanced_transparent_batches,
mesh_id,
material_id,
start,
end,
);
} else if is_double_sided {
self.push_batch_with_lod(
&mut instanced_opaque_double_sided_batches,
mesh_id,
material_id,
start,
end,
);
} else {
self.push_batch_with_lod(
&mut instanced_opaque_batches,
mesh_id,
material_id,
start,
end,
);
}
}
}
let all_custom_data = accum.custom_data;
if !accum.objects.is_empty() {
self.resize_gpu_buffers_if_needed(device, &accum.transforms, &accum.objects);
self.write_buffer_data(queue, &accum.transforms, &all_custom_data, &accum.objects);
self.state_mut().object_count = accum.objects.len() as u32;
let all_entities = accum.entities.clone();
self.state_mut().cached_transforms = accum.transforms;
self.state_mut().cached_objects = accum.objects;
self.state_mut().cached_entities = all_entities.clone();
self.state_mut().entity_to_transform_index.clear();
for (index, entity) in all_entities.iter().enumerate() {
self.state_mut()
.entity_to_transform_index
.insert(*entity, index as u32);
}
self.build_hierarchy_data(world, device, queue, &all_entities);
self.upload_instanced_local_data(world, device, queue);
self.build_culling_bind_groups(device);
self.write_culling_uniforms(world, queue);
}
self.state_mut().opaque_instances = opaque_instances;
self.state_mut().opaque_double_sided_instances = opaque_double_sided_instances;
self.state_mut().transparent_instances = transparent_instances;
self.state_mut().overlay_opaque_instances = overlay_opaque_instances;
self.state_mut().overlay_opaque_double_sided_instances =
overlay_opaque_double_sided_instances;
self.state_mut().overlay_transparent_instances = overlay_transparent_instances;
self.state_mut().instanced_opaque_batches = instanced_opaque_batches;
self.state_mut().instanced_opaque_double_sided_batches =
instanced_opaque_double_sided_batches;
self.state_mut().instanced_transparent_batches = instanced_transparent_batches;
let all_instances: Vec<_> = self
.state()
.opaque_instances
.iter()
.chain(self.state().opaque_double_sided_instances.iter())
.chain(self.state().transparent_instances.iter())
.chain(self.state().overlay_opaque_instances.iter())
.chain(self.state().overlay_opaque_double_sided_instances.iter())
.chain(self.state().overlay_transparent_instances.iter())
.chain(self.state().instanced_opaque_batches.iter())
.chain(self.state().instanced_opaque_double_sided_batches.iter())
.chain(self.state().instanced_transparent_batches.iter())
.cloned()
.collect();
self.resize_and_write_indirect_buffers(device, queue, &all_instances);
self.update_material_bind_groups(
device,
&materials.materials_data,
&materials.material_texture_map,
);
for (name, &mat_id) in &materials.name_to_material_id {
if let Some(material) =
registry_entry_by_name(&world.resources.material_registry.registry, name)
{
if material.alpha_mode == crate::ecs::material::components::AlphaMode::Blend
|| material.transmission_factor > 0.0
{
materials.transparent_material_ids.insert(mat_id);
}
if material.double_sided {
materials.double_sided_material_ids.insert(mat_id);
}
}
}
self.state_mut().cached_name_to_material_id = materials.name_to_material_id;
self.state_mut().cached_material_map = materials.material_map;
self.state_mut().cached_custom_data = all_custom_data;
self.state_mut().cached_transparent_material_ids = materials.transparent_material_ids;
self.state_mut().cached_double_sided_material_ids = materials.double_sided_material_ids;
self.state_mut().frames_since_full_rebuild = 0;
self.populate_gpu_registry_from_instances();
self.prepare_uniforms_and_lights(device, queue, world);
}
}
/// Reorders transparent draw batches back-to-front relative to the camera and
/// rewrites the `batch_id` of the affected objects to match their new slot.
///
/// Batches are `(mesh_id, material_id, object_start, object_end)` tuples. A batch
/// and the LOD sub-mesh batches that immediately follow it (sharing the same
/// object range) are treated as one indivisible group so LOD levels stay adjacent
/// after sorting. Depth is the squared distance from the camera to the
/// transformed bounds center of each group's lead batch; squared distance is
/// sufficient because only the relative order matters.
fn sort_transparent_batches_by_depth(
    batches: &mut [(u32, u32, u32, u32)],
    transforms: &[ModelMatrix],
    mesh_bounds: &[MeshBoundsData],
    camera_position: &nalgebra_glm::Vec3,
    batch_offset: u32,
    all_objects: &mut [ObjectData],
    lod_sub_mesh_ids: &std::collections::HashSet<u32>,
) {
    // Partition the batch list into runs of [start, end) indices. Each run opens
    // with an arbitrary batch and absorbs every following LOD sub-mesh batch that
    // covers the identical object range (fields .2 and .3).
    let mut groups: Vec<(usize, usize)> = Vec::new();
    let mut cursor = 0;
    while cursor < batches.len() {
        let run_start = cursor;
        cursor += 1;
        while cursor < batches.len() {
            let candidate = &batches[cursor];
            let anchor = &batches[run_start];
            let same_range = candidate.2 == anchor.2 && candidate.3 == anchor.3;
            if !(lod_sub_mesh_ids.contains(&candidate.0) && same_range) {
                break;
            }
            cursor += 1;
        }
        groups.push((run_start, cursor));
    }
    // Squared camera distance for each group, keyed by group index. Groups whose
    // mesh bounds or lead transform fall outside the available arrays get depth
    // 0.0, matching the guarded-index behavior of the lookup.
    let mut ordered: Vec<(usize, f32)> = Vec::with_capacity(groups.len());
    for (group_index, &(lead, _)) in groups.iter().enumerate() {
        let (mesh_id, _, start, _) = batches[lead];
        let mut depth = 0.0;
        if let (Some(bounds), Some(transform)) = (
            mesh_bounds.get(mesh_id as usize),
            transforms.get(start as usize),
        ) {
            let model: nalgebra_glm::Mat4 = transform.model.into();
            let local_center =
                nalgebra_glm::vec4(bounds.center[0], bounds.center[1], bounds.center[2], 1.0);
            let world_center = model * local_center;
            depth = nalgebra_glm::distance2(
                &nalgebra_glm::vec3(world_center[0], world_center[1], world_center[2]),
                camera_position,
            );
        }
        ordered.push((group_index, depth));
    }
    // Farthest first (back-to-front). A stable sort keeps equal-depth groups in
    // their original relative order; NaN comparisons fall back to Equal.
    ordered.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
    // Rebuild the batch slice from a snapshot, emitting whole groups in sorted order.
    let snapshot = batches.to_vec();
    let mut dest = 0;
    for &(group_index, _) in &ordered {
        let (lo, hi) = groups[group_index];
        let width = hi - lo;
        batches[dest..dest + width].copy_from_slice(&snapshot[lo..hi]);
        dest += width;
    }
    // Point every object at its batch's new indirect-draw slot. LOD sub-mesh
    // batches are skipped: their objects keep the batch_id already assigned.
    for (position, &(mesh_id, _, start, end)) in batches.iter().enumerate() {
        if lod_sub_mesh_ids.contains(&mesh_id) {
            continue;
        }
        let assigned = batch_offset + position as u32;
        for object in &mut all_objects[start as usize..end as usize] {
            object.batch_id = assigned;
        }
    }
}