gloss_renderer/forward_renderer/render_passes/upload_pass.rs
1#![allow(clippy::cast_precision_loss)]
2
3extern crate nalgebra as na;
4
5// use crate::backend_specific_action;
6
7use crate::{
8 camera::Camera,
9 components::{
10 Colors, ColorsGPU, DiffuseImg, DiffuseTex, Edges, EdgesV1, EdgesV1GPU, EdgesV2, EdgesV2GPU, EnvironmentMap, EnvironmentMapGpu, Faces,
11 FacesGPU, GenericImageGetter, GpuAtrib, LightEmit, MeshColorType, Name, NormalImg, NormalTex, Normals, NormalsGPU, PosLookat, Projection,
12 ProjectionWithFov, Renderable, RoughnessImg, RoughnessTex, ShadowCaster, Tangents, TangentsGPU, TextureGetter, UVs, UVsGPU, Verts, VertsGPU,
13 VisMesh,
14 },
15 config::RenderConfig,
16 scene::Scene,
17};
18
19// use abi_stable::reexports::SelfOps;
20use easy_wgpu::{
21 bind_group::BindGroupBuilder,
22 bind_group_layout::{BindGroupLayoutBuilder, BindGroupLayoutDesc},
23 buffer::Buffer,
24 gpu::Gpu,
25 mipmap::RenderMipmapGenerator,
26 texture::Texture,
27};
28use gloss_utils::tensor::{DynamicMatrixOps, DynamicTensorFloat2D, DynamicTensorOps};
29
30use gloss_hecs::{Changed, CommandBuffer, Component, Entity};
31use gloss_utils::numerical::{align, align_usz};
32use log::{debug, info, warn};
33#[cfg(not(target_arch = "wasm32"))]
34use pollster::FutureExt;
35use std::collections::HashMap;
36#[cfg(target_arch = "wasm32")]
37use std::sync::mpsc;
38#[cfg(target_arch = "wasm32")]
39use wasm_bindgen_futures;
40use wgpu::util::DeviceExt;
41
42use encase;
43
44 pub const MAX_NUM_LIGHTS: usize = 20; //values lower than 20 cause wasm to throw an error because the uniform buffer ends up too small
45pub const MAX_NUM_SHADOWS: usize = 3; //HAS to be lower than MAX_NUM_LIGHTS.
50
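/// Gathers the vertex rows referenced by one column of an edge-index matrix: for `col_id = 0`
/// it returns the positions of the first endpoint of every edge, stacked row by row.
///
/// A minimal sketch of the expected behaviour (hypothetical values, not taken from any asset):
/// ```ignore
/// // three 3D vertices and two edges (0,1) and (1,2)
/// let verts = na::DMatrix::from_row_slice(3, 3, &[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0]);
/// let edges = na::DMatrix::from_row_slice(2, 2, &[0u32, 1, 1, 2]);
/// let v1 = index_vertices_from_edges(&verts, &edges, 0); // rows of vertices 0 and 1
/// assert_eq!(v1.nrows(), 2);
/// ```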
51pub fn index_vertices_from_edges(matrix: &na::DMatrix<f32>, v_indices: &na::DMatrix<u32>, col_id: usize) -> na::DMatrix<f32> {
52 let index_slice = v_indices.column(col_id).into_owned();
53 let indices: Vec<usize> = index_slice.iter().copied().map(|x| x as usize).collect();
54
55 // Select rows based on indices
56 let mut selected_rows = Vec::new();
57 for &index in &indices {
58 let row = matrix.row(index);
59 selected_rows.push(row);
60 }
61 na::DMatrix::from_rows(&selected_rows)
62}
63
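/// Small abstraction over the per-texture-type bookkeeping (diffuse, normal, roughness):
/// which image/texture components to query, whether the data is sRGB, and, on wasm,
/// which mpsc receiver collects the asynchronously created texture.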
64trait TextureUploadable {
65 type Img: Component + Clone + GenericImageGetter;
66 type Tex: Component + Clone + TextureGetter;
67
68 fn tex_name() -> &'static str;
69 fn new_tex(texture: Texture) -> Self::Tex;
70 fn is_srgb() -> bool;
71 #[cfg(target_arch = "wasm32")]
72 fn texture_receiver(upload_pass: &mut UploadPass) -> &mut Option<mpsc::Receiver<(Entity, Self::Tex)>>;
73}
74
75struct DiffuseUploadable;
76impl TextureUploadable for DiffuseUploadable {
77 type Img = DiffuseImg;
78 type Tex = DiffuseTex;
79
80 fn tex_name() -> &'static str {
81 "diffuse"
82 }
83 fn new_tex(texture: Texture) -> Self::Tex {
84 DiffuseTex(texture)
85 }
86
87 fn is_srgb() -> bool {
88 true
89 }
90
91 #[cfg(target_arch = "wasm32")]
92 fn texture_receiver(upload_pass: &mut UploadPass) -> &mut Option<mpsc::Receiver<(Entity, Self::Tex)>> {
93 &mut upload_pass.diffuse_receiver
94 }
95}
96
97struct NormalUploadable;
98impl TextureUploadable for NormalUploadable {
99 type Img = NormalImg;
100 type Tex = NormalTex;
101
102 fn tex_name() -> &'static str {
103 "normal"
104 }
105 fn new_tex(texture: Texture) -> Self::Tex {
106 NormalTex(texture)
107 }
108
109 fn is_srgb() -> bool {
110 false
111 }
112
113 #[cfg(target_arch = "wasm32")]
114 fn texture_receiver(upload_pass: &mut UploadPass) -> &mut Option<mpsc::Receiver<(Entity, Self::Tex)>> {
115 &mut upload_pass.normal_receiver
116 }
117}
118
119struct RoughnessUploadable;
120impl TextureUploadable for RoughnessUploadable {
121 type Img = RoughnessImg;
122 type Tex = RoughnessTex;
123
124 fn tex_name() -> &'static str {
125 "roughness"
126 }
127 fn new_tex(texture: Texture) -> Self::Tex {
128 RoughnessTex(texture)
129 }
130
131 fn is_srgb() -> bool {
132 false
133 }
134
135 #[cfg(target_arch = "wasm32")]
136 fn texture_receiver(upload_pass: &mut UploadPass) -> &mut Option<mpsc::Receiver<(Entity, Self::Tex)>> {
137 &mut upload_pass.roughness_receiver
138 }
139}
140
141/// Upload pass which uploads to GPU any data that is necessary, like vertex
142/// buffers for meshes and camera parameters.
143pub struct UploadPass {
144 //all the buffers for per-frame data like light positions, cam parameters, etc. These do not change from mesh to mesh
145 per_frame_uniforms: PerFrameUniforms,
146 mipmapper: Option<RenderMipmapGenerator>,
147 //the local stuff that changes from mesh to mesh is allocated by each pass, because each pass might need something different from the mesh
148 pub command_buffer: CommandBuffer, //defers insertions and deletions on scene entities until this command buffer is applied
149 pub staging_buffer: Option<Buffer>,
150 #[cfg(target_arch = "wasm32")]
151 diffuse_receiver: Option<mpsc::Receiver<(Entity, DiffuseTex)>>,
152 #[cfg(target_arch = "wasm32")]
153 normal_receiver: Option<mpsc::Receiver<(Entity, NormalTex)>>,
154 #[cfg(target_arch = "wasm32")]
155 roughness_receiver: Option<mpsc::Receiver<(Entity, RoughnessTex)>>,
156}
157
158impl UploadPass {
159 pub fn new(gpu: &Gpu, params: &RenderConfig) -> Self {
160 //wasm requires these uniform structs to be 16-byte aligned
161 const_assert!(std::mem::size_of::<PerFrameSceneCPU>() % 16 == 0);
162 const_assert!(std::mem::size_of::<PerFrameCamCPU>() % 16 == 0);
163 const_assert!(std::mem::size_of::<PerFrameLightCPU>() % 16 == 0);
164 const_assert!(std::mem::size_of::<PerFrameParamsCPU>() % 16 == 0);
165
166 let per_frame_uniforms = PerFrameUniforms::new(gpu);
167
168 let mipmapper = Some(RenderMipmapGenerator::new_with_format_hints(
169 gpu.device(),
170 &[
171 wgpu::TextureFormat::Rgba8Unorm, //for normal maps
172 wgpu::TextureFormat::Rgba8UnormSrgb, //for diffuse maps
173 wgpu::TextureFormat::R8Unorm, //for roughness maps
174 ],
175 ));
176
177 let command_buffer = CommandBuffer::new();
178
179 let staging_buffer = if params.preallocated_staging_buffer_bytes != 0 {
180 info!(
181 "Using preallocated staging buffer with {} MB",
182 params.preallocated_staging_buffer_bytes / (1024 * 1024)
183 );
184 Some(Buffer::new_empty(
185 gpu.device(),
186 wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::MAP_WRITE,
187 Some("gloss_staging_buffer"),
188 align_usz(params.preallocated_staging_buffer_bytes as usize, 256),
189 ))
190 } else {
191 None
192 };
193
194 Self {
195 per_frame_uniforms,
196 mipmapper,
197 command_buffer,
198 staging_buffer,
199 #[cfg(target_arch = "wasm32")]
200 diffuse_receiver: None,
201 #[cfg(target_arch = "wasm32")]
202 normal_receiver: None,
203 #[cfg(target_arch = "wasm32")]
204 roughness_receiver: None,
205 }
206 }
207
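/// Runs the upload pass for one frame: per-mesh attributes (vertices, edges, faces, UVs,
/// normals, tangents, colors) and textures first, then the per-frame scene, camera, light and
/// parameter uniforms. Returns the per-frame uniforms so later passes can bind them.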
208 pub fn run(&mut self, gpu: &Gpu, camera: &Camera, scene: &mut Scene, render_params: &RenderConfig) -> &PerFrameUniforms {
209 //upload each component (all of these are needed for the mesh)
210 self.upload_v(gpu, scene);
211 self.upload_e(gpu, scene);
212 self.upload_f(gpu, scene);
213 self.upload_uv(gpu, scene);
214 self.upload_nv(gpu, scene);
215 self.upload_t(gpu, scene);
216 self.upload_c(gpu, scene);
217 self.upload_textures(gpu, scene);
218
219 self.upload_scene(gpu, scene);
220 self.upload_cam(gpu, camera, scene);
221 self.upload_lights(gpu, scene);
222 self.upload_params(gpu, scene, render_params);
223
224 &self.per_frame_uniforms
225 }
226
227 pub fn upload_textures(&mut self, gpu: &Gpu, scene: &mut Scene) {
228 #[cfg(target_arch = "wasm32")]
229 self.process_completed_texture_uploads(scene);
230
231 self.upload_texture::<DiffuseUploadable>(gpu, scene);
232 self.upload_texture::<NormalUploadable>(gpu, scene);
233 self.upload_texture::<RoughnessUploadable>(gpu, scene);
234 self.upload_environment_map(gpu, scene);
235 }
236
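/// Uploads a single dynamic vertex attribute. If the entity has no GPU buffer yet, or the byte
/// size changed, a new buffer is created and queued for insertion through the command buffer;
/// otherwise the existing buffer is overwritten in place with `queue.write_buffer`.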
237 #[allow(clippy::unnecessary_unwrap)] //the unwrap is unnecessary, but keeping it makes the two cases more compact and explicit
239 fn upload_dynamic_vertex_atrib<T, C: DynamicTensorOps<T> + Component, G: GpuAtrib + Component>(
240 &mut self,
241 entity: Entity,
242 atrib: &C,
243 atrib_gpu: Option<&mut G>,
244 gpu: &Gpu,
245 additional_usage: wgpu::BufferUsages, // scene: &mut Scene,
246 label: &str,
247 ) {
248 // TODO: If DynamicTensor of Wgpu backend, do a direct buffer to buffer copy
249 let verts_bytes = atrib.as_bytes();
250 let size_bytes = verts_bytes.len();
251 if atrib_gpu.is_none() || atrib_gpu.as_ref().unwrap().data_ref().size() != std::convert::TryInto::<u64>::try_into(size_bytes).unwrap() {
252 // Allocate new memory for the GPU buffer if it doesn't exist or size has
253 // changed
254 let desc = wgpu::util::BufferInitDescriptor {
255 label: Some(label),
256 contents: &verts_bytes, // Use the raw data directly
257 usage: additional_usage | wgpu::BufferUsages::COPY_DST,
258 };
259
260 let buf: wgpu::Buffer = gpu.device().create_buffer_init(&desc);
261
262 // Insert the new GPU buffer component into the entity
263 self.command_buffer
264 .insert_one(entity, G::new_from(buf, u32::try_from(atrib.nrows()).unwrap()));
265 } else {
266 gpu.queue().write_buffer(
267 atrib_gpu.unwrap().data_ref(),
268 0,
269 &verts_bytes, // Use the raw data directly
270 );
271 }
272 }
273
274 /// Uploads vertex positions; the other per-component upload functions below follow the same pattern.
275 fn upload_v(&mut self, gpu: &Gpu, scene: &mut Scene) {
276 let query = scene
277 .world
278 .query_mut::<(&Verts, Option<&mut VertsGPU>, Changed<Verts>)>()
279 .with::<&Renderable>();
280 let usage = wgpu::BufferUsages::VERTEX;
281
282 for (ent, (verts, mut verts_gpu, changed_verts)) in query {
283 if changed_verts {
284 self.upload_dynamic_vertex_atrib(ent, &verts.0, verts_gpu.as_deref_mut(), gpu, usage, "verts");
285 }
286 }
287 self.command_buffer.run_on(&mut scene.world);
288 }
289
290 fn upload_e(&mut self, gpu: &Gpu, scene: &mut Scene) {
291 let query = scene
292 .world
293 .query_mut::<(
294 &Verts,
295 &Edges,
296 Option<&mut EdgesV1GPU>,
297 Option<&mut EdgesV2GPU>,
298 Changed<Verts>,
299 Changed<Edges>,
300 )>()
301 .with::<&Renderable>();
302
303 let usage = wgpu::BufferUsages::VERTEX;
304 for (ent, (verts, edges, mut edges_v1_gpu, mut edges_v2_gpu, changed_verts, changed_edges)) in query {
305 if changed_verts || changed_edges {
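//expand the edge index list into two per-edge vertex-position buffers: column 0 of the edge
//indices selects the first endpoint of every edge, column 1 the second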
306 let edges_v1_mat = index_vertices_from_edges(&verts.0.to_dmatrix(), &edges.0.to_dmatrix(), 0);
307 let edges_v2_mat = index_vertices_from_edges(&verts.0.to_dmatrix(), &edges.0.to_dmatrix(), 1);
308
309 let edges_v1_mat_tensor = DynamicTensorFloat2D::from_dmatrix(&edges_v1_mat);
310 let edges_v2_mat_tensor = DynamicTensorFloat2D::from_dmatrix(&edges_v2_mat);
311 let edges_v1 = EdgesV1(edges_v1_mat_tensor);
312 let edges_v2 = EdgesV2(edges_v2_mat_tensor);
313
314 self.upload_dynamic_vertex_atrib(ent, &edges_v1.0, edges_v1_gpu.as_deref_mut(), gpu, usage, "edges_v1");
315 self.upload_dynamic_vertex_atrib(ent, &edges_v2.0, edges_v2_gpu.as_deref_mut(), gpu, usage, "edges_v2");
316 }
317 }
318 self.command_buffer.run_on(&mut scene.world);
319 }
320
321 fn upload_f(&mut self, gpu: &Gpu, scene: &mut Scene) {
322 let query = scene
323 .world
324 .query_mut::<(&Faces, Option<&mut FacesGPU>, Changed<Faces>)>()
325 .with::<&Renderable>();
326 let usage = wgpu::BufferUsages::INDEX;
327 for (ent, (faces, mut faces_gpu, changed_faces)) in query {
328 if changed_faces {
329 self.upload_dynamic_vertex_atrib(ent, &faces.0, faces_gpu.as_deref_mut(), gpu, usage, "faces");
330 }
331 }
332 self.command_buffer.run_on(&mut scene.world);
333 }
334 fn upload_uv(&mut self, gpu: &Gpu, scene: &mut Scene) {
335 let query = scene.world.query_mut::<(&UVs, Option<&mut UVsGPU>, Changed<UVs>)>().with::<&Renderable>();
336 let usage = wgpu::BufferUsages::VERTEX;
337 for (ent, (uvs, mut uvs_gpu, changed_uvs)) in query {
338 if changed_uvs {
339 self.upload_dynamic_vertex_atrib(ent, &uvs.0, uvs_gpu.as_deref_mut(), gpu, usage, "uv");
340 }
341 }
342 self.command_buffer.run_on(&mut scene.world);
343 }
344
345 fn upload_nv(&mut self, gpu: &Gpu, scene: &mut Scene) {
346 let query = scene
347 .world
348 .query_mut::<(&Normals, Option<&mut NormalsGPU>, Changed<Normals>)>()
349 .with::<&Renderable>();
350 let usage = wgpu::BufferUsages::VERTEX;
351 for (ent, (normals, mut normals_gpu, changed_normals)) in query {
352 if changed_normals {
353 self.upload_dynamic_vertex_atrib(ent, &normals.0, normals_gpu.as_deref_mut(), gpu, usage, "normals");
354 }
355 }
356 self.command_buffer.run_on(&mut scene.world);
357 }
358
359 fn upload_t(&mut self, gpu: &Gpu, scene: &mut Scene) {
360 let query = scene
361 .world
362 .query_mut::<(&Tangents, Option<&mut TangentsGPU>, Changed<Tangents>)>()
363 .with::<&Renderable>();
364 let usage = wgpu::BufferUsages::VERTEX;
365 for (ent, (tangents, mut tangents_gpu, changed_tangents)) in query {
366 if changed_tangents {
367 self.upload_dynamic_vertex_atrib(ent, &tangents.0, tangents_gpu.as_deref_mut(), gpu, usage, "tangents");
368 }
369 }
370 self.command_buffer.run_on(&mut scene.world);
371 }
372
373 fn upload_c(&mut self, gpu: &Gpu, scene: &mut Scene) {
374 let query = scene
375 .world
376 .query_mut::<(&Colors, Option<&mut ColorsGPU>, Changed<Colors>)>()
377 .with::<&Renderable>();
378 let usage = wgpu::BufferUsages::VERTEX;
379 for (ent, (colors, mut colors_gpu, changed_colors)) in query {
380 if changed_colors {
381 self.upload_dynamic_vertex_atrib(ent, &colors.0, colors_gpu.as_deref_mut(), gpu, usage, "colors");
382 }
383 }
384 self.command_buffer.run_on(&mut scene.world);
385 }
386
387 #[cfg(target_arch = "wasm32")]
388 // Insert completed textures into their entities and clear the corresponding receivers
389 fn process_completed_texture_uploads(&mut self, scene: &mut Scene) {
390 fn process_texture_receiver<T: TextureUploadable>(
391 texture_receiver: &mut Option<mpsc::Receiver<(Entity, T::Tex)>>,
392 command_buffer: &mut CommandBuffer,
393 ) {
394 if let Some(recv) = texture_receiver {
395 let mut processed = false;
396 while let Ok((entity, texture)) = recv.try_recv() {
397 command_buffer.insert_one(entity, texture);
398 processed = true;
399 }
400 if processed {
401 *texture_receiver = None;
402 }
403 }
404 }
405
406 process_texture_receiver::<DiffuseUploadable>(&mut self.diffuse_receiver, &mut self.command_buffer);
407 process_texture_receiver::<NormalUploadable>(&mut self.normal_receiver, &mut self.command_buffer);
408 process_texture_receiver::<RoughnessUploadable>(&mut self.roughness_receiver, &mut self.command_buffer);
409
410 self.command_buffer.run_on(&mut scene.world);
411 }
412
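/// Uploads (or updates in place) the GPU texture for `T::Img` on every renderable entity whose
/// image changed. On native targets the async texture creation is blocked on; on wasm it is
/// spawned via `wasm_bindgen_futures` and the finished texture is delivered later through an
/// mpsc channel drained by `process_completed_texture_uploads`.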
413 #[allow(clippy::too_many_lines)]
414 fn upload_texture<T: TextureUploadable>(&mut self, gpu: &Gpu, scene: &mut Scene) {
415 let mut modified_entities = Vec::new();
416 {
417 let mut query = scene
418 .world
419 .query::<(&mut T::Img, Option<&mut T::Tex>, Changed<T::Img>)>()
420 .with::<&Renderable>();
421
422 for (entity, (mut img, tex_opt, changed_img)) in query.iter() {
423 if changed_img && img.generic_img().cpu_img.is_some() {
424 debug!("{} changed for entity {entity:?}", T::tex_name());
425 let nr_channels = img.generic_img().img_ref().color().channel_count();
426 if nr_channels != 4 {
427 warn!("unoptimal use of memory: diffuse does not have 4 channels, it has {nr_channels}");
428 }
429 modified_entities.push(entity);
430 let is_srgb = T::is_srgb(); //true only for diffuse textures, which are stored in sRGB in the image file but should be sampled as linear colors
431 let keep_on_cpu = img.generic_img().config.keep_on_cpu;
432
433 #[cfg(not(target_arch = "wasm32"))]
434 let staging_buffer = if img.generic_img().config.fast_upload {
435 None
436 } else {
437 //using slow upload through a preallocated staging buffer
438 if self.staging_buffer.is_none() {
439 warn!("The diffuse image is set to slow upload which would require a preallocated staging buffer. However no bytes have been allocated for it. Check the config.toml for the preallocated_staging_buffer. Now we default to fast upload through wgpu staging buffer which might use more memory than necessary.");
440 }
441 self.staging_buffer.as_ref()
442 };
443
444 //either create a new tex or update the existing one
445 let mut tex_uploaded = false;
446 #[allow(unused_mut)]
447 if let Some(mut existing_tex) = tex_opt {
448 let new_tex_extent = Texture::extent_from_img(img.generic_img().img_ref());
449 let new_tex_format = Texture::format_from_img(img.generic_img().img_ref(), is_srgb);
450 let old_tex_extent = existing_tex.texture().extent();
451 let old_format = existing_tex.texture().texture.format();
452 if new_tex_format == old_format && new_tex_extent == old_tex_extent {
453 debug!("reusing diffuse tex");
454
455 #[cfg(not(target_arch = "wasm32"))]
456 {
457 existing_tex
458 .texture_mut()
459 .update_from_img(
460 img.generic_img().img_ref(),
461 gpu.device(),
462 gpu.queue(),
463 is_srgb,
464 img.generic_img().config.generate_mipmaps,
465 img.generic_img().config.mipmap_generation_cpu,
466 staging_buffer,
467 self.mipmapper.as_ref(),
468 )
469 .block_on()
470 .unwrap();
471 }
472
473 #[cfg(target_arch = "wasm32")]
474 {
475 // We can safely clone a lot of the wgpu types because internally they are behind arcs
476 let img_clone = img.generic_img().img_ref().clone();
477 let device = gpu.device().clone();
478 let queue = gpu.queue().clone();
479 let generate_mipmaps = img.generic_img().config.generate_mipmaps;
480 let mipmap_generation_cpu = img.generic_img().config.mipmap_generation_cpu;
481 let mipmapper_clone = self.mipmapper.clone();
482 let mut existing_tex_clone = existing_tex.clone();
483
484 wasm_bindgen_futures::spawn_local(async move {
485 match existing_tex_clone
486 .texture_mut()
487 .update_from_img(
488 &img_clone,
489 &device,
490 &queue,
491 is_srgb,
492 generate_mipmaps,
493 mipmap_generation_cpu,
494 None, //TODO: Forcing fast upload on WASM to avoid parking issues, look into fixes for this
495 mipmapper_clone.as_ref(),
496 )
497 .await
498 {
499 Ok(_) => {}
500 Err(e) => {
501 log::error!("Texture update failed: {:?}", e);
502 }
503 }
504 });
505 }
506
507 tex_uploaded = true;
508 }
509 }
510 //we create a new one if we couldn't update an existing one
511 if !tex_uploaded {
512 #[cfg(not(target_arch = "wasm32"))]
513 {
514 let tex = Texture::from_img(
515 img.generic_img().img_ref(),
516 gpu.device(),
517 gpu.queue(),
518 is_srgb,
519 img.generic_img().config.generate_mipmaps,
520 img.generic_img().config.mipmap_generation_cpu,
521 staging_buffer,
522 self.mipmapper.as_ref(),
523 )
524 .block_on()
525 .unwrap();
526 self.command_buffer.insert_one(entity, T::new_tex(tex));
527 }
528 #[cfg(target_arch = "wasm32")]
529 {
530 let (sender, receiver) = mpsc::channel();
531 let texture_receiver = T::texture_receiver(self);
532 if texture_receiver.is_none() {
533 *texture_receiver = Some(receiver);
534 }
535
536 let img_clone = img.generic_img().img_ref().clone();
537 let device = gpu.device().clone();
538 let queue = gpu.queue().clone();
539 let generate_mipmaps = img.generic_img().config.generate_mipmaps;
540 let mipmap_generation_cpu = img.generic_img().config.mipmap_generation_cpu;
541 let mipmapper_clone = self.mipmapper.clone();
542
543 wasm_bindgen_futures::spawn_local(async move {
544 match Texture::from_img(
545 &img_clone,
546 &device,
547 &queue,
548 is_srgb,
549 generate_mipmaps,
550 mipmap_generation_cpu,
551 None, //TODO: Forcing fast upload on WASM to avoid parking issues, look into fixes for this
552 mipmapper_clone.as_ref(),
553 )
554 .await
555 {
556 Ok(texture) => {
557 let _ = sender.send((entity, T::new_tex(texture)));
558 }
559 Err(e) => {
560 log::error!("Texture creation failed: {:?}", e);
561 }
562 }
563 });
564 }
565 }
566
567 if !keep_on_cpu {
568 let _ = img.generic_img_mut().cpu_img.take();
569 }
570 }
571 }
572
573 //switch these meshes to visualize the newly uploaded texture (only if the VisMesh was added automatically)
574 for entity in modified_entities {
576 if let Ok(mut vis_mesh) = scene.get_comp::<&mut VisMesh>(&entity) {
577 if vis_mesh.added_automatically {
578 vis_mesh.color_type = MeshColorType::Texture;
579 }
580 }
581 }
582 }
583 self.command_buffer.run_on(&mut scene.world);
584 }
585
1136 fn upload_environment_map(&mut self, gpu: &Gpu, scene: &mut Scene) {
1138 let query = scene.world.query_mut::<(&EnvironmentMap, Changed<EnvironmentMap>)>();
1139 for (entity, (env_map, changed_env)) in query {
1140 if changed_env {
1141 let diffuse_raw_data = std::fs::read(env_map.diffuse_path.clone()).unwrap();
1142 let diffuse_reader = ktx2::Reader::new(diffuse_raw_data.as_slice()).expect("Can't create diffuse_reader");
1143 let specular_raw_data = std::fs::read(env_map.specular_path.clone()).unwrap();
1144 let specular_reader = ktx2::Reader::new(specular_raw_data.as_slice()).expect("Can't create specular_reader");
1145
1146 let diffuse_tex = EnvironmentMapGpu::reader2texture(&diffuse_reader, gpu.device(), gpu.queue());
1147 let specular_tex = EnvironmentMapGpu::reader2texture(&specular_reader, gpu.device(), gpu.queue());
1148
1149 let env_map_gpu = EnvironmentMapGpu { diffuse_tex, specular_tex };
1150
1151 // scene.add_resource(env_map);
1152 self.command_buffer.insert_one(entity, env_map_gpu);
1153 }
1154 }
1155
1156 self.command_buffer.run_on(&mut scene.world);
1157 }
1158
1159 fn upload_scene(&mut self, gpu: &Gpu, scene: &mut Scene) {
1160 let entities_lights = scene.get_lights(false);
1161 let env_map = scene.get_resource::<&EnvironmentMapGpu>().unwrap();
1162 let environment_map_smallest_specular_mip_level = env_map.specular_tex.texture.mip_level_count() - 1;
1163
1164 let per_frame_scene_data = PerFrameSceneCPU {
1165 nr_lights: u32::try_from(entities_lights.len()).unwrap(),
1166 environment_map_smallest_specular_mip_level,
1167 pad_1: 0,
1168 pad_2: 0,
1169 };
1170
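//stage the struct as a CPU chunk, copy it into the GPU uniform buffer, then reset the chunk offset for the next frame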
1171 self.per_frame_uniforms.scene_buf.push_cpu_chunk_packed(&per_frame_scene_data);
1172 self.per_frame_uniforms.scene_buf.upload_from_cpu_chunks(gpu.queue());
1173 self.per_frame_uniforms.scene_buf.reset_chunks_offset();
1174 }
1175
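/// Uploads the per-frame camera uniform. The projection is a reversed-Z matrix; if the camera
/// entity has no `Projection` component yet, a default `ProjectionWithFov` is used instead.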
1176 fn upload_cam(&mut self, gpu: &Gpu, camera: &Camera, scene: &mut Scene) {
1177 let pos_lookat = if let Ok(pos_lookat) = scene.world.get::<&mut PosLookat>(camera.entity) {
1178 pos_lookat.clone()
1179 } else {
1180 PosLookat::default()
1181 };
1182
1183 let view_matrix = pos_lookat.view_matrix();
1184 let view_inv_matrix = pos_lookat.view_matrix_isometry().inverse().to_matrix();
1185
1186 //get the projection info, taking into account that the camera entity might not have a
1187 //Projection component yet, in which case we fall back to some reasonable defaults
1189 let proj_matrix;
1190 let near;
1191 let far;
1192 if scene.world.has::<Projection>(camera.entity).unwrap() {
1193 proj_matrix = camera.proj_matrix_reverse_z(scene);
1194 (near, far) = camera.near_far(scene);
1195 } else {
1196 let proj = ProjectionWithFov::default();
1197 proj_matrix = proj.proj_matrix_reverse_z();
1198 (near, far) = (proj.near, proj.far);
1199 }
1200 let (width, height) = camera.get_target_res(scene);
1201 let aspect_ratio = width as f32 / height as f32;
1202 let proj_inv_matrix = proj_matrix.try_inverse().unwrap();
1203
1204 let vp_matrix = proj_matrix * view_matrix;
1205 let pos_world = pos_lookat.position.coords;
1206
1207 #[allow(clippy::cast_precision_loss)]
1208 let per_frame_cam_data = PerFrameCamCPU {
1209 view_matrix,
1210 view_inv_matrix,
1211 proj_matrix,
1212 proj_inv_matrix,
1213 vp_matrix,
1214 pos_world,
1215 near,
1216 far,
1217 aspect_ratio,
1218 width: width as f32,
1219 height: height as f32,
1220 };
1221
1222 self.per_frame_uniforms.cam_buf.push_cpu_chunk_packed(&per_frame_cam_data);
1223 self.per_frame_uniforms.cam_buf.upload_from_cpu_chunks(gpu.queue());
1224 self.per_frame_uniforms.cam_buf.reset_chunks_offset();
1225 }
1226
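/// Uploads every light (position, projection, emission and shadow parameters) into the packed
/// lights uniform array, and records the name-to-index mapping plus the index-to-entity list
/// used to address individual lights later.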
1227 fn upload_lights(&mut self, gpu: &Gpu, scene: &mut Scene) {
1228 self.per_frame_uniforms.idx_ubo2light.clear();
1229
1230 let query = scene
1231 .world
1232 .query_mut::<(&Name, &PosLookat, &Projection, &LightEmit, Option<&ShadowCaster>)>();
1233 for (idx_light, (entity, (name, pos_lookat, proj, light_emit, shadow_caster))) in query.into_iter().enumerate() {
1234 let view_matrix = pos_lookat.view_matrix();
1235 let proj_matrix = match *proj {
1236 Projection::WithFov(ref proj) => proj.proj_matrix_reverse_z(),
1237 Projection::WithIntrinsics(_) => {
1238 panic!("We don't deal with light that have projection as intrinsics")
1239 }
1240 };
1241 let (near, far) = proj.near_far();
1242 let vp_matrix = proj_matrix * view_matrix;
1243 let pos_world = pos_lookat.position.coords;
1244 let lookat_dir_world = pos_lookat.direction();
1245
1246 let color = light_emit.color;
1247 let intensity = light_emit.intensity;
1248 let range = light_emit.range;
1249 let inverse_square_range = 1.0 / (range * range);
1250 let radius = light_emit.radius;
1251 let is_shadow_casting_bool = shadow_caster.is_some();
1252 let is_shadow_casting: u32 = u32::from(is_shadow_casting_bool);
1253
1254 let shadow_bias_fixed = if let Some(shadow_caster) = shadow_caster {
1255 shadow_caster.shadow_bias_fixed
1256 } else {
1257 0.0
1258 };
1259 let shadow_bias = if let Some(shadow_caster) = shadow_caster {
1260 shadow_caster.shadow_bias
1261 } else {
1262 0.0
1263 };
1264 let shadow_bias_normal = if let Some(shadow_caster) = shadow_caster {
1265 shadow_caster.shadow_bias_normal
1266 } else {
1267 0.0
1268 };
1269
1270 // let outer_angle = proj.fovy / 2.0; //we could use half the fov as the angle since fov_y equals fov_x (the shadow maps are always square)
1275 let outer_angle = light_emit.outer_angle;
1276 let inner_angle = light_emit.inner_angle;
1277
1278 //encase
1279 let per_frame_light_data = PerFrameLightCPU {
1280 view_matrix,
1281 proj_matrix,
1282 vp_matrix,
1283 pos_world,
1284 lookat_dir_world,
1285 color,
1286 intensity,
1287 range,
1288 inverse_square_range,
1289 radius,
1290 // spot_scale: 1.0,
1291 outer_angle,
1292 inner_angle,
1293 near,
1294 far,
1295 is_shadow_casting,
1296 shadow_bias_fixed,
1297 shadow_bias,
1298 shadow_bias_normal,
1299 pad_b: 1.0,
1300 pad_c: 1.0,
1301 pad_d: 1.0,
1302 };
1303
1304 //push packed because we will expose it as an array inside the shader
1305 self.per_frame_uniforms.lights_buf.push_cpu_chunk_packed(&per_frame_light_data);
1306
1307 //save also a mapping between light name and the idx in the whole light buffer
1308 self.per_frame_uniforms
1309 .light2idx_ubo
1310 .insert(name.0.clone(), u32::try_from(idx_light).unwrap());
1311 self.per_frame_uniforms.idx_ubo2light.push(entity);
1312 }
1313
1314 self.per_frame_uniforms.lights_buf.upload_from_cpu_chunks(gpu.queue());
1315 self.per_frame_uniforms.lights_buf.reset_chunks_offset();
1316 }
1317
1318 fn upload_params(&mut self, gpu: &Gpu, _scene: &mut Scene, render_params: &RenderConfig) {
1319 let per_frame_params_data = PerFrameParamsCPU {
1320 ambient_factor: render_params.ambient_factor,
1321 environment_factor: render_params.environment_factor,
1322 bg_color: render_params.bg_color,
1323 enable_distance_fade: u32::from(render_params.enable_distance_fade.unwrap_or(false)),
1324 distance_fade_center: render_params.distance_fade_center.unwrap_or_default().coords,
1325 distance_fade_start: render_params.distance_fade_start.unwrap_or(0.0),
1326 distance_fade_end: render_params.distance_fade_end.unwrap_or(0.0),
1327 apply_lighting: u32::from(render_params.apply_lighting),
1328 saturation: render_params.saturation,
1329 gamma: render_params.gamma,
1330 exposure: render_params.exposure,
1331 shadow_filter_method: render_params.shadow_filter_method as i32, // post_saturation: render_params.post_saturation,
1332 pad_b: 0.0,
1333 pad_c: 0.0,
1334 pad_d: 0.0,
1335 };
1336
1337 self.per_frame_uniforms.params_buf.push_cpu_chunk_packed(&per_frame_params_data);
1338 self.per_frame_uniforms.params_buf.upload_from_cpu_chunks(gpu.queue());
1339 self.per_frame_uniforms.params_buf.reset_chunks_offset();
1340 }
1341}
1342
1343#[repr(C)]
1344#[derive(Clone, Copy, encase::ShaderType)]
1345struct PerFrameSceneCPU {
1346 nr_lights: u32,
1347 environment_map_smallest_specular_mip_level: u32,
1348 //wasm needs padding to 16 bytes https://github.com/gfx-rs/wgpu/issues/2932
1349 pad_1: u32,
1350 pad_2: u32,
1351}
1352/// Contains camera data that will be sent to the GPU once a frame.
1353#[repr(C)]
1354#[derive(Clone, Copy, encase::ShaderType)]
1355struct PerFrameCamCPU {
1356 view_matrix: na::Matrix4<f32>,
1357 view_inv_matrix: na::Matrix4<f32>,
1358 proj_matrix: na::Matrix4<f32>,
1359 proj_inv_matrix: na::Matrix4<f32>,
1360 vp_matrix: na::Matrix4<f32>, //proj * view; order matters because we multiply vectors from the left, so the view matrix is applied first and then proj
1362 pos_world: na::Vector3<f32>,
1363 near: f32,
1364 far: f32,
1365 aspect_ratio: f32,
1366 width: f32,
1367 height: f32,
1368}
1369/// Contains light data that will be sent to the GPU once a frame.
1370#[repr(C)]
1371#[derive(Clone, Copy, encase::ShaderType)]
1372// #[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
1373struct PerFrameLightCPU {
1374 view_matrix: na::Matrix4<f32>,
1375 proj_matrix: na::Matrix4<f32>,
1376 vp_matrix: na::Matrix4<f32>, //proj * view; order matters because we multiply vectors from the left, so the view matrix is applied first and then proj
1378 pos_world: na::Vector3<f32>,
1379 lookat_dir_world: na::Vector3<f32>,
1380 color: na::Vector3<f32>,
1381 intensity: f32,
1382 range: f32,
1383 inverse_square_range: f32, //just 1/(range*range) because we don't want to compute this on gpu
1384 radius: f32,
1385 outer_angle: f32,
1386 inner_angle: f32,
1387 near: f32,
1388 far: f32,
1389 is_shadow_casting: u32, //should be bool but that is not host-sharable: https://www.w3.org/TR/WGSL/#host-shareable-types
1390 shadow_bias_fixed: f32,
1391 shadow_bias: f32,
1392 shadow_bias_normal: f32,
1393 //wasm needs padding to 16 bytes https://github.com/gfx-rs/wgpu/issues/2932
1394 pad_b: f32,
1395 pad_c: f32,
1396 pad_d: f32,
1397}
1398#[repr(C)]
1399#[derive(Clone, Copy, encase::ShaderType)]
1400struct PerFrameParamsCPU {
1401 ambient_factor: f32,
1402 environment_factor: f32,
1403 bg_color: na::Vector4<f32>,
1404 enable_distance_fade: u32,
1405 distance_fade_center: na::Vector3<f32>,
1406 distance_fade_start: f32,
1407 distance_fade_end: f32,
1408 //color grading, applied before tonemapping
1409 apply_lighting: u32,
1410 saturation: f32,
1411 gamma: f32,
1412 exposure: f32,
1413 shadow_filter_method: i32,
1414 // post_saturation: f32, //applied after tonemapping
1415 //wasm needs padding to 16 bytes https://github.com/gfx-rs/wgpu/issues/2932
1416 pad_b: f32,
1417 pad_c: f32,
1418 pad_d: f32,
1419}
1420
1421/// All the buffers that are the same for all meshes. Contains things like
1422 /// camera parameters, lights, and global settings.
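///
/// The per-frame data reaches shaders through `bind_group`; a downstream render pass would
/// typically bind it at group 0. A minimal sketch (assuming a `wgpu::RenderPass` named
/// `render_pass` and a `PerFrameUniforms` value named `per_frame_uniforms` are in scope):
/// ```ignore
/// render_pass.set_bind_group(0, &per_frame_uniforms.bind_group, &[]);
/// ```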
1423#[non_exhaustive]
1424pub struct PerFrameUniforms {
1425 scene_buf: Buffer, //group 0, binding 0
1426 cam_buf: Buffer, //group 0, binding 1
1427 lights_buf: Buffer, //group 0, binding 2
1428 params_buf: Buffer, //group 0, binding 3
1429
1430 #[allow(dead_code)]
1431 //storing the samplers is not strictly needed since the bind group keeps them alive, but it makes things more explicit
1432 sampler_nearest: wgpu::Sampler, //group 0, binding 4
1433 #[allow(dead_code)]
1434 sampler_linear: wgpu::Sampler, //group 0, binding 5
1435 #[allow(dead_code)]
1436 sampler_comparison: wgpu::Sampler, //group 0, binding 6
1437 //we also save the bind_group since we will not need to recreate it (the buffers are never reallocated)
1438 //the layout is kept as an associated function because we want to call it without an object
1439 pub bind_group: wgpu::BindGroup,
1440 //misc
1441 pub light2idx_ubo: HashMap<String, u32>,
1442 pub idx_ubo2light: Vec<Entity>,
1443}
1444impl PerFrameUniforms {
1445 pub fn new(gpu: &Gpu) -> Self {
1446 let scene_buf = Buffer::new_empty(
1447 gpu.device(),
1448 wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
1449 Some("global_scene_uniform"),
1450 align_usz(std::mem::size_of::<PerFrameSceneCPU>(), 256),
1451 );
1452 //allocate buffers on gpu to hold the corresponding cpu data
1453 let cam_buf = Buffer::new_empty(
1454 gpu.device(),
1455 wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
1456 Some("global_cam_uniform"),
1457 align_usz(std::mem::size_of::<PerFrameCamCPU>(), 256),
1458 );
1459 //allocate space for MAX_NUM_LIGHTS lights
1460 let lights_buf = Buffer::new_empty(
1461 gpu.device(),
1462 wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
1463 // wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1464 Some("global_lights_uniform"),
1465 MAX_NUM_LIGHTS * align_usz(std::mem::size_of::<PerFrameLightCPU>(), 256),
1466 );
1467 let params_buf = Buffer::new_empty(
1468 gpu.device(),
1469 wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
1470 Some("global_params_uniform"),
1471 align_usz(std::mem::size_of::<PerFrameParamsCPU>(), 256),
1472 );
1473
1474 //samplers for nearest and linear
1475 let sampler_nearest = gpu.device().create_sampler(&wgpu::SamplerDescriptor {
1476 label: Some("sampler_nearest"),
1477 address_mode_u: wgpu::AddressMode::Repeat,
1478 address_mode_v: wgpu::AddressMode::Repeat,
1479 address_mode_w: wgpu::AddressMode::Repeat,
1480 min_filter: wgpu::FilterMode::Nearest,
1481 mag_filter: wgpu::FilterMode::Nearest,
1482 mipmap_filter: wgpu::FilterMode::Nearest,
1483 ..Default::default()
1484 });
1485 let sampler_linear = gpu.device().create_sampler(&wgpu::SamplerDescriptor {
1486 label: Some("sampler_linear"),
1487 address_mode_u: wgpu::AddressMode::Repeat,
1488 address_mode_v: wgpu::AddressMode::Repeat,
1489 address_mode_w: wgpu::AddressMode::Repeat,
1490 min_filter: wgpu::FilterMode::Linear,
1491 mag_filter: wgpu::FilterMode::Linear,
1492 mipmap_filter: wgpu::FilterMode::Linear,
1493 ..Default::default()
1494 });
1495 let sampler_comparison = gpu.device().create_sampler(&wgpu::SamplerDescriptor {
1496 label: Some("sampler_shadow_map"),
1497 min_filter: wgpu::FilterMode::Linear,
1498 mag_filter: wgpu::FilterMode::Linear,
1499 compare: Some(wgpu::CompareFunction::Greater),
1500 ..Default::default()
1501 });
1502
1503 let layout = Self::create_layout(gpu);
1504 let bind_group = BindGroupBuilder::new()
1505 .label("per_frame_bind_group")
1506 .add_entry_buf(&scene_buf.buffer)
1507 .add_entry_buf(&cam_buf.buffer)
1508 .add_entry_buf(&lights_buf.buffer)
1509 .add_entry_buf(¶ms_buf.buffer)
1510 .add_entry_sampler(&sampler_nearest)
1511 .add_entry_sampler(&sampler_linear)
1512 .add_entry_sampler(&sampler_comparison)
1513 .build_bind_group(gpu.device(), &layout);
1514
1515 Self {
1516 scene_buf,
1517 cam_buf,
1518 lights_buf,
1519 params_buf,
1520 sampler_nearest,
1521 sampler_linear,
1522 sampler_comparison,
1523 bind_group,
1524 light2idx_ubo: HashMap::new(),
1525 idx_ubo2light: Vec::new(),
1526 }
1527 }
1528
1529 //kept as an associated function so we can call it during pipeline creation without an object
1530 pub fn create_layout(gpu: &Gpu) -> wgpu::BindGroupLayout {
1531 Self::build_layout_desc().into_bind_group_layout(gpu.device())
1532 }
1535
1536 /// # Panics
1537 /// Will panic if the size of one of the per-frame uniform structs does not fit into a `u32`.
1538 pub fn build_layout_desc() -> BindGroupLayoutDesc {
1539 BindGroupLayoutBuilder::new()
1540 .label("locals_layout")
1541 //scene
1542 .add_entry_uniform(
1543 wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
1544 false,
1545 wgpu::BufferSize::new(u64::from(align(u32::try_from(std::mem::size_of::<PerFrameSceneCPU>()).unwrap(), 256))),
1546 )
1547 //cam
1548 .add_entry_uniform(
1549 wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
1550 false,
1551 wgpu::BufferSize::new(u64::from(align(u32::try_from(std::mem::size_of::<PerFrameCamCPU>()).unwrap(), 256))),
1552 )
1553 //light
1554 .add_entry_uniform(
1555 wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
1556 false,
1557 wgpu::BufferSize::new(u64::from(align(u32::try_from(std::mem::size_of::<PerFrameLightCPU>()).unwrap(), 256))),
1558 )
1559 //params
1560 .add_entry_uniform(
1561 wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
1562 false,
1563 wgpu::BufferSize::new(u64::from(align(u32::try_from(std::mem::size_of::<PerFrameParamsCPU>()).unwrap(), 256))),
1564 )
1565 //samplers
1566 .add_entry_sampler(wgpu::ShaderStages::FRAGMENT, wgpu::SamplerBindingType::NonFiltering)
1567 .add_entry_sampler(wgpu::ShaderStages::FRAGMENT, wgpu::SamplerBindingType::Filtering)
1568 .add_entry_sampler(wgpu::ShaderStages::FRAGMENT, wgpu::SamplerBindingType::Comparison)
1569 .build()
1570 }
1571}