1pub mod animation;
2pub mod object;
3
4use crate::scene::object::PhysicsPushConstants;
5use rayon::prelude::*;
6use std::collections::HashMap;
7use std::sync::Arc;
8use vulkano::buffer::{Buffer, BufferCreateInfo, BufferUsage, Subbuffer};
9use vulkano::command_buffer::allocator::{
10 StandardCommandBufferAllocator, StandardCommandBufferAllocatorCreateInfo,
11};
12use vulkano::command_buffer::RenderPassBeginInfo;
13use vulkano::command_buffer::SubpassContents;
14use vulkano::command_buffer::{
15 AutoCommandBufferBuilder, CommandBufferUsage, CopyBufferInfo, CopyBufferInfoTyped,
16 PrimaryAutoCommandBuffer, PrimaryCommandBufferAbstract,
17};
18use vulkano::descriptor_set::{
19 allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet,
20};
21use vulkano::device::Queue;
22use vulkano::format::Format;
23use vulkano::image::view::ImageView;
24use vulkano::image::ImmutableImage;
25use vulkano::memory::allocator::{AllocationCreateInfo, MemoryUsage, StandardMemoryAllocator};
26use vulkano::pipeline::graphics::viewport::Viewport;
27use vulkano::pipeline::ComputePipeline;
28use vulkano::pipeline::{GraphicsPipeline, Pipeline, PipelineBindPoint};
29use vulkano::render_pass::Framebuffer;
30use vulkano::sampler::{Filter, Sampler, SamplerAddressMode, SamplerCreateInfo, SamplerMipmapMode};
31use vulkano::sync::GpuFuture;
32use vulkano::DeviceSize;
33
34use crate::geometry::Mesh;
35use crate::input::MouseState;
36use crate::rendering::compute_registry::ComputeShaderRegistry;
37use crate::rendering::compute_registry::ComputeShaderType;
38use crate::rendering::pipeline::UniformBufferObject;
39use crate::rendering::shader_registry::{ShaderRegistry, ShaderType};
40use crate::scene::animation::AnimationType;
41use crate::scene::object::{Instance, InstanceData, RenderBatch, Texture, Transform};
42use vulkano::command_buffer::{DrawIndexedIndirectCommand, DrawIndirectCommand};
43
44#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
45#[repr(C)]
46pub struct MeshPushConstants {
47 pub visible_list_offset: u32,
48 pub use_culling: u32, }
50
/// One compute-shader dispatch over a contiguous range of the flattened
/// instance buffer (produced by `RenderScene::upload_to_gpu`).
pub struct ComputeDispatchInfo {
    /// Shader to run for this range.
    pub compute_shader: ComputeShaderType,
    /// Index of the first instance in the range.
    pub offset: u32,
    /// Number of instances in the range.
    pub count: u32,
}
56
/// GPU-resident scene state: draw batches, per-frame uniforms, textures,
/// ping-pong physics buffers and spatial-hash grid buffers.
pub struct RenderScene {
    /// Draw batches, grouped by (vertex buffer, graphics shader, compute shader).
    pub batches: Vec<RenderBatch>,
    /// Per-frame-in-flight resources (uniform buffers).
    pub frames: Vec<FrameData>,
    /// World-space position of the scene's point light.
    pub light_pos: [f32; 3],
    /// RGB color of the light.
    pub light_color: [f32; 3],
    /// Scalar intensity of the light.
    pub light_intensity: f32,
    /// Texture image views; index 0 is the 1x1 white default texture.
    pub texture_views: Vec<Arc<ImageView<ImmutableImage>>>,
    /// Shared linear-filtering, repeat-addressing sampler for all textures.
    pub texture_sampler: Arc<Sampler>,
    /// Allocator used when (re)building the descriptor-set cache.
    pub descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
    /// Per-frame cache; two sets per texture: even index binds `physics_read`,
    /// odd index binds `physics_write` (see `ensure_descriptor_cache`).
    pub descriptor_sets: Vec<Vec<Arc<PersistentDescriptorSet>>>,
    /// Instance-data buffer read this frame (half of the ping-pong pair).
    pub physics_read: Subbuffer<[InstanceData]>,
    /// Instance-data buffer written this frame (half of the ping-pong pair).
    pub physics_write: Subbuffer<[InstanceData]>,
    /// Indices of objects too large for the grid; capacity 1024 (see `new`).
    pub big_objects_indices: Subbuffer<[u32]>,
    /// Number of valid entries in `big_objects_indices` (set by `upload_to_gpu`).
    pub num_big_objects: u32,
    /// Per-cell occupancy counters of the spatial hash grid.
    pub grid_counts: Subbuffer<[u32]>,
    /// Flattened per-cell object index lists of the spatial hash grid.
    pub grid_objects: Subbuffer<[u32]>,
    /// Instance indices surviving culling; bound at binding 3 of the graphics
    /// descriptor sets (presumably read by the vertex shader — confirm there).
    pub visible_indices: Subbuffer<[u32]>,
    /// Total instance count across all batches (set by `upload_to_gpu`).
    pub total_instances: u32,
    /// Largest bounding radius among "small" objects (set by `upload_to_gpu`).
    pub max_object_radius: f32,
}
78
/// Resources owned by one frame in flight.
pub struct FrameData {
    /// Camera + lighting UBO, rewritten each frame by `prepare_frame_ubo`.
    pub uniform_buffer: Subbuffer<UniformBufferObject>,
}
82
/// Positional handle locating an instance inside `RenderScene::batches`.
///
/// Handles are raw indices: they may be invalidated by `remove_instance`
/// (swap-removal) and by the batch sort performed in `upload_to_gpu`.
#[derive(Clone, Copy)]
pub struct InstanceHandle {
    pub batch_index: usize,
    pub instance_index: usize,
}
88
89impl RenderScene {
90 pub fn add_instance(
91 &mut self,
92 mesh: Mesh,
93 instance: Instance,
94 allocator: &Arc<StandardMemoryAllocator>,
95 ) -> InstanceHandle {
96 let shader = instance.shader;
97 let compute_shader = instance.physics.compute_shader;
98 for (batch_index, batch) in self.batches.iter_mut().enumerate() {
99 if batch.mesh.vertices.buffer() == mesh.vertices.buffer()
100 && batch.shader == shader
101 && batch.compute_shader == compute_shader
102 {
103 batch.instances.push(instance);
104
105 return InstanceHandle {
106 batch_index,
107 instance_index: batch.instances.len() - 1,
108 };
109 }
110 }
111
112 self.batches.push(RenderBatch {
113 mesh,
114 shader,
115 compute_shader,
116 instances: vec![instance],
117 base_instance_offset: 0,
118 indirect_buffer: Buffer::new_slice::<DrawIndexedIndirectCommand>(
119 allocator,
120 BufferCreateInfo {
121 usage: BufferUsage::INDIRECT_BUFFER
122 | BufferUsage::STORAGE_BUFFER
123 | BufferUsage::TRANSFER_DST,
124 ..Default::default()
125 },
126 AllocationCreateInfo {
127 usage: MemoryUsage::DeviceOnly,
128 ..Default::default()
129 },
130 1,
131 )
132 .unwrap(),
133 });
134
135 InstanceHandle {
136 batch_index: self.batches.len() - 1,
137 instance_index: 0,
138 }
139 }
140
    /// Removes the instance referenced by `handle`; out-of-range handles are
    /// silently ignored.
    ///
    /// NOTE(review): both removals use `swap_remove`, so the last instance of
    /// the batch (and, when a batch empties, the last batch) is moved into the
    /// vacated slot — any other outstanding `InstanceHandle` pointing at the
    /// moved element becomes stale.
    pub fn remove_instance(&mut self, handle: InstanceHandle) {
        if let Some(batch) = self.batches.get_mut(handle.batch_index) {
            if handle.instance_index < batch.instances.len() {
                batch.instances.swap_remove(handle.instance_index);
            }

            // Drop the whole batch once its last instance is gone.
            if batch.instances.is_empty() {
                self.batches.swap_remove(handle.batch_index);
            }
        }
    }
152
153 pub fn new(
154 memory_allocator: &Arc<StandardMemoryAllocator>,
155 descriptor_set_allocator: &Arc<StandardDescriptorSetAllocator>,
156 pipeline: &Arc<GraphicsPipeline>,
157 queue: &Arc<Queue>,
158 frames_in_flight: usize,
159 max_instances: usize,
160 ) -> Self {
161 let mut frames = Vec::new();
162 let default_texture =
163 Self::create_texture_image(memory_allocator, queue, &[255, 255, 255, 255], 1, 1);
164 let default_texture_view = ImageView::new_default(default_texture).unwrap();
165 let texture_sampler = Sampler::new(
166 queue.device().clone(),
167 SamplerCreateInfo {
168 mag_filter: Filter::Linear,
169 min_filter: Filter::Linear,
170 mipmap_mode: SamplerMipmapMode::Linear,
171 lod: 0.0..=vulkano::sampler::LOD_CLAMP_NONE,
172 address_mode: [SamplerAddressMode::Repeat; 3],
173 ..Default::default()
174 },
175 )
176 .unwrap();
177
178 let physics_read = Buffer::new_slice::<InstanceData>(
179 memory_allocator,
180 BufferCreateInfo {
181 usage: BufferUsage::STORAGE_BUFFER
182 | BufferUsage::TRANSFER_DST
183 | BufferUsage::TRANSFER_SRC,
184 ..Default::default()
185 },
186 AllocationCreateInfo {
187 usage: MemoryUsage::DeviceOnly,
188 ..Default::default()
189 },
190 max_instances as u64,
191 )
192 .unwrap();
193 let physics_write = Buffer::new_slice::<InstanceData>(
194 memory_allocator,
195 BufferCreateInfo {
196 usage: BufferUsage::STORAGE_BUFFER
197 | BufferUsage::TRANSFER_DST
198 | BufferUsage::TRANSFER_SRC,
199 ..Default::default()
200 },
201 AllocationCreateInfo {
202 usage: MemoryUsage::DeviceOnly,
203 ..Default::default()
204 },
205 max_instances as u64,
206 )
207 .unwrap();
208
209 for _ in 0..frames_in_flight {
210 let uniform_buffer = Buffer::from_data(
211 memory_allocator,
212 BufferCreateInfo {
213 usage: BufferUsage::UNIFORM_BUFFER,
214 ..Default::default()
215 },
216 AllocationCreateInfo {
217 usage: MemoryUsage::Upload,
218 ..Default::default()
219 },
220 UniformBufferObject::default(),
221 )
222 .unwrap();
223
224 frames.push(FrameData { uniform_buffer });
225 }
226 let hash_size = 65521;
227 let max_per_cell = 128;
228
229 let big_objects_indices = Buffer::new_slice::<u32>(
230 memory_allocator,
231 BufferCreateInfo {
232 usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST,
233 ..Default::default()
234 },
235 AllocationCreateInfo {
236 usage: MemoryUsage::DeviceOnly,
237 ..Default::default()
238 },
239 1024,
240 )
241 .unwrap();
242
243 let grid_counts = Buffer::new_slice::<u32>(
244 memory_allocator,
245 BufferCreateInfo {
246 usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST,
247 ..Default::default()
248 },
249 AllocationCreateInfo {
250 usage: MemoryUsage::DeviceOnly,
251 ..Default::default()
252 },
253 hash_size,
254 )
255 .unwrap();
256
257 let grid_objects = Buffer::new_slice::<u32>(
258 memory_allocator,
259 BufferCreateInfo {
260 usage: BufferUsage::STORAGE_BUFFER,
261 ..Default::default()
262 },
263 AllocationCreateInfo {
264 usage: MemoryUsage::DeviceOnly,
265 ..Default::default()
266 },
267 hash_size * max_per_cell,
268 )
269 .unwrap();
270
271 let visible_indices = Buffer::new_slice::<u32>(
272 memory_allocator,
273 BufferCreateInfo {
274 usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST,
275 ..Default::default()
276 },
277 AllocationCreateInfo {
278 usage: MemoryUsage::DeviceOnly,
279 ..Default::default()
280 },
281 1_000_000,
282 )
283 .unwrap();
284
285 Self {
286 batches: Vec::new(),
287 frames,
288 light_pos: [0.0, 10.0, 0.0],
289 light_color: [1.0, 1.0, 1.0],
290 light_intensity: 50.0,
291 texture_views: vec![default_texture_view],
292 texture_sampler,
293 descriptor_set_allocator: descriptor_set_allocator.clone(),
294 descriptor_sets: vec![Vec::new(); frames_in_flight],
295 total_instances: 0,
296 physics_read,
297 physics_write,
298 big_objects_indices: big_objects_indices,
299 num_big_objects: 1024,
300 grid_counts,
301 grid_objects,
302 visible_indices,
303 max_object_radius: 0.0,
304 }
305 }
306
307 pub fn upload_to_gpu(
308 &mut self,
309 allocator: &Arc<StandardMemoryAllocator>,
310 queue: &Arc<Queue>,
311 compute_registry: &ComputeShaderRegistry,
312 ) -> Vec<ComputeDispatchInfo> {
313 let total_instances = self
314 .batches
315 .iter()
316 .map(|batch| batch.instances.len())
317 .sum::<usize>();
318 self.total_instances = total_instances as u32;
319
320 if total_instances == 0 {
321 return vec![];
322 }
323
324 let override_shader = compute_registry.scene_shader_optional();
325 if override_shader.is_none() {
326 self.batches
327 .sort_by_key(|b| (b.compute_shader.sort_key(), b.shader.sort_key()));
328 }
329
330 let mut flat_data = Vec::with_capacity(total_instances);
331 let mut big_indices: Vec<u32> = Vec::new();
332 let mut max_small_radius = 0.1;
333 let threshold = 2.5;
334 let mut current_idx = 0;
335
336 for batch in &self.batches {
337 for inst in &batch.instances {
338 let m = inst.model_matrix;
339 let scale = f32::max(
340 f32::max(
341 (m[0][0].powi(2) + m[0][1].powi(2) + m[0][2].powi(2)).sqrt(),
342 (m[1][0].powi(2) + m[1][1].powi(2) + m[1][2].powi(2)).sqrt(),
343 ),
344 (m[2][0].powi(2) + m[2][1].powi(2) + m[2][2].powi(2)).sqrt(),
345 );
346
347 let radius = scale * 0.5;
348
349 if radius > threshold {
350 big_indices.push(current_idx as u32);
351 } else {
352 if radius > max_small_radius {
353 max_small_radius = radius;
354 }
355 }
356
357 flat_data.push(InstanceData {
358 model: inst.model_matrix,
359 color: [inst.color[0], inst.color[1], inst.color[2], inst.emissive],
360 mat_props: [inst.roughness, inst.metalness, 0.0, 0.0],
361 velocity: [
362 inst.physics.linear_velocity[0],
363 inst.physics.linear_velocity[1],
364 inst.physics.linear_velocity[2],
365 inst.physics.bounciness,
366 ],
367 angular_velocity: [
368 inst.physics.angular_velocity[0],
369 inst.physics.angular_velocity[1],
370 inst.physics.angular_velocity[2],
371 inst.physics.friction,
372 ],
373 physic_props: [
374 inst.physics.collision_type.sort_key(),
375 inst.physics.mass,
376 inst.physics.gravity_scale,
377 0.0,
378 ],
379 });
380 current_idx += 1;
381 }
382 }
383
384 self.max_object_radius = max_small_radius;
385 self.num_big_objects = big_indices.len() as u32;
386
387 let staging = Buffer::from_iter(
388 allocator,
389 BufferCreateInfo {
390 usage: BufferUsage::TRANSFER_SRC,
391 ..Default::default()
392 },
393 AllocationCreateInfo {
394 usage: MemoryUsage::Upload,
395 ..Default::default()
396 },
397 flat_data,
398 )
399 .unwrap();
400
401 let cmd_allocator =
402 StandardCommandBufferAllocator::new(queue.device().clone(), Default::default());
403 let mut builder = AutoCommandBufferBuilder::primary(
404 &cmd_allocator,
405 queue.queue_family_index(),
406 CommandBufferUsage::OneTimeSubmit,
407 )
408 .unwrap();
409
410 let copy_count = self.total_instances as u64;
411 builder
412 .copy_buffer(CopyBufferInfoTyped::buffers(
413 staging.clone(),
414 self.physics_read.clone().slice(0..copy_count),
415 ))
416 .unwrap();
417 builder
418 .copy_buffer(CopyBufferInfoTyped::buffers(
419 staging.clone(),
420 self.physics_write.clone().slice(0..copy_count),
421 ))
422 .unwrap();
423
424 let mut indirect_data: Vec<DrawIndexedIndirectCommand> = Vec::new();
425 for batch in &self.batches {
426 indirect_data.push(DrawIndexedIndirectCommand {
427 index_count: batch.mesh.index_count,
428 instance_count: batch.instances.len() as u32,
429 first_index: 0,
430 vertex_offset: 0,
431 first_instance: 0,
432 });
433 }
434
435 let indirect_staging = Buffer::from_iter(
436 allocator,
437 BufferCreateInfo {
438 usage: BufferUsage::TRANSFER_SRC,
439 ..Default::default()
440 },
441 AllocationCreateInfo {
442 usage: MemoryUsage::Upload,
443 ..Default::default()
444 },
445 indirect_data,
446 )
447 .unwrap();
448
449 let mut indirect_offset = 0u64;
450 let cmd_size = std::mem::size_of::<DrawIndexedIndirectCommand>() as u64;
451
452 for batch in &self.batches {
453 let cmd = DrawIndexedIndirectCommand {
454 index_count: batch.mesh.index_count,
455 instance_count: batch.instances.len() as u32,
456 first_index: 0,
457 vertex_offset: 0,
458 first_instance: 0,
459 };
460 let single_staging = Buffer::from_iter(
461 allocator,
462 BufferCreateInfo {
463 usage: BufferUsage::TRANSFER_SRC,
464 ..Default::default()
465 },
466 AllocationCreateInfo {
467 usage: MemoryUsage::Upload,
468 ..Default::default()
469 },
470 std::iter::once(cmd),
471 )
472 .unwrap();
473 builder
474 .copy_buffer(CopyBufferInfoTyped::buffers(
475 single_staging,
476 batch.indirect_buffer.clone(),
477 ))
478 .unwrap();
479 indirect_offset += cmd_size;
480 }
481
482 if !big_indices.is_empty() {
483 let big_staging = Buffer::from_iter(
484 allocator,
485 BufferCreateInfo {
486 usage: BufferUsage::TRANSFER_SRC,
487 ..Default::default()
488 },
489 AllocationCreateInfo {
490 usage: MemoryUsage::Upload,
491 ..Default::default()
492 },
493 big_indices,
494 )
495 .unwrap();
496 builder
497 .copy_buffer(CopyBufferInfoTyped::buffers(
498 big_staging,
499 self.big_objects_indices
500 .clone()
501 .slice(0..self.num_big_objects as u64),
502 ))
503 .unwrap();
504 }
505
506 builder
507 .build()
508 .unwrap()
509 .execute(queue.clone())
510 .unwrap()
511 .then_signal_fence_and_flush()
512 .unwrap()
513 .wait(None)
514 .unwrap();
515
516 if let Some(shader_type) = override_shader {
517 vec![ComputeDispatchInfo {
518 compute_shader: shader_type,
519 offset: 0,
520 count: self.total_instances,
521 }]
522 } else {
523 let mut dispatches = Vec::new();
524 let mut current_offset = 0;
525 for batch in &mut self.batches {
526 let count = batch.instances.len() as u32;
527 batch.base_instance_offset = current_offset;
528 dispatches.push(ComputeDispatchInfo {
529 compute_shader: batch.compute_shader,
530 offset: current_offset,
531 count,
532 });
533 current_offset += count;
534 }
535 dispatches
536 }
537 }
538
    /// (Re)builds the per-frame descriptor-set cache for `target_tex_count`
    /// textures.
    ///
    /// For each frame and each texture index two sets are created: index
    /// `tex * 2` binds `physics_read` at binding 2, index `tex * 2 + 1` binds
    /// `physics_write` (ping-pong selection happens at draw time). Bindings:
    /// 0 = frame UBO, 1 = texture + sampler, 3 = visible-index buffer. Frames
    /// whose cache already has the expected size are left untouched.
    ///
    /// Panics if `target_tex_count` exceeds `texture_views.len()`.
    pub fn ensure_descriptor_cache(
        &mut self,
        pipeline: &Arc<GraphicsPipeline>,
        target_tex_count: usize,
    ) {
        // All sets use descriptor set layout 0 of the graphics pipeline.
        let layout = pipeline.layout().set_layouts()[0].clone();

        for (frame_i, frame) in self.frames.iter().enumerate() {
            let total_sets_needed = target_tex_count * 2;

            // Cache already matches the requested texture count — skip.
            if self.descriptor_sets[frame_i].len() == total_sets_needed {
                continue;
            }

            self.descriptor_sets[frame_i].clear();

            for tex_idx in 0..target_tex_count {
                // Variant A: instance data read from `physics_read`.
                let set_a = PersistentDescriptorSet::new(
                    &self.descriptor_set_allocator,
                    layout.clone(),
                    [
                        WriteDescriptorSet::buffer(0, frame.uniform_buffer.clone()),
                        WriteDescriptorSet::image_view_sampler(
                            1,
                            self.texture_views[tex_idx].clone(),
                            self.texture_sampler.clone(),
                        ),
                        WriteDescriptorSet::buffer(2, self.physics_read.clone()),
                        WriteDescriptorSet::buffer(3, self.visible_indices.clone()),
                    ],
                )
                .unwrap();

                // Variant B: instance data read from `physics_write`.
                let set_b = PersistentDescriptorSet::new(
                    &self.descriptor_set_allocator,
                    layout.clone(),
                    [
                        WriteDescriptorSet::buffer(0, frame.uniform_buffer.clone()),
                        WriteDescriptorSet::image_view_sampler(
                            1,
                            self.texture_views[tex_idx].clone(),
                            self.texture_sampler.clone(),
                        ),
                        WriteDescriptorSet::buffer(2, self.physics_write.clone()),
                        WriteDescriptorSet::buffer(3, self.visible_indices.clone()),
                    ],
                )
                .unwrap();

                self.descriptor_sets[frame_i].push(set_a);
                self.descriptor_sets[frame_i].push(set_b);
            }
        }
    }
595
    /// Records direct (non-indirect) draws for every non-empty batch using a
    /// single graphics pipeline.
    ///
    /// `physics_buffer_index` selects which half of each texture's descriptor
    /// pair to bind (0 = `physics_read`, 1 = `physics_write`). The running
    /// `current_offset` is passed as `first_instance` so shaders can index the
    /// flattened instance buffer; batch iteration order must therefore match
    /// the order used when the buffer was uploaded.
    pub fn record_draws(
        &mut self,
        builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
        pipeline: &Arc<GraphicsPipeline>,
        frame_index: usize,
        physics_buffer_index: usize,
    ) {
        let mut current_offset = 0;

        builder.bind_pipeline_graphics(pipeline.clone());

        for batch in &self.batches {
            let count = batch.instances.len() as u32;
            if count == 0 {
                continue;
            }

            builder.bind_vertex_buffers(0, (batch.mesh.vertices.clone(),));

            // Two cached sets per texture: even = read buffer, odd = write.
            let requested_tex = batch.mesh.base_color_texture.unwrap_or(0);
            let descriptor_idx = (requested_tex * 2) + physics_buffer_index;

            builder.bind_descriptor_sets(
                PipelineBindPoint::Graphics,
                pipeline.layout().clone(),
                0,
                self.descriptor_sets[frame_index][descriptor_idx].clone(),
            );

            // Indexed draw when the mesh has an index buffer, plain draw
            // otherwise; both pass the batch's instance offset.
            if let Some(indices) = &batch.mesh.indices {
                builder.bind_index_buffer(indices.clone());
                builder
                    .draw_indexed(batch.mesh.index_count, count, 0, 0, current_offset)
                    .unwrap();
            } else {
                builder
                    .draw(batch.mesh.vertex_count, count, 0, current_offset)
                    .unwrap();
            }

            current_offset += count;
        }
    }
641
    /// Records draws with per-batch pipelines resolved from the shader registry.
    ///
    /// The graphics pipeline is rebound only when the effective shader changes
    /// (batches are sorted by shader in `upload_to_gpu`, so equal shaders are
    /// adjacent). Push constants carry the batch's offset into the
    /// visible-index list plus the culling flag. Indexed meshes draw through
    /// their per-batch indirect buffer; non-indexed meshes fall back to a
    /// direct draw with `base_instance_offset` as `first_instance`.
    pub fn record_draws_multi(
        &mut self,
        builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
        registry: &ShaderRegistry,
        frame_index: usize,
        physics_buffer_index: usize,
        use_culling: bool,
    ) {
        let mut last_shader: Option<ShaderType> = None;

        for batch in &self.batches {
            if batch.instances.is_empty() {
                continue;
            }

            let effective_shader = registry.resolve_shader(batch.shader);
            let pipeline = registry.get_pipeline(effective_shader);

            // Rebind only on shader changes to avoid redundant state switches.
            if last_shader != Some(effective_shader) {
                builder.bind_pipeline_graphics(pipeline.clone());
                last_shader = Some(effective_shader);
            }

            builder.push_constants(
                pipeline.layout().clone(),
                0,
                MeshPushConstants {
                    visible_list_offset: batch.base_instance_offset,
                    use_culling: if use_culling { 1 } else { 0 },
                },
            );

            builder.bind_vertex_buffers(0, (batch.mesh.vertices.clone(),));

            // Two cached sets per texture: even = physics_read, odd =
            // physics_write; `physics_buffer_index` picks the half.
            let requested_tex = batch.mesh.base_color_texture.unwrap_or(0);
            let descriptor_idx = (requested_tex * 2) + physics_buffer_index;

            builder.bind_descriptor_sets(
                PipelineBindPoint::Graphics,
                pipeline.layout().clone(),
                0,
                self.descriptor_sets[frame_index][descriptor_idx].clone(),
            );

            if let Some(indices) = &batch.mesh.indices {
                builder.bind_index_buffer(indices.clone());

                // Draw parameters come from the GPU-side indirect buffer
                // written by `upload_to_gpu` (and possibly a culling pass).
                builder
                    .draw_indexed_indirect(batch.indirect_buffer.clone())
                    .unwrap();
            } else {
                builder
                    .draw(
                        batch.mesh.vertex_count,
                        batch.instances.len() as u32,
                        0,
                        batch.base_instance_offset,
                    )
                    .unwrap();
            }
        }
    }
706 pub fn prepare_frame_ubo(
707 &mut self,
708 frame_index: usize,
709 view: [[f32; 4]; 4],
710 proj: [[f32; 4]; 4],
711 eye_pos: [f32; 3],
712 ) {
713 let mut ubo = self.frames[frame_index].uniform_buffer.write().unwrap();
714 ubo.view = view;
715 ubo.proj = proj;
716 ubo.eye_pos = eye_pos;
717 ubo.light_pos = self.light_pos;
718 ubo.light_color = self.light_color;
719 ubo.light_intensity = self.light_intensity;
720 }
721
    /// Creates a device-local `ImmutableImage` from tightly-packed RGBA8 pixel
    /// data and blocks until the upload has finished.
    ///
    /// The image uses `R8G8B8A8_SRGB` with a full Log2 mipmap chain;
    /// `pixels_rgba` must contain exactly `width * height * 4` bytes or the
    /// vulkano upload will fail.
    fn create_texture_image(
        memory_allocator: &Arc<StandardMemoryAllocator>,
        queue: &Arc<Queue>,
        pixels_rgba: &[u8],
        width: u32,
        height: u32,
    ) -> Arc<ImmutableImage> {
        // Transient allocator + command buffer, used only for this upload.
        let cb_allocator = StandardCommandBufferAllocator::new(
            queue.device().clone(),
            StandardCommandBufferAllocatorCreateInfo::default(),
        );
        let mut upload_builder = AutoCommandBufferBuilder::primary(
            &cb_allocator,
            queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )
        .unwrap();
        let image = ImmutableImage::from_iter::<u8, _, _, _>(
            memory_allocator.as_ref(),
            pixels_rgba.iter().copied(),
            vulkano::image::ImageDimensions::Dim2d {
                width,
                height,
                array_layers: 1,
            },
            vulkano::image::MipmapsCount::Log2,
            Format::R8G8B8A8_SRGB,
            &mut upload_builder,
        )
        .unwrap();
        // Submit and wait so the image is fully uploaded before first use.
        let upload_cmd = upload_builder.build().unwrap();
        vulkano::sync::now(queue.device().clone())
            .then_execute(queue.clone(), upload_cmd)
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap()
            .wait(None)
            .unwrap();
        image
    }
762
763 fn to_rgba8(tex: &Texture) -> Vec<u8> {
764 match tex.pixels.len() as u32 {
765 len if len == tex.width * tex.height * 4 => tex.pixels.clone(),
766 len if len == tex.width * tex.height * 3 => {
767 let mut out = Vec::with_capacity((tex.width * tex.height * 4) as usize);
768 for rgb in tex.pixels.chunks_exact(3) {
769 out.extend_from_slice(&[rgb[0], rgb[1], rgb[2], 255]);
770 }
771 out
772 }
773 _ => vec![255, 255, 255, 255],
774 }
775 }
776
777 pub fn set_textures(
778 &mut self,
779 pipeline: &Arc<GraphicsPipeline>,
780 textures: &[Texture],
781 queue: &Arc<Queue>,
782 memory_allocator: &Arc<StandardMemoryAllocator>,
783 ) {
784 for tex in textures {
785 if tex.width == 0 || tex.height == 0 {
786 continue;
787 }
788 let pixels_rgba = Self::to_rgba8(tex);
789 let image = Self::create_texture_image(
790 memory_allocator,
791 queue,
792 &pixels_rgba,
793 tex.width,
794 tex.height,
795 );
796 let view = ImageView::new_default(image).unwrap();
797 self.texture_views.push(view);
798 }
799 self.ensure_descriptor_cache(pipeline, textures.len());
800 }
801
    /// Sets the scene's point light; values are copied into each frame's UBO by
    /// `prepare_frame_ubo`.
    pub fn set_light(&mut self, position: [f32; 3], color: [f32; 3], intensity: f32) {
        self.light_pos = position;
        self.light_color = color;
        self.light_intensity = intensity;
    }
807}
808
809pub fn record_compute_physics(
810 builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
811 compute_pipeline: &Arc<vulkano::pipeline::ComputePipeline>,
812 compute_set: &Arc<PersistentDescriptorSet>,
813 max_instances: u32,
814 dt: f32,
815 total_objects: u32,
816 num_big_objects: u32,
817) {
818 let workgroups_x = (max_instances as u32 + 255) / 256;
819 if workgroups_x == 0 {
820 return;
821 }
822 builder
823 .bind_pipeline_compute(compute_pipeline.clone())
824 .bind_descriptor_sets(
825 vulkano::pipeline::PipelineBindPoint::Compute,
826 compute_pipeline.layout().clone(),
827 0,
828 compute_set.clone(),
829 )
830 .push_constants(
831 compute_pipeline.layout().clone(),
832 0,
833 crate::scene::object::PhysicsPushConstants {
834 dt,
835 total_objects,
836 offset: 0,
837 count: max_instances as u32,
838 num_big_objects: num_big_objects,
839 _pad: [0, 0, 0],
840 global_gravity: [0.0, -9.81, 0.0, 2.0],
841 },
842 )
843 .dispatch([workgroups_x, 1, 1])
844 .unwrap();
845}
846
847pub fn record_compute_physics_multi(
848 builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
849 registry: &crate::rendering::compute_registry::ComputeShaderRegistry,
850 compute_sets: &HashMap<
851 ComputeShaderType,
852 (Arc<PersistentDescriptorSet>, Arc<PersistentDescriptorSet>),
853 >,
854 grid_build_sets: &(Arc<PersistentDescriptorSet>, Arc<PersistentDescriptorSet>),
855 grid_counts: &Subbuffer<[u32]>,
856 dispatches: &[ComputeDispatchInfo],
857 dt: f32,
858 total_objects: u32,
859 cell_size: f32,
860 num_big_objects: u32,
861 ping_pong: bool,
862) {
863 builder.fill_buffer(grid_counts.clone(), 0u32).unwrap();
864 let build_pipeline = registry.get_pipeline(ComputeShaderType::GridBuild);
865 let grid_set = if ping_pong {
866 &grid_build_sets.1
867 } else {
868 &grid_build_sets.0
869 };
870 builder
871 .bind_pipeline_compute(build_pipeline.clone())
872 .bind_descriptor_sets(
873 PipelineBindPoint::Compute,
874 build_pipeline.layout().clone(),
875 0,
876 grid_set.clone(),
877 )
878 .push_constants(
879 build_pipeline.layout().clone(),
880 0,
881 PhysicsPushConstants {
882 dt,
883 total_objects,
884 offset: 0,
885 count: total_objects,
886 num_big_objects: num_big_objects,
887 _pad: [0, 0, 0],
888 global_gravity: [0.0, -9.81, 0.0, cell_size], },
890 )
891 .dispatch([(total_objects + 255) / 256, 1, 1])
892 .unwrap();
893
894 let mut last_bound = None;
895
896 for dispatch in dispatches {
897 let shader_to_use = dispatch.compute_shader;
898 let compute_pipeline = registry.get_pipeline(shader_to_use);
899
900 if shader_to_use == ComputeShaderType::GridBuild {
901 continue;
902 } else {
903 let (set_0, set_1) = compute_sets.get(&shader_to_use).unwrap();
904 let compute_set = if ping_pong { set_1 } else { set_0 };
905 if last_bound != Some(shader_to_use) {
906 builder.bind_pipeline_compute(compute_pipeline.clone());
907 builder.bind_descriptor_sets(
908 PipelineBindPoint::Compute,
909 compute_pipeline.layout().clone(),
910 0,
911 compute_set.clone(),
912 );
913 last_bound = Some(shader_to_use);
914 }
915 let workgroups_x = (dispatch.count + 255) / 256;
916 if workgroups_x > 0 {
917 builder
918 .push_constants(
919 compute_pipeline.layout().clone(),
920 0,
921 PhysicsPushConstants {
922 dt,
923 total_objects,
924 offset: dispatch.offset,
925 count: dispatch.count,
926 num_big_objects: num_big_objects,
927 _pad: [0, 0, 0],
928 global_gravity: [0.0, -9.81, 0.0, cell_size],
929 },
930 )
931 .dispatch([workgroups_x, 1, 1])
932 .unwrap();
933 }
934 }
935 }
936}
/// Begins the render pass on `framebuffers[img_index]`, sets a full-window
/// viewport and binds `pipeline`, leaving the builder ready for draw calls.
///
/// Clear values (order must match the render pass attachment order):
/// near-black color (0.01, 0.01, 0.02, 1.0) and depth cleared to 1.0.
pub fn begin_render_pass_only(
    builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
    framebuffers: &[Arc<Framebuffer>],
    img_index: u32,
    dims: [u32; 2],
    pipeline: &Arc<GraphicsPipeline>,
) {
    builder
        .begin_render_pass(
            RenderPassBeginInfo {
                clear_values: vec![
                    Some([0.01, 0.01, 0.02, 1.0].into()), Some(1.0.into()),
                ],
                ..RenderPassBeginInfo::framebuffer(framebuffers[img_index as usize].clone())
            },
            SubpassContents::Inline,
        )
        .unwrap()
        .set_viewport(
            0,
            vec![Viewport {
                origin: [0.0, 0.0],
                dimensions: [dims[0] as f32, dims[1] as f32],
                depth_range: 0.0..1.0,
            }],
        )
        .bind_pipeline_graphics(pipeline.clone());
}
966
/// Variant of `record_compute_physics_multi` that rebuilds the spatial grid
/// immediately before *each* physics dispatch range instead of once per frame.
///
/// NOTE(review): the grid-build descriptor set alternates with the local
/// `read_index` (flipped after every dispatch) while the physics set uses the
/// caller-supplied `ping_pong` for the whole frame — confirm this divergence
/// after the first iteration is intended.
/// NOTE(review): the physics push constants hardcode 2.0 in
/// `global_gravity[3]` where the grid-build pass (and
/// `record_compute_physics_multi`) pass `cell_size` — looks inconsistent;
/// verify against the shader.
pub fn record_compute_physics_spatial(
    builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
    registry: &ComputeShaderRegistry,
    compute_sets: &HashMap<
        ComputeShaderType,
        (Arc<PersistentDescriptorSet>, Arc<PersistentDescriptorSet>),
    >,
    grid_build_sets: &(Arc<PersistentDescriptorSet>, Arc<PersistentDescriptorSet>),
    grid_counts: &Subbuffer<[u32]>,
    dispatches: &[ComputeDispatchInfo],
    dt: f32,
    total_objects: u32,
    cell_size: f32,
    num_big_objects: u32,
    ping_pong: bool,
) {
    let build_pipeline = registry.get_pipeline(ComputeShaderType::GridBuild);

    // Alternates the grid-build set per dispatch: 0, 1, 0, 1, ...
    let mut read_index: usize = 0;

    for dispatch in dispatches {
        // Reset per-cell occupancy, then rebuild the grid for this range.
        builder.fill_buffer(grid_counts.clone(), 0u32).unwrap();

        let grid_set = if read_index == 0 {
            &grid_build_sets.0
        } else {
            &grid_build_sets.1
        };

        builder
            .bind_pipeline_compute(build_pipeline.clone())
            .bind_descriptor_sets(
                PipelineBindPoint::Compute,
                build_pipeline.layout().clone(),
                0,
                grid_set.clone(),
            )
            .push_constants(
                build_pipeline.layout().clone(),
                0,
                PhysicsPushConstants {
                    dt,
                    total_objects,
                    offset: 0,
                    count: total_objects,
                    num_big_objects: num_big_objects,
                    _pad: [0, 0, 0],
                    global_gravity: [0.0, -9.81, 0.0, cell_size],
                },
            )
            .dispatch([(total_objects + 255) / 256, 1, 1])
            .unwrap();

        // Physics pass for this range (256 invocations per workgroup).
        let compute_pipeline = registry.get_pipeline(dispatch.compute_shader);
        let (set_0, set_1) = compute_sets.get(&dispatch.compute_shader).unwrap();
        let compute_set = if ping_pong { set_1 } else { set_0 };

        builder
            .bind_pipeline_compute(compute_pipeline.clone())
            .bind_descriptor_sets(
                PipelineBindPoint::Compute,
                compute_pipeline.layout().clone(),
                0,
                compute_set.clone(),
            )
            .push_constants(
                compute_pipeline.layout().clone(),
                0,
                PhysicsPushConstants {
                    dt,
                    total_objects,
                    offset: dispatch.offset,
                    count: dispatch.count,
                    num_big_objects: num_big_objects,
                    _pad: [0, 0, 0],
                    global_gravity: [0.0, -9.81, 0.0, 2.0],
                },
            )
            .dispatch([(dispatch.count + 255) / 256, 1, 1])
            .unwrap();

        read_index ^= 1;
    }
}