use super::*;
use std::mem::ManuallyDrop;

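/// Per-flight-frame synchronization objects and the graphics command buffer
/// used to record and submit one frame of work.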
pub struct VkSubmitData {
    frame_fence: ManuallyDrop<VkFrameDependent<vk::Fence>>,
    render_semaphore: ManuallyDrop<VkFrameDependent<vk::Semaphore>>,
    image_acquire_semaphore: ManuallyDrop<VkFrameDependent<vk::Semaphore>>,

    graphics_command_buffer: VkFrameDependent<vk::CommandBuffer>,

    drop_queue_ref: VkDropQueueRef,
}

impl VkSubmitData {
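    /// Allocates one fence, two semaphores, and one graphics command buffer
    /// for every frame in flight.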
    pub fn new(
        dev: &Device,
        frame: &VkFrame,
        graphics_command_pool: vk::CommandPool,
        drop_queue_ref: &VkDropQueueRef,
    ) -> GResult<Self> {
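        // The `true` flag presumably creates the fences pre-signaled so the
        // first frame's wait_for_fences returns immediately.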
        let frame_fence = ManuallyDrop::new(VkFrameDependent::from_iter(
            (0..frame.get_flight_frames_count())
                .map(|_| new_fence(dev, true))
                .collect::<GResult<Vec<_>>>()?,
        ));
        let render_semaphore = ManuallyDrop::new(VkFrameDependent::from_iter(
            (0..frame.get_flight_frames_count())
                .map(|_| new_semaphore(dev))
                .collect::<GResult<Vec<_>>>()?,
        ));
        let image_acquire_semaphore = ManuallyDrop::new(VkFrameDependent::from_iter(
            (0..frame.get_flight_frames_count())
                .map(|_| new_semaphore(dev))
                .collect::<GResult<Vec<_>>>()?,
        ));
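        // One primary command buffer per flight frame; the builder defaults
        // `level` to vk::CommandBufferLevel::PRIMARY.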
        let graphics_command_buffer = VkFrameDependent::from_iter(
            (0..frame.get_flight_frames_count())
                .map(|_| {
                    let command_buffer_alloc = vk::CommandBufferAllocateInfo::builder()
                        .command_pool(graphics_command_pool)
                        .command_buffer_count(1)
                        .build();
                    Ok(unsafe {
                        dev.allocate_command_buffers(&command_buffer_alloc)
                            .map_err(|e| gpu_api_err!("vulkan submit new command buffer {}", e))?[0]
                    })
                })
                .collect::<GResult<Vec<_>>>()?,
        );
        Ok(VkSubmitData {
            frame_fence,
            render_semaphore,
            image_acquire_semaphore,
            graphics_command_buffer,
            drop_queue_ref: Arc::clone(drop_queue_ref),
        })
    }
}

impl Drop for VkSubmitData {
    fn drop(&mut self) {
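        // Move the sync objects out of ManuallyDrop and push their
        // destruction onto the drop queue so it can happen once the GPU is
        // done with them. The command buffers are not freed here; presumably
        // they are reclaimed when the command pool itself is destroyed.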
        let frame_fence = unsafe { ManuallyDrop::take(&mut self.frame_fence).take_all() };
        let render_semaphore = unsafe { ManuallyDrop::take(&mut self.render_semaphore).take_all() };
        let image_acquire_semaphore =
            unsafe { ManuallyDrop::take(&mut self.image_acquire_semaphore).take_all() };

        self.drop_queue_ref
            .lock()
            .unwrap()
            .push(Box::new(move |dev, _| unsafe {
                frame_fence
                    .into_iter()
                    .for_each(|fence| dev.destroy_fence(fence, None));
                render_semaphore
                    .into_iter()
                    .for_each(|semaphore| dev.destroy_semaphore(semaphore, None));
                image_acquire_semaphore
                    .into_iter()
                    .for_each(|semaphore| dev.destroy_semaphore(semaphore, None));
            }));
    }
}

impl VkContext {
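    /// Records and submits one frame: waits on the frame fence, acquires a
    /// swapchain image if presenting, records buffer transfers and all render
    /// and compute passes, submits to the graphics queue, and presents.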
    pub fn submit(&mut self, submit: Submit, ext: Option<SubmitExt>) -> GResult<()> {
        let ext = ext.unwrap_or_default();

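        // Pull this flight frame's sync objects and command buffer.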
        let frame_fence = *self.submit.frame_fence.get(&self.frame);
        let render_semaphore = *self.submit.render_semaphore.get(&self.frame);
        let image_acquire_semaphore = *self.submit.image_acquire_semaphore.get(&self.frame);
        let graphics_command_buffer = *self.submit.graphics_command_buffer.get(&self.frame);

        unsafe {
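            // Wait until the GPU has finished the submission that last used
            // this frame's resources, then re-arm the fence for this frame.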
            self.core
                .dev
                .wait_for_fences(&[frame_fence], true, std::u64::MAX)
                .unwrap();
            self.core.dev.reset_fences(&[frame_fence]).unwrap();

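            // Acquire the next swapchain image when a surface is attached.
            // ERROR_OUT_OF_DATE_KHR triggers a swapchain rebuild at the
            // current extent and skips this frame entirely.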
            let (swapchain_image_index, _suboptimal) = if let Some(surface) = &*self.surface_ext {
                match surface.swapchain.swapchain_ext.acquire_next_image(
                    surface.swapchain.swapchain,
                    std::u64::MAX,
                    image_acquire_semaphore,
                    vk::Fence::null(),
                ) {
                    Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => {
                        self.surface_extension_set_surface_size(
                            surface.swapchain.extent.width as usize,
                            surface.swapchain.extent.height as usize,
                        )?;
                        return Ok(());
                    }
                    Err(e) => Err(gpu_api_err!("vulkan acquire image {}", e))?,
                    Ok(ret) => ret,
                }
            } else {
                (0, false)
            };

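            // Recording starts from a clean command buffer every frame.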
            self.core
                .dev
                .reset_command_buffer(
                    graphics_command_buffer,
                    vk::CommandBufferResetFlags::empty(),
                )
                .unwrap();

            let command_create = vk::CommandBufferBeginInfo::builder()
                .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT)
                .build();

            self.core
                .dev
                .begin_command_buffer(graphics_command_buffer, &command_create)
                .unwrap();

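            // Record the queued buffer uploads (vertex, index, uniform, and
            // dynamic uniform) before any pass runs. A cloned device handle
            // lets each closure mutably borrow the buffer pools on `self`.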
            let dev = self.core.dev.clone();
            submit.vbo_transfers.iter().for_each(|(vbo, data)| {
                let vbo = self.vbos.get_mut(vbo.id()).unwrap();
                vbo.cmd_transfer(&dev, graphics_command_buffer, data)
                    .unwrap();
            });

            submit.ibo_transfers.iter().for_each(|(ibo, data)| {
                let ibo = self.ibos.get_mut(ibo.id()).unwrap();
                ibo.cmd_transfer(&dev, graphics_command_buffer, data)
                    .unwrap();
            });

            submit.ubo_transfers.iter().for_each(|(ubo, data)| {
                let ubo = self.ubos.get_mut(ubo.id()).unwrap();
                ubo.cmd_transfer(&dev, graphics_command_buffer, data)
                    .unwrap();
            });

            submit
                .dyn_ubo_transfers
                .iter()
                .for_each(|(ubo, data, index)| {
                    let ubo = self.dyn_ubos.get_mut(ubo.id()).unwrap();
                    ubo.cmd_transfer(&dev, graphics_command_buffer, data, *index)
                        .unwrap();
                });

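            // Global barrier: make the writes above available before any
            // later stage reads vertex, index, uniform, or sampled data.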
            let graphics_memory_barrier = vk::MemoryBarrier::builder()
                .src_access_mask(vk::AccessFlags::HOST_WRITE)
                .dst_access_mask(
                    vk::AccessFlags::INDEX_READ
                        | vk::AccessFlags::VERTEX_ATTRIBUTE_READ
                        | vk::AccessFlags::UNIFORM_READ
                        | vk::AccessFlags::SHADER_READ
                        | vk::AccessFlags::TRANSFER_READ,
                )
                .build();
            self.core.dev.cmd_pipeline_barrier(
                graphics_command_buffer,
                vk::PipelineStageFlags::ALL_COMMANDS,
                vk::PipelineStageFlags::ALL_COMMANDS,
                vk::DependencyFlags::empty(),
                &[graphics_memory_barrier],
                &[],
                &[],
            );

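            // Replay every recorded pass in submission order.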
            for pass_data in submit.passes.iter() {
                match pass_data {
                    SubmitPassType::Render(pass_data) => {
                        let pass = self.compiled_passes.get(pass_data.pass.id()).unwrap();

                        let mut clear_values = vec![
                            vk::ClearValue::default();
                            pass.attachment_count + pass.resolve_image_offsets.len()
                        ];

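                        // With resolve attachments present, color clears are
                        // remapped to the resolve images, which sit after the
                        // base attachments in the framebuffer.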
                        for (&attachment, clear) in pass_data.clear_colors.iter() {
                            let resolved_idx = if pass.resolve_image_offsets.is_empty() {
                                attachment.id()
                            } else {
                                pass.attachment_count + pass.resolve_image_offsets[&attachment.id()]
                            };

                            clear_values[resolved_idx] = vk::ClearValue {
                                color: vk::ClearColorValue {
                                    float32: [clear.r, clear.g, clear.b, clear.a],
                                },
                            };
                        }

                        for (&attachment, clear) in pass_data.clear_depths.iter() {
                            clear_values[attachment.id()] = vk::ClearValue {
                                depth_stencil: vk::ClearDepthStencilValue {
                                    depth: clear.depth,
                                    stencil: clear.stencil,
                                },
                            };
                        }

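                        // Begin the render pass over the full render extent,
                        // targeting this swapchain image's framebuffer.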
                        let render_pass_begin = vk::RenderPassBeginInfo::builder()
                            .render_pass(pass.render_pass)
                            .clear_values(&clear_values)
                            .render_area(vk::Rect2D {
                                offset: vk::Offset2D { x: 0, y: 0 },
                                extent: pass.render_extent,
                            })
                            .framebuffer(
                                pass.framebuffer
                                    .get_current_framebuffer(swapchain_image_index),
                            )
                            .build();
                        self.core.dev.cmd_begin_render_pass(
                            graphics_command_buffer,
                            &render_pass_begin,
                            vk::SubpassContents::INLINE,
                        );
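                        // Each step maps to one subpass; walk them in order.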
                        for (step_idx, (step, step_data)) in pass
                            .steps
                            .iter()
                            .zip(pass_data.steps_datas.iter())
                            .enumerate()
                        {
                            if let Some(ibo) = step.index_buffer {
                                let ibo = self.ibos.get(ibo.id()).unwrap();
                                self.core.dev.cmd_bind_index_buffer(
                                    graphics_command_buffer,
                                    ibo.buffer.buffer,
                                    0,
                                    match std::mem::size_of::<IndexBufferElement>() {
                                        4 => vk::IndexType::UINT32,
                                        2 => vk::IndexType::UINT16,
                                        _ => {
                                            unimplemented!("vulkan bad GpuIndexBufferElement type")
                                        }
                                    },
                                );
                            }

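                            // Bind every vertex buffer the step uses, all at
                            // offset zero, starting from binding 0.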
                            let vbo_buffers = step
                                .vertex_buffers
                                .iter()
                                .map(|vbo| {
                                    Ok(self
                                        .vbos
                                        .get(vbo.id())
                                        .ok_or(gpu_api_err!("vulkan bad vbo ({})", vbo.id()))?
                                        .buffer
                                        .buffer)
                                })
                                .collect::<GResult<Vec<_>>>()?;
                            let vbo_offsets = vec![0; step.vertex_buffers.len()];
                            self.core.dev.cmd_bind_vertex_buffers(
                                graphics_command_buffer,
                                0,
                                &vbo_buffers,
                                &vbo_offsets,
                            );

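                            // Draws default to a viewport/scissor covering
                            // the pass's full render area when none is given.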
                            for draw in step_data.draws.iter() {
                                let viewport = draw.viewport.unwrap_or(DrawViewport {
                                    x: 0.0,
                                    y: 0.0,
                                    width: pass.original_pass.render_width as f32,
                                    height: pass.original_pass.render_height as f32,
                                });
                                let scissor = draw.scissor.unwrap_or(DrawScissor {
                                    x: 0,
                                    y: 0,
                                    width: pass.original_pass.render_width,
                                    height: pass.original_pass.render_height,
                                });

                                self.core.dev.cmd_set_viewport(
                                    graphics_command_buffer,
                                    0,
                                    &[vk::Viewport {
                                        x: viewport.x,
                                        y: viewport.y,
                                        width: viewport.width,
                                        height: viewport.height,
                                        min_depth: 0.0,
                                        max_depth: 1.0,
                                    }],
                                );

                                self.core.dev.cmd_set_scissor(
                                    graphics_command_buffer,
                                    0,
                                    &[vk::Rect2D::builder()
                                        .offset(vk::Offset2D {
                                            x: scissor.x as i32,
                                            y: scissor.y as i32,
                                        })
                                        .extent(vk::Extent2D {
                                            width: scissor.width as u32,
                                            height: scissor.height as u32,
                                        })
                                        .build()],
                                );

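                                // Pipelines were prebuilt per (step, program)
                                // pair when the pass was compiled.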
                                self.core.dev.cmd_bind_pipeline(
                                    graphics_command_buffer,
                                    vk::PipelineBindPoint::GRAPHICS,
                                    *pass.pipelines[step_idx].get(&draw.program).ok_or(
                                        gpu_api_err!(
                                            "vulkan submit draw missing program id {:?}",
                                            draw.program
                                        ),
                                    )?,
                                );

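                                // Bind the program's descriptor sets,
                                // applying any dynamic uniform buffer offsets.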
                                let program = self.programs.get(draw.program.id()).unwrap();
                                program.descriptors.cmd_bind(
                                    self,
                                    graphics_command_buffer,
                                    vk::PipelineBindPoint::GRAPHICS,
                                    program.layout,
                                    &draw.dynamic_buffer_indices,
                                )?;

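                                // Indexed draws always use a vertex offset of
                                // zero; `first` indexes into the index buffer.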
                                match draw.ty {
                                    DrawType::Draw => {
                                        self.core.dev.cmd_draw(
                                            graphics_command_buffer,
                                            draw.count as u32,
                                            draw.instance_count as u32,
                                            draw.first as u32,
                                            draw.first_instance as u32,
                                        );
                                    }
                                    DrawType::DrawIndexed => {
                                        self.core.dev.cmd_draw_indexed(
                                            graphics_command_buffer,
                                            draw.count as u32,
                                            draw.instance_count as u32,
                                            draw.first as u32,
                                            0,
                                            draw.first_instance as u32,
                                        );
                                    }
                                }
                            }

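                            // Advance to the next subpass unless this was the
                            // pass's final step.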
                            if step_idx != pass.steps.len() - 1 {
                                self.core.dev.cmd_next_subpass(
                                    graphics_command_buffer,
                                    vk::SubpassContents::INLINE,
                                );
                            }
                        }
                        self.core.dev.cmd_end_render_pass(graphics_command_buffer);
                    }
                    SubmitPassType::Compute(pass_data) => {
                        let compute_pass = self
                            .compiled_compute_passes
                            .get(pass_data.compute_pass.id())
                            .ok_or(gpu_api_err!(
                                "vulkan submit compute pass {:?} does not exist",
                                pass_data.compute_pass,
                            ))?;

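                        // Barrier between compute work and its consumers:
                        // makes compute shader writes available and visible
                        // to subsequent shader reads.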
                        unsafe fn compute_barrier(
                            dev: &Device,
                            graphics_command_buffer: vk::CommandBuffer,
                        ) {
                            let memory_barrier = vk::MemoryBarrier::builder()
                                .src_access_mask(vk::AccessFlags::SHADER_WRITE)
                                .dst_access_mask(vk::AccessFlags::SHADER_READ)
                                .build();

                            dev.cmd_pipeline_barrier(
                                graphics_command_buffer,
                                vk::PipelineStageFlags::COMPUTE_SHADER,
                                vk::PipelineStageFlags::VERTEX_SHADER,
                                vk::DependencyFlags::empty(),
                                &[memory_barrier],
                                &[],
                                &[],
                            )
                        }

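                        // Each dispatch must come from a program registered
                        // with this pass; blocking dispatches get a barrier
                        // before the next one runs.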
                        for dispatch in pass_data.dispatches.iter() {
                            compute_pass
                                .added_programs
                                .contains(&dispatch.program)
                                .then_some(())
                                .ok_or(gpu_api_err!(
                                    "vulkan submit compute program {:?} was not added",
                                    dispatch.program
                                ))?;
                            let program = self.compute_programs.get(dispatch.program.id()).ok_or(
                                gpu_api_err!("vulkan submit compute program {:?}", dispatch.program),
                            )?;
                            self.core.dev.cmd_bind_pipeline(
                                graphics_command_buffer,
                                vk::PipelineBindPoint::COMPUTE,
                                program.pipeline,
                            );
                            program.descriptors.cmd_bind(
                                self,
                                graphics_command_buffer,
                                vk::PipelineBindPoint::COMPUTE,
                                program.layout,
                                &dispatch.dynamic_buffer_indices,
                            )?;
                            self.core.dev.cmd_dispatch(
                                graphics_command_buffer,
                                dispatch.workgroup_count_x as u32,
                                dispatch.workgroup_count_y as u32,
                                dispatch.workgroup_count_z as u32,
                            );

                            if dispatch.ty == context::extensions::DispatchType::Blocking {
                                compute_barrier(&self.core.dev, graphics_command_buffer);
                            }
                        }

                        if compute_pass.set_blocking {
                            compute_barrier(&self.core.dev, graphics_command_buffer);
                        }
                    }
                }
            }
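            // Copy requested shader storage buffers back into their
            // host-visible staging buffers so the CPU can read the results.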
            submit.ssbo_copy_backs.iter().try_for_each(|ssbo_id| {
                let ssbo = self.ssbos.get(ssbo_id.id()).ok_or(gpu_api_err!(
                    "vulkan shader storage buffer sync id {:?} does not exist",
                    ssbo_id
                ))?;

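                // First barrier: make shader writes to the SSBO available to
                // the transfer stage before copying from it.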
                let barrier = vk::BufferMemoryBarrier::builder()
                    .src_access_mask(vk::AccessFlags::SHADER_WRITE)
                    .dst_access_mask(vk::AccessFlags::TRANSFER_READ)
                    .buffer(ssbo.buffer.buffer)
                    .size(vk::WHOLE_SIZE)
                    .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                    .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                    .build();

                self.core.dev.cmd_pipeline_barrier(
                    graphics_command_buffer,
                    vk::PipelineStageFlags::VERTEX_SHADER
                        | vk::PipelineStageFlags::FRAGMENT_SHADER
                        | vk::PipelineStageFlags::COMPUTE_SHADER,
                    vk::PipelineStageFlags::TRANSFER,
                    vk::DependencyFlags::empty(),
                    &[],
                    &[barrier],
                    &[],
                );

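                // Copy the full buffer into its staging counterpart.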
                let copy_region = vk::BufferCopy::builder()
                    .size(ssbo.buffer.size as u64)
                    .build();

                self.core.dev.cmd_copy_buffer(
                    graphics_command_buffer,
                    ssbo.buffer.buffer,
                    ssbo.staging.as_ref().unwrap().buffer,
                    &[copy_region],
                );

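                // Second barrier: make the transfer write visible to host
                // reads of the staging buffer.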
                let barrier = vk::BufferMemoryBarrier::builder()
                    .src_access_mask(vk::AccessFlags::TRANSFER_WRITE)
                    .dst_access_mask(vk::AccessFlags::HOST_READ)
                    .buffer(ssbo.staging.as_ref().unwrap().buffer)
                    .size(vk::WHOLE_SIZE)
                    .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                    .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                    .build();

                self.core.dev.cmd_pipeline_barrier(
                    graphics_command_buffer,
                    vk::PipelineStageFlags::TRANSFER,
                    vk::PipelineStageFlags::HOST,
                    vk::DependencyFlags::empty(),
                    &[],
                    &[barrier],
                    &[],
                );

                Ok(())
            })?;

            self.core
                .dev
                .end_command_buffer(graphics_command_buffer)
                .unwrap();

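            // The render semaphore is signaled for the presentation engine,
            // and the submission waits on the image acquire semaphore, but
            // only when some pass actually presents to the surface.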
            let mut submit_signal_semaphores = vec![];
            let mut submit_wait_semaphores = vec![];

            let should_present = submit.passes.iter().any(|pass| match pass {
                SubmitPassType::Render(pass) => {
                    self.compiled_passes[pass.pass.id()].should_present
                }
                _ => false,
            });
            if should_present {
                submit_signal_semaphores.push(render_semaphore);
                submit_wait_semaphores.push(image_acquire_semaphore);
            }
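            // Bind the arrays to locals so the pointers the builder stores
            // stay valid until queue_submit consumes the submit info.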
            let wait_dst_stage_mask = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
            let command_buffers = [graphics_command_buffer];
            let submit_create = vk::SubmitInfo::builder()
                .wait_dst_stage_mask(&wait_dst_stage_mask)
                .wait_semaphores(&submit_wait_semaphores)
                .signal_semaphores(&submit_signal_semaphores)
                .command_buffers(&command_buffers)
                .build();

            self.core
                .dev
                .queue_submit(self.core.graphics_queue, &[submit_create], frame_fence)
                .unwrap();

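            // The sync extension turns this into a blocking submit: wait for
            // the GPU to finish before returning.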
            if ext.sync.is_some() {
                self.core
                    .dev
                    .wait_for_fences(&[frame_fence], true, std::u64::MAX)
                    .unwrap();
            }

            self.frame.advance_frame();

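            // Present the acquired image. As with acquire_next_image,
            // ERROR_OUT_OF_DATE_KHR rebuilds the swapchain at the current
            // extent instead of failing.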
            if should_present {
                if let Some(surface) = &*self.surface_ext {
                    let present_wait_semaphores = [render_semaphore];
                    let swapchains = [surface.swapchain.swapchain];
                    let image_indices = [swapchain_image_index];
                    let present_create = vk::PresentInfoKHR::builder()
                        .wait_semaphores(&present_wait_semaphores)
                        .swapchains(&swapchains)
                        .image_indices(&image_indices)
                        .build();

                    match surface
                        .swapchain
                        .swapchain_ext
                        .queue_present(self.core.graphics_queue, &present_create)
                    {
                        Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => {
                            self.surface_extension_set_surface_size(
                                surface.swapchain.extent.width as usize,
                                surface.swapchain.extent.height as usize,
                            )?;
                        }
                        Err(e) => Err(gpu_api_err!("vulkan queue present {}", e))?,
                        _ => {}
                    };
                } else {
                    Err(gpu_api_err!(
                        "vulkan tried to render to surface without surface extension"
                    ))?;
                }
            }
        }

        Ok(())
    }

    pub fn sync_submit() {}
}