// vulkano_taskgraph/graph/execute.rs

use super::{
    BarrierIndex, ClearAttachmentIndex, ExecutableTaskGraph, Instruction, NodeIndex,
    RenderPassIndex, ResourceAccess, SemaphoreIndex,
};
use crate::{
    command_buffer::RecordingCommandBuffer,
    linear_map::LinearMap,
    resource::{
        BufferAccess, BufferState, DeathRow, ImageAccess, ImageState, Resources, SwapchainState,
    },
    ClearValues, Id, InvalidSlotError, ObjectType, TaskContext, TaskError,
};
use ash::vk;
use concurrent_slotmap::epoch;
use smallvec::{smallvec, SmallVec};
use std::{
    error::Error,
    fmt, mem,
    ops::Range,
    ptr,
    sync::{atomic::Ordering, Arc},
};
use vulkano::{
    buffer::{Buffer, BufferMemory},
    command_buffer as raw,
    device::{Device, DeviceOwned, Queue},
    format::ClearValue,
    image::{
        view::{ImageView, ImageViewCreateInfo},
        Image, ImageSubresourceRange,
    },
    render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass},
    swapchain::{AcquireNextImageInfo, AcquiredImage, Swapchain},
    sync::{
        fence::{Fence, FenceCreateFlags, FenceCreateInfo},
        semaphore::Semaphore,
        AccessFlags, PipelineStages,
    },
    Validated, Version, VulkanError, VulkanObject,
};

impl<W: ?Sized + 'static> ExecutableTaskGraph<W> {
    /// Executes the next frame of the [flight] given by `flight_id`.
    ///
    /// # Safety
    ///
    /// - There must be no other task graphs executing that access any of the same subresources as
    ///   `self`.
    /// - A subresource in flight must not be accessed in more than one frame in flight.
    ///
    /// # Panics
    ///
    /// - Panics if `resource_map` doesn't map the virtual resources of `self` exhaustively.
    /// - Panics if `self.flight_id()` is invalid.
    /// - Panics if another thread is already executing a task graph using the flight.
    /// - Panics if `resource_map` maps to any swapchain that isn't owned by the flight.
    /// - Panics if the oldest frame of the flight wasn't [waited] on.
    ///
    /// [flight]: crate::resource::Flight
    /// [waited]: crate::resource::Flight::wait
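    ///
    /// # Examples
    ///
    /// A minimal sketch of executing one frame, assuming a `task_graph`, a matching
    /// `resource_map`, and a `world` set up elsewhere (these names are placeholders, not part
    /// of this API):
    ///
    /// ```ignore
    /// // SAFETY: no other task graph accessing the same subresources is executing, and the
    /// // oldest frame of the flight has been waited on (see `Flight::wait`).
    /// unsafe { task_graph.execute(resource_map, &world, || {}) }?;
    /// ```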
    pub unsafe fn execute(
        &self,
        resource_map: ResourceMap<'_>,
        world: &W,
        pre_present_notify: impl FnOnce(),
    ) -> Result {
        assert!(ptr::eq(
            resource_map.virtual_resources,
            &self.graph.resources,
        ));
        assert!(resource_map.is_exhaustive());

        let flight_id = self.flight_id;

        // SAFETY: `resource_map` owns an `epoch::Guard`.
        let flight = unsafe {
            resource_map
                .physical_resources
                .flight_unprotected(flight_id)
        }
        .expect("invalid flight");

        let mut flight_state = flight.state.try_lock().unwrap_or_else(|| {
            panic!(
                "another thread is already executing a task graph using the flight {flight_id:?}",
            );
        });

        // TODO: This call is quite expensive.
        assert!(
            flight.current_fence().read().is_signaled()?,
            "you must wait on the fence for the current frame before submitting more work",
        );

        for &swapchain_id in &self.swapchains {
            // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
            let swapchain_state = unsafe { resource_map.swapchain_unchecked(swapchain_id) };

            assert_eq!(
                swapchain_state.flight_id(),
                flight_id,
                "`resource_map` must not map to any swapchain not owned by the flight \
                corresponding to `flight_id`",
            );
        }

        let current_frame_index = flight.current_frame_index();
        let death_row = &mut flight_state.death_rows[current_frame_index as usize];

        for object in death_row.drain(..) {
            // FIXME:
            drop(object);
        }

        // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
        unsafe { self.acquire_images_khr(&resource_map, current_frame_index) }?;

        let mut current_fence = flight.current_fence().write();

        // SAFETY: We checked that the fence has been signalled.
        unsafe { current_fence.reset_unchecked() }?;

        // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
        unsafe { self.invalidate_mapped_memory_ranges(&resource_map) }?;

        // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
        unsafe { self.create_framebuffers(&resource_map) }?;

        let mut state_guard = StateGuard {
            executable: self,
            resource_map: &resource_map,
            current_fence: &mut current_fence,
            submission_count: 0,
        };

        let execute_instructions = if self.device().enabled_features().synchronization2 {
            Self::execute_instructions2
        } else {
            Self::execute_instructions
        };

        // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
        unsafe {
            execute_instructions(
                self,
                &resource_map,
                death_row,
                current_frame_index,
                state_guard.current_fence,
                &mut state_guard.submission_count,
                world,
            )
        }?;

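        // Execution succeeded, so defuse the guard; dropping it is presumably only meant to
        // recover from a partially completed execution (its definition isn't shown here).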
        mem::forget(state_guard);

        for semaphore in self.semaphores.borrow().iter() {
            death_row.push(semaphore.clone());
        }

        unsafe { flight.next_frame() };

        pre_present_notify();

        // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
        let res = unsafe { self.present_images_khr(&resource_map, current_frame_index) };

        // SAFETY: We checked that `resource_map` maps the virtual IDs exhaustively.
        unsafe { self.update_resource_state(&resource_map, &self.last_accesses) };

        resource_map
            .physical_resources
            .try_advance_global_and_collect(&resource_map.guard);

        res
    }

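    /// Acquires the next image of every swapchain used by the graph and stores the acquired
    /// image index in the swapchain's state. Swapchains that still have an acquired image, for
    /// example because a previous execution failed mid-frame, are skipped.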
    unsafe fn acquire_images_khr(
        &self,
        resource_map: &ResourceMap<'_>,
        current_frame_index: u32,
    ) -> Result {
        for &swapchain_id in &self.swapchains {
            // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs exhaustively.
            let swapchain_state = unsafe { resource_map.swapchain_unchecked(swapchain_id) };
            let semaphore =
                &swapchain_state.semaphores[current_frame_index as usize].image_available_semaphore;

            // Make sure not to acquire another image index if we already acquired one. This can
            // happen when using multiple swapchains, if one acquire succeeds and another fails, or
            // when executing a submission or presenting an image fails.
            if swapchain_state.current_image_index.load(Ordering::Relaxed) != u32::MAX {
                continue;
            }

            let res = unsafe {
                swapchain_state
                    .swapchain()
                    .acquire_next_image(&AcquireNextImageInfo {
                        semaphore: Some(semaphore.clone()),
                        ..Default::default()
                    })
            };

            match res {
                Ok(AcquiredImage { image_index, .. }) => {
                    swapchain_state
                        .current_image_index
                        .store(image_index, Ordering::Relaxed);
                }
                Err(error) => {
                    swapchain_state
                        .current_image_index
                        .store(u32::MAX, Ordering::Relaxed);
                    return Err(ExecuteError::Swapchain {
                        swapchain_id,
                        error,
                    });
                }
            }
        }

        Ok(())
    }

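    /// Invalidates the mapped memory ranges of every host-read buffer whose memory is
    /// host-visible but not host-coherent, making device writes visible to host reads.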
    unsafe fn invalidate_mapped_memory_ranges(&self, resource_map: &ResourceMap<'_>) -> Result {
        let mut mapped_memory_ranges = Vec::new();

        for &buffer_id in &self.graph.resources.host_reads {
            // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs exhaustively.
            let buffer = unsafe { resource_map.buffer_unchecked(buffer_id) }.buffer();

            let allocation = match buffer.memory() {
                BufferMemory::Normal(a) => a,
                BufferMemory::Sparse => todo!("`TaskGraph` doesn't support sparse binding yet"),
                BufferMemory::External => continue,
                _ => unreachable!(),
            };

            if allocation.atom_size().is_none() {
                continue;
            }

            if unsafe { allocation.mapped_slice_unchecked(..) }.is_err() {
                continue;
            }

            // This works because the memory allocator must align allocations to the non-coherent
            // atom size when the memory is host-visible but not host-coherent.
            mapped_memory_ranges.push(
                vk::MappedMemoryRange::default()
                    .memory(allocation.device_memory().handle())
                    .offset(allocation.offset())
                    .size(allocation.size()),
            );
        }

        if !mapped_memory_ranges.is_empty() {
            let fns = self.device().fns();
            unsafe {
                (fns.v1_0.invalidate_mapped_memory_ranges)(
                    self.device().handle(),
                    mapped_memory_ranges.len() as u32,
                    mapped_memory_ranges.as_ptr(),
                )
            }
            .result()
            .map_err(VulkanError::from)?;
        }

        Ok(())
    }

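    /// Creates the framebuffers of every render pass in the graph if they don't exist yet, or
    /// recreates them if any of their attachments changed since the last execution.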
    unsafe fn create_framebuffers(&self, resource_map: &ResourceMap<'_>) -> Result {
        for render_pass_state in self.render_passes.borrow_mut().iter_mut() {
            // If we're executing this task graph for the first time, there are no framebuffers
            // yet, so we need to create them.
            if render_pass_state.framebuffers.is_empty() {
                unsafe {
                    create_framebuffers(
                        &render_pass_state.render_pass,
                        &render_pass_state.attachments,
                        &mut render_pass_state.framebuffers,
                        &self.swapchains,
                        resource_map,
                    )
                }?;
                continue;
            }

            // Otherwise we need to recreate the framebuffers for a given render pass if any of its
            // attachments changed.
            for (&id, attachment) in render_pass_state
                .attachments
                .keys()
                .zip(render_pass_state.framebuffers[0].attachments())
            {
                let image = match id.object_type() {
                    ObjectType::Image => {
                        let image_id = unsafe { id.parametrize() };

                        unsafe { resource_map.image_unchecked(image_id) }.image()
                    }
                    ObjectType::Swapchain => {
                        let swapchain_id = unsafe { id.parametrize() };

                        &unsafe { resource_map.swapchain_unchecked(swapchain_id) }.images()[0]
                    }
                    _ => unreachable!(),
                };

                if attachment.image() != image {
                    unsafe {
                        create_framebuffers(
                            &render_pass_state.render_pass,
                            &render_pass_state.attachments,
                            &mut render_pass_state.framebuffers,
                            &self.swapchains,
                            resource_map,
                        )
                    }?;
                    break;
                }
            }
        }

        Ok(())
    }

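    /// Executes the instructions using the `synchronization2` commands; `execute_instructions`
    /// below is the equivalent fallback for devices without that feature.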
    unsafe fn execute_instructions2(
        &self,
        resource_map: &ResourceMap<'_>,
        death_row: &mut DeathRow,
        current_frame_index: u32,
        current_fence: &Fence,
        submission_count: &mut usize,
        world: &W,
    ) -> Result {
        let mut state = ExecuteState2::new(
            self,
            resource_map,
            death_row,
            current_frame_index,
            current_fence,
            submission_count,
            world,
        )?;
        let mut execute_initial_barriers = true;

        for instruction in self.instructions.iter().cloned() {
            if execute_initial_barriers {
                let submission = current_submission!(state);
                state.initial_pipeline_barrier(submission.initial_barrier_range.clone());
                execute_initial_barriers = false;
            }

            match instruction {
                Instruction::WaitAcquire {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.wait_acquire(swapchain_id, stage_mask);
                }
                Instruction::WaitSemaphore {
                    semaphore_index,
                    stage_mask,
                } => {
                    state.wait_semaphore(semaphore_index, stage_mask);
                }
                Instruction::ExecuteTask { node_index } => {
                    state.execute_task(node_index)?;
                }
                Instruction::PipelineBarrier { barrier_range } => {
                    state.pipeline_barrier(barrier_range)?;
                }
                Instruction::BeginRenderPass { render_pass_index } => {
                    state.begin_render_pass(render_pass_index)?;
                }
                Instruction::NextSubpass => {
                    state.next_subpass();
                }
                Instruction::EndRenderPass => {
                    state.end_render_pass();
                }
                Instruction::ClearAttachments {
                    node_index,
                    render_pass_index,
                    clear_attachment_range,
                } => {
                    state.clear_attachments(
                        node_index,
                        render_pass_index,
                        clear_attachment_range,
                    )?;
                }
                Instruction::SignalSemaphore {
                    semaphore_index,
                    stage_mask,
                } => {
                    state.signal_semaphore(semaphore_index, stage_mask);
                }
                Instruction::SignalPrePresent {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.signal_pre_present(swapchain_id, stage_mask);
                }
                Instruction::WaitPrePresent {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.wait_pre_present(swapchain_id, stage_mask);
                }
                Instruction::SignalPresent {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.signal_present(swapchain_id, stage_mask);
                }
                Instruction::FlushSubmit => {
                    state.flush_submit()?;
                }
                Instruction::Submit => {
                    state.submit()?;
                    execute_initial_barriers = true;
                }
            }
        }

        Ok(())
    }

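    /// Executes the instructions using the legacy synchronization commands. This mirrors
    /// `execute_instructions2` above, except that `ExecuteState` records Vulkan 1.0 barriers
    /// and submits with `vkQueueSubmit` rather than `vkQueueSubmit2`.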
    unsafe fn execute_instructions(
        &self,
        resource_map: &ResourceMap<'_>,
        death_row: &mut DeathRow,
        current_frame_index: u32,
        current_fence: &Fence,
        submission_count: &mut usize,
        world: &W,
    ) -> Result {
        let mut state = ExecuteState::new(
            self,
            resource_map,
            death_row,
            current_frame_index,
            current_fence,
            submission_count,
            world,
        )?;
        let mut execute_initial_barriers = true;

        for instruction in self.instructions.iter().cloned() {
            if execute_initial_barriers {
                let submission = current_submission!(state);
                state.initial_pipeline_barrier(submission.initial_barrier_range.clone());
                execute_initial_barriers = false;
            }

            match instruction {
                Instruction::WaitAcquire {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.wait_acquire(swapchain_id, stage_mask);
                }
                Instruction::WaitSemaphore {
                    semaphore_index,
                    stage_mask,
                } => {
                    state.wait_semaphore(semaphore_index, stage_mask);
                }
                Instruction::ExecuteTask { node_index } => {
                    state.execute_task(node_index)?;
                }
                Instruction::PipelineBarrier { barrier_range } => {
                    state.pipeline_barrier(barrier_range)?;
                }
                Instruction::BeginRenderPass { render_pass_index } => {
                    state.begin_render_pass(render_pass_index)?;
                }
                Instruction::NextSubpass => {
                    state.next_subpass();
                }
                Instruction::EndRenderPass => {
                    state.end_render_pass();
                }
                Instruction::ClearAttachments {
                    node_index,
                    render_pass_index,
                    clear_attachment_range,
                } => {
                    state.clear_attachments(
                        node_index,
                        render_pass_index,
                        clear_attachment_range,
                    )?;
                }
                Instruction::SignalSemaphore {
                    semaphore_index,
                    stage_mask,
                } => {
                    state.signal_semaphore(semaphore_index, stage_mask);
                }
                Instruction::SignalPrePresent {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.signal_pre_present(swapchain_id, stage_mask);
                }
                Instruction::WaitPrePresent {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.wait_pre_present(swapchain_id, stage_mask);
                }
                Instruction::SignalPresent {
                    swapchain_id,
                    stage_mask,
                } => {
                    state.signal_present(swapchain_id, stage_mask);
                }
                Instruction::FlushSubmit => {
                    state.flush_submit()?;
                }
                Instruction::Submit => {
                    state.submit()?;
                    execute_initial_barriers = true;
                }
            }
        }

        Ok(())
    }

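    /// Flushes the mapped memory ranges of every host-written buffer whose memory is
    /// host-visible but not host-coherent, making host writes visible to the device.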
    unsafe fn flush_mapped_memory_ranges(&self, resource_map: &ResourceMap<'_>) -> Result {
        let mut mapped_memory_ranges = Vec::new();

        for &buffer_id in &self.graph.resources.host_writes {
            // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs exhaustively.
            let buffer = unsafe { resource_map.buffer_unchecked(buffer_id) }.buffer();

            let allocation = match buffer.memory() {
                BufferMemory::Normal(a) => a,
                BufferMemory::Sparse => todo!("`TaskGraph` doesn't support sparse binding yet"),
                BufferMemory::External => continue,
                _ => unreachable!(),
            };

            if allocation.atom_size().is_none() {
                continue;
            }

            if unsafe { allocation.mapped_slice_unchecked(..) }.is_err() {
                continue;
            }

            // This works because the memory allocator must align allocations to the non-coherent
            // atom size when the memory is host-visible but not host-coherent.
            mapped_memory_ranges.push(
                vk::MappedMemoryRange::default()
                    .memory(allocation.device_memory().handle())
                    .offset(allocation.offset())
                    .size(allocation.size()),
            );
        }

        if !mapped_memory_ranges.is_empty() {
            let fns = self.device().fns();
            unsafe {
                (fns.v1_0.flush_mapped_memory_ranges)(
                    self.device().handle(),
                    mapped_memory_ranges.len() as u32,
                    mapped_memory_ranges.as_ptr(),
                )
            }
            .result()
            .map_err(VulkanError::from)?;
        }

        Ok(())
    }

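    /// Presents the current image of every swapchain used by the graph on the present queue in
    /// one `vkQueuePresentKHR` call, then collects the per-swapchain results.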
    unsafe fn present_images_khr(
        &self,
        resource_map: &ResourceMap<'_>,
        current_frame_index: u32,
    ) -> Result {
        let Some(present_queue) = &self.present_queue else {
            return Ok(());
        };

        let swapchain_count = self.swapchains.len();
        let mut semaphores = SmallVec::<[_; 1]>::with_capacity(swapchain_count);
        let mut swapchains = SmallVec::<[_; 1]>::with_capacity(swapchain_count);
        let mut image_indices = SmallVec::<[_; 1]>::with_capacity(swapchain_count);
        let mut results = SmallVec::<[_; 1]>::with_capacity(swapchain_count);

        for &swapchain_id in &self.swapchains {
            // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs exhaustively.
            let swapchain_state = unsafe { resource_map.swapchain_unchecked(swapchain_id) };
            semaphores.push(
                swapchain_state.semaphores[current_frame_index as usize]
                    .tasks_complete_semaphore
                    .handle(),
            );
            swapchains.push(swapchain_state.swapchain().handle());
            image_indices.push(swapchain_state.current_image_index().unwrap());
            results.push(vk::Result::SUCCESS);
        }

        let present_info = vk::PresentInfoKHR::default()
            .wait_semaphores(&semaphores)
            .swapchains(&swapchains)
            .image_indices(&image_indices)
            .results(&mut results);

        let fns = self.device().fns();
        let queue_present_khr = fns.khr_swapchain.queue_present_khr;
        let _ = unsafe { queue_present_khr(present_queue.handle(), &present_info) };

        let mut res = Ok(());

        for (&result, &swapchain_id) in results.iter().zip(&self.swapchains) {
            // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs exhaustively.
            let swapchain_state = unsafe { resource_map.swapchain_unchecked(swapchain_id) };

            // TODO: Could there be a use case for keeping the old image contents?
            unsafe { swapchain_state.set_access(ImageAccess::NONE) };

            // In case of these error codes, the semaphore wait operation is not executed, so the
            // acquired image index must be kept; for every other result the wait was consumed and
            // a new image has to be acquired.
            if !matches!(
                result,
                vk::Result::ERROR_OUT_OF_HOST_MEMORY
                    | vk::Result::ERROR_OUT_OF_DEVICE_MEMORY
                    | vk::Result::ERROR_DEVICE_LOST
            ) {
                swapchain_state
                    .current_image_index
                    .store(u32::MAX, Ordering::Relaxed);
            }

            if !matches!(result, vk::Result::SUCCESS | vk::Result::SUBOPTIMAL_KHR) {
                // Return the first error for consistency with the acquisition logic.
                if res.is_ok() {
                    res = Err(ExecuteError::Swapchain {
                        swapchain_id,
                        error: Validated::Error(result.into()),
                    });
                }
            }
        }

        res
    }

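    /// Writes the last access of each resource in the graph back into its physical state, so
    /// that future executions can synchronize against this one.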
    unsafe fn update_resource_state(
        &self,
        resource_map: &ResourceMap<'_>,
        last_accesses: &[ResourceAccess],
    ) {
        for (id, _) in self.graph.resources.iter() {
            let access = last_accesses[id.index() as usize];

            match id.object_type() {
                ObjectType::Buffer => {
                    let id = unsafe { id.parametrize() };
                    // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs
                    // exhaustively.
                    let state = unsafe { resource_map.buffer_unchecked(id) };
                    let access = BufferAccess::from_masks(
                        access.stage_mask,
                        access.access_mask,
                        access.queue_family_index,
                    );
                    unsafe { state.set_access(access) };
                }
                ObjectType::Image => {
                    let id = unsafe { id.parametrize() };
                    // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs
                    // exhaustively.
                    let state = unsafe { resource_map.image_unchecked(id) };
                    let access = ImageAccess::from_masks(
                        access.stage_mask,
                        access.access_mask,
                        access.image_layout,
                        access.queue_family_index,
                    );
                    unsafe { state.set_access(access) };
                }
                ObjectType::Swapchain => {
                    let id = unsafe { id.parametrize() };
                    // SAFETY: The caller must ensure that `resource_map` maps the virtual IDs
                    // exhaustively.
                    let state = unsafe { resource_map.swapchain_unchecked(id) };
                    let access = ImageAccess::from_masks(
                        access.stage_mask,
                        access.access_mask,
                        access.image_layout,
                        access.queue_family_index,
                    );
                    unsafe { state.set_access(access) };
                }
                _ => unreachable!(),
            }
        }
    }
}

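/// Creates one framebuffer for every combination of swapchain images, so that the framebuffer
/// matching the currently acquired image indices can be looked up at execution time. For
/// example, two swapchains with 3 and 2 images respectively yield 3 * 2 = 6 framebuffers.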
unsafe fn create_framebuffers(
    render_pass: &Arc<RenderPass>,
    attachments: &LinearMap<Id, super::AttachmentState>,
    framebuffers: &mut Vec<Arc<Framebuffer>>,
    swapchains: &[Id<Swapchain>],
    resource_map: &ResourceMap<'_>,
) -> Result<()> {
    let swapchain_image_counts = swapchains
        .iter()
        .map(|&id| {
            let swapchain_state = unsafe { resource_map.swapchain_unchecked(id) };

            swapchain_state.swapchain().image_count()
        })
        .collect::<SmallVec<[_; 1]>>();
    let mut swapchain_image_indices: SmallVec<[u32; 1]> =
        smallvec![0; swapchain_image_counts.len()];

    framebuffers.clear();
    framebuffers.reserve_exact(swapchain_image_counts.iter().product::<u32>() as usize);

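    // Enumerate every combination of swapchain image indices like a mixed-radix counter: after
    // each framebuffer, the index vector is incremented by one, carrying into the next position
    // whenever a position reaches its swapchain's image count.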
    'outer: loop {
        let framebuffer = Framebuffer::new(
            render_pass.clone(),
            FramebufferCreateInfo {
                attachments: attachments
                    .iter()
                    .map(|(&id, attachment)| {
                        let image = match id.object_type() {
                            ObjectType::Image => {
                                let image_id = unsafe { id.parametrize() };

                                unsafe { resource_map.image_unchecked(image_id) }.image()
                            }
                            ObjectType::Swapchain => {
                                let swapchain_id = unsafe { id.parametrize() };
                                let swapchain_state =
                                    unsafe { resource_map.swapchain_unchecked(swapchain_id) };
                                let i = swapchains.iter().position(|x| x.erase() == id).unwrap();
                                let image_index = swapchain_image_indices[i];

                                &swapchain_state.images()[image_index as usize]
                            }
                            _ => unreachable!(),
                        };

                        ImageView::new(
                            image.clone(),
                            ImageViewCreateInfo {
                                format: attachment.format,
                                component_mapping: attachment.component_mapping,
                                subresource_range: ImageSubresourceRange {
                                    aspects: attachment.format.aspects(),
                                    mip_levels: attachment.mip_level..attachment.mip_level + 1,
                                    // FIXME:
                                    array_layers: attachment.base_array_layer
                                        ..attachment.base_array_layer + 1,
                                },
                                ..Default::default()
                            },
                        )
                        // FIXME:
                        .map_err(Validated::unwrap)
                    })
                    .collect::<Result<_, _>>()?,
                ..Default::default()
            },
        )
        // FIXME:
        .map_err(Validated::unwrap)?;

        framebuffers.push(framebuffer);

        let mut i = 0;

        loop {
            let Some(image_index) = swapchain_image_indices.get_mut(i) else {
                break 'outer;
            };

            *image_index += 1;

            if *image_index == swapchain_image_counts[i] {
                *image_index = 0;
                i += 1;
            } else {
                break;
            }
        }
    }

    Ok(())
}

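/// The state used when executing instructions on the `synchronization2` code path, accumulating
/// Vulkan barriers and per-submission info as the instructions are interpreted.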
struct ExecuteState2<'a, W: ?Sized + 'static> {
    executable: &'a ExecutableTaskGraph<W>,
    resource_map: &'a ResourceMap<'a>,
    death_row: &'a mut DeathRow,
    current_frame_index: u32,
    current_fence: &'a Fence,
    submission_count: &'a mut usize,
    world: &'a W,
    cmd_pipeline_barrier2: vk::PFN_vkCmdPipelineBarrier2,
    queue_submit2: vk::PFN_vkQueueSubmit2,
    per_submits: SmallVec<[PerSubmitInfo2; 4]>,
    current_per_submit: PerSubmitInfo2,
    current_command_buffer: Option<raw::RecordingCommandBuffer>,
    command_buffers: Vec<Arc<raw::CommandBuffer>>,
    current_buffer_barriers_vk: Vec<vk::BufferMemoryBarrier2<'static>>,
    current_image_barriers_vk: Vec<vk::ImageMemoryBarrier2<'static>>,
    clear_values: LinearMap<Id, Option<ClearValue>>,
    clear_values_vk: Vec<vk::ClearValue>,
    clear_attachments_vk: Vec<vk::ClearAttachment>,
}

#[derive(Default)]
struct PerSubmitInfo2 {
    wait_semaphore_infos_vk: SmallVec<[vk::SemaphoreSubmitInfo<'static>; 4]>,
    command_buffer_infos_vk: SmallVec<[vk::CommandBufferSubmitInfo<'static>; 1]>,
    signal_semaphore_infos_vk: SmallVec<[vk::SemaphoreSubmitInfo<'static>; 4]>,
}

impl<'a, W: ?Sized + 'static> ExecuteState2<'a, W> {
    fn new(
        executable: &'a ExecutableTaskGraph<W>,
        resource_map: &'a ResourceMap<'a>,
        death_row: &'a mut DeathRow,
        current_frame_index: u32,
        current_fence: &'a Fence,
        submission_count: &'a mut usize,
        world: &'a W,
    ) -> Result<Self> {
        let fns = executable.device().fns();
        let (cmd_pipeline_barrier2, queue_submit2);

        if executable.device().api_version() >= Version::V1_3 {
            cmd_pipeline_barrier2 = fns.v1_3.cmd_pipeline_barrier2;
            queue_submit2 = fns.v1_3.queue_submit2;
        } else {
            cmd_pipeline_barrier2 = fns.khr_synchronization2.cmd_pipeline_barrier2_khr;
            queue_submit2 = fns.khr_synchronization2.queue_submit2_khr;
        }

        Ok(ExecuteState2 {
            executable,
            resource_map,
            death_row,
            current_frame_index,
            current_fence,
            submission_count,
            world,
            cmd_pipeline_barrier2,
            queue_submit2,
            per_submits: SmallVec::new(),
            current_per_submit: PerSubmitInfo2::default(),
            current_command_buffer: None,
            command_buffers: Vec::new(),
            current_buffer_barriers_vk: Vec::new(),
            current_image_barriers_vk: Vec::new(),
            clear_values: LinearMap::new(),
            clear_values_vk: Vec::new(),
            clear_attachments_vk: Vec::new(),
        })
    }

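    /// Converts the initial barriers of a submission without recording them; they are flushed
    /// lazily, right before the first command that needs them (see `flush_barriers`).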
    fn initial_pipeline_barrier(&mut self, barrier_range: Range<BarrierIndex>) {
        self.convert_initial_barriers(barrier_range);
    }

    fn convert_initial_barriers(&mut self, barrier_range: Range<BarrierIndex>) {
        let barrier_range = barrier_range.start as usize..barrier_range.end as usize;
        let queue_family_index = current_submission!(self).queue.queue_family_index();

        for barrier in &self.executable.barriers[barrier_range] {
            match barrier.resource.object_type() {
                ObjectType::Buffer => {
                    let buffer_id = unsafe { barrier.resource.parametrize() };
                    let state = unsafe { self.resource_map.buffer_unchecked(buffer_id) };
                    let buffer = state.buffer();
                    let access = state.access();

                    let mut src_stage_mask = PipelineStages::empty();
                    let mut src_access_mask = AccessFlags::empty();
                    let dst_stage_mask = barrier.dst_stage_mask;
                    let mut dst_access_mask = barrier.dst_access_mask;

                    if access.queue_family_index() == queue_family_index {
                        src_stage_mask = access.stage_mask();
                        src_access_mask = access.access_mask();
                    }

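                    // A full memory barrier is only needed to make prior writes visible to
                    // subsequent reads. For a write-after-write or write-after-read hazard, an
                    // execution dependency suffices, so the access masks are cleared. Anything
                    // else is a read after a read, which needs no barrier at all.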
                    if src_access_mask.contains_writes() && dst_access_mask.contains_reads() {
                    } else if dst_access_mask.contains_writes() {
                        src_access_mask = AccessFlags::empty();
                        dst_access_mask = AccessFlags::empty();
                    } else {
                        continue;
                    }

                    self.current_buffer_barriers_vk.push(
                        vk::BufferMemoryBarrier2::default()
                            .src_stage_mask(src_stage_mask.into())
                            .src_access_mask(src_access_mask.into())
                            .dst_stage_mask(dst_stage_mask.into())
                            .dst_access_mask(dst_access_mask.into())
                            // FIXME:
                            .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                            .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                            .buffer(buffer.handle())
                            .offset(0)
                            .size(buffer.size()),
                    );
                }
                ObjectType::Image => {
                    let image_id = unsafe { barrier.resource.parametrize() };
                    let state = unsafe { self.resource_map.image_unchecked(image_id) };
                    let image = state.image();
                    let access = state.access();

                    let mut src_stage_mask = PipelineStages::empty();
                    let mut src_access_mask = AccessFlags::empty();
                    let dst_stage_mask = barrier.dst_stage_mask;
                    let mut dst_access_mask = barrier.dst_access_mask;

                    if access.queue_family_index() == queue_family_index {
                        src_stage_mask = access.stage_mask();
                        src_access_mask = access.access_mask();
                    }

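                    // Unlike for buffers, an image barrier is also required whenever a layout
                    // transition is needed, even if the access masks alone wouldn't call for one.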
                    #[allow(clippy::if_same_then_else)]
                    if access.image_layout() != barrier.new_layout {
                    } else if src_access_mask.contains_writes() && dst_access_mask.contains_reads()
                    {
                    } else if dst_access_mask.contains_writes() {
                        src_access_mask = AccessFlags::empty();
                        dst_access_mask = AccessFlags::empty();
                    } else {
                        continue;
                    }

                    self.current_image_barriers_vk.push(
                        vk::ImageMemoryBarrier2::default()
                            .src_stage_mask(src_stage_mask.into())
                            .src_access_mask(src_access_mask.into())
                            .dst_stage_mask(dst_stage_mask.into())
                            .dst_access_mask(dst_access_mask.into())
                            .old_layout(access.image_layout().into())
                            .new_layout(barrier.new_layout.into())
                            // FIXME:
                            .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                            .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                            .image(image.handle())
                            .subresource_range(image.subresource_range().to_vk()),
                    );
                }
                ObjectType::Swapchain => {
                    let swapchain_id = unsafe { barrier.resource.parametrize() };
                    let state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
                    let image = state.current_image();
                    let access = state.access();

                    let mut src_stage_mask = PipelineStages::empty();
                    let mut src_access_mask = AccessFlags::empty();
                    let dst_stage_mask = barrier.dst_stage_mask;
                    let mut dst_access_mask = barrier.dst_access_mask;

                    if access.queue_family_index() == queue_family_index {
                        src_stage_mask = access.stage_mask();
                        src_access_mask = access.access_mask();
                    }

                    #[allow(clippy::if_same_then_else)]
                    if access.image_layout() != barrier.new_layout {
                    } else if src_access_mask.contains_writes() && dst_access_mask.contains_reads()
                    {
                    } else if dst_access_mask.contains_writes() {
                        src_access_mask = AccessFlags::empty();
                        dst_access_mask = AccessFlags::empty();
                    } else {
                        continue;
                    }

                    self.current_image_barriers_vk.push(
                        vk::ImageMemoryBarrier2::default()
                            .src_stage_mask(src_stage_mask.into())
                            .src_access_mask(src_access_mask.into())
                            .dst_stage_mask(dst_stage_mask.into())
                            .dst_access_mask(dst_access_mask.into())
                            .old_layout(access.image_layout().into())
                            .new_layout(barrier.new_layout.into())
                            // FIXME:
                            .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                            .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
                            .image(image.handle())
                            .subresource_range(image.subresource_range().to_vk()),
                    );
                }
                _ => unreachable!(),
            }
        }
    }

    fn wait_acquire(&mut self, swapchain_id: Id<Swapchain>, stage_mask: PipelineStages) {
        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
        let semaphore = &swapchain_state.semaphores[self.current_frame_index as usize]
            .image_available_semaphore;

        self.current_per_submit.wait_semaphore_infos_vk.push(
            vk::SemaphoreSubmitInfo::default()
                .semaphore(semaphore.handle())
                .stage_mask(stage_mask.into()),
        );
    }

    fn wait_semaphore(&mut self, semaphore_index: SemaphoreIndex, stage_mask: PipelineStages) {
        self.current_per_submit.wait_semaphore_infos_vk.push(
            vk::SemaphoreSubmitInfo::default()
                .semaphore(self.executable.semaphores.borrow()[semaphore_index].handle())
                .stage_mask(stage_mask.into()),
        );
    }

    fn execute_task(&mut self, node_index: NodeIndex) -> Result {
        if !self.current_buffer_barriers_vk.is_empty() || !self.current_image_barriers_vk.is_empty()
        {
            self.flush_barriers()?;
        }

        let task_node = unsafe { self.executable.graph.nodes.task_node_unchecked(node_index) };
        let task = &task_node.task;
        let mut current_command_buffer = unsafe {
            RecordingCommandBuffer::new(
                current_command_buffer!(self),
                self.resource_map,
                self.death_row,
            )
        };
        let mut context = TaskContext {
            resource_map: self.resource_map,
            current_frame_index: self.current_frame_index,
            command_buffers: &mut self.command_buffers,
        };

        unsafe { task.execute(&mut current_command_buffer, &mut context, self.world) }
            .map_err(|error| ExecuteError::Task { node_index, error })?;

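        // If the task recorded any additional command buffers, end the current command buffer
        // such that the additional ones can be appended to the submission in order.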
        if !self.command_buffers.is_empty() {
            unsafe { self.flush_current_command_buffer() }?;

            for command_buffer in self.command_buffers.drain(..) {
                self.current_per_submit.command_buffer_infos_vk.push(
                    vk::CommandBufferSubmitInfo::default().command_buffer(command_buffer.handle()),
                );
                self.death_row.push(command_buffer);
            }
        }

        Ok(())
    }

    fn pipeline_barrier(&mut self, barrier_range: Range<BarrierIndex>) -> Result {
        self.convert_barriers(barrier_range);

        self.flush_barriers()
    }

    fn convert_barriers(&mut self, barrier_range: Range<BarrierIndex>) {
        let barrier_range = barrier_range.start as usize..barrier_range.end as usize;

        for barrier in &self.executable.barriers[barrier_range] {
            match barrier.resource.object_type() {
                ObjectType::Buffer => {
                    let buffer_id = unsafe { barrier.resource.parametrize() };
                    let state = unsafe { self.resource_map.buffer_unchecked(buffer_id) };
                    let buffer = state.buffer();

                    self.current_buffer_barriers_vk.push(
                        vk::BufferMemoryBarrier2::default()
                            .src_stage_mask(barrier.src_stage_mask.into())
                            .src_access_mask(barrier.src_access_mask.into())
                            .dst_stage_mask(barrier.dst_stage_mask.into())
                            .dst_access_mask(barrier.dst_access_mask.into())
                            .src_queue_family_index(barrier.src_queue_family_index)
                            .dst_queue_family_index(barrier.dst_queue_family_index)
                            .buffer(buffer.handle())
                            .offset(0)
                            .size(buffer.size()),
                    );
                }
                ObjectType::Image => {
                    let image_id = unsafe { barrier.resource.parametrize() };
                    let image = unsafe { self.resource_map.image_unchecked(image_id) }.image();

                    self.current_image_barriers_vk.push(
                        vk::ImageMemoryBarrier2::default()
                            .src_stage_mask(barrier.src_stage_mask.into())
                            .src_access_mask(barrier.src_access_mask.into())
                            .dst_stage_mask(barrier.dst_stage_mask.into())
                            .dst_access_mask(barrier.dst_access_mask.into())
                            .old_layout(barrier.old_layout.into())
                            .new_layout(barrier.new_layout.into())
                            .src_queue_family_index(barrier.src_queue_family_index)
                            .dst_queue_family_index(barrier.dst_queue_family_index)
                            .image(image.handle())
                            .subresource_range(image.subresource_range().to_vk()),
                    );
                }
                ObjectType::Swapchain => {
                    let swapchain_id = unsafe { barrier.resource.parametrize() };
                    let image = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) }
                        .current_image();

                    self.current_image_barriers_vk.push(
                        vk::ImageMemoryBarrier2::default()
                            .src_stage_mask(barrier.src_stage_mask.into())
                            .src_access_mask(barrier.src_access_mask.into())
                            .dst_stage_mask(barrier.dst_stage_mask.into())
                            .dst_access_mask(barrier.dst_access_mask.into())
                            .old_layout(barrier.old_layout.into())
                            .new_layout(barrier.new_layout.into())
                            .src_queue_family_index(barrier.src_queue_family_index)
                            .dst_queue_family_index(barrier.dst_queue_family_index)
                            .image(image.handle())
                            .subresource_range(image.subresource_range().to_vk()),
                    );
                }
                _ => unreachable!(),
            }
        }
    }

    fn begin_render_pass(&mut self, render_pass_index: RenderPassIndex) -> Result {
        if !self.current_buffer_barriers_vk.is_empty() || !self.current_image_barriers_vk.is_empty()
        {
            self.flush_barriers()?;
        }

        let render_pass_state = &self.executable.render_passes.borrow()[render_pass_index];
        let framebuffer = &render_pass_state.framebuffers
            [unsafe { framebuffer_index(self.resource_map, &self.executable.swapchains) }];

        // FIXME:
        let mut render_area_vk = vk::Rect2D::default();
        [render_area_vk.extent.width, render_area_vk.extent.height] = framebuffer.extent();

        unsafe {
            set_clear_values(
                &self.executable.graph.nodes,
                self.resource_map,
                render_pass_state,
                &mut self.clear_values,
                &mut self.clear_values_vk,
            )
        };

        let render_pass_begin_info_vk = vk::RenderPassBeginInfo::default()
            .render_pass(framebuffer.render_pass().handle())
            .framebuffer(framebuffer.handle())
            .render_area(render_area_vk)
            .clear_values(&self.clear_values_vk);

        let fns = self.executable.device().fns();
        unsafe {
            (fns.v1_0.cmd_begin_render_pass)(
                current_command_buffer!(self).handle(),
                &render_pass_begin_info_vk,
                vk::SubpassContents::INLINE,
            )
        };

        self.death_row.push(framebuffer.clone());

        Ok(())
    }

    fn next_subpass(&mut self) {
        let fns = self.executable.device().fns();
        unsafe {
            (fns.v1_0.cmd_next_subpass)(
                self.current_command_buffer.as_ref().unwrap().handle(),
                vk::SubpassContents::INLINE,
            )
        };
    }

    fn end_render_pass(&mut self) {
        let fns = self.executable.device().fns();
        unsafe {
            (fns.v1_0.cmd_end_render_pass)(self.current_command_buffer.as_ref().unwrap().handle())
        };
    }

    fn clear_attachments(
        &mut self,
        node_index: NodeIndex,
        render_pass_index: RenderPassIndex,
        clear_attachment_range: Range<ClearAttachmentIndex>,
    ) -> Result {
        if !self.current_buffer_barriers_vk.is_empty() || !self.current_image_barriers_vk.is_empty()
        {
            self.flush_barriers()?;
        }

        let render_pass_state = &self.executable.render_passes.borrow()[render_pass_index];

        let attachments = &self.executable.clear_attachments[clear_attachment_range];

        unsafe {
            set_clear_attachments(
                &self.executable.graph.nodes,
                self.resource_map,
                node_index,
                render_pass_state,
                attachments,
                &mut self.clear_values,
                &mut self.clear_attachments_vk,
            )
        };

        Ok(())
    }

    fn signal_semaphore(&mut self, semaphore_index: SemaphoreIndex, stage_mask: PipelineStages) {
        self.current_per_submit.signal_semaphore_infos_vk.push(
            vk::SemaphoreSubmitInfo::default()
                .semaphore(self.executable.semaphores.borrow()[semaphore_index].handle())
                .stage_mask(stage_mask.into()),
        );
    }

    fn signal_pre_present(&mut self, swapchain_id: Id<Swapchain>, stage_mask: PipelineStages) {
        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
        let semaphore = &swapchain_state.semaphores[self.current_frame_index as usize]
            .pre_present_complete_semaphore;

        self.current_per_submit.signal_semaphore_infos_vk.push(
            vk::SemaphoreSubmitInfo::default()
                .semaphore(semaphore.handle())
                .stage_mask(stage_mask.into()),
        );
    }

    fn wait_pre_present(&mut self, swapchain_id: Id<Swapchain>, stage_mask: PipelineStages) {
        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
        let semaphore = &swapchain_state.semaphores[self.current_frame_index as usize]
            .pre_present_complete_semaphore;

        self.current_per_submit.wait_semaphore_infos_vk.push(
            vk::SemaphoreSubmitInfo::default()
                .semaphore(semaphore.handle())
                .stage_mask(stage_mask.into()),
        );
    }

    fn signal_present(&mut self, swapchain_id: Id<Swapchain>, stage_mask: PipelineStages) {
        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
        let semaphore =
            &swapchain_state.semaphores[self.current_frame_index as usize].tasks_complete_semaphore;

        self.current_per_submit.signal_semaphore_infos_vk.push(
            vk::SemaphoreSubmitInfo::default()
                .semaphore(semaphore.handle())
                .stage_mask(stage_mask.into()),
        );
    }

    fn flush_barriers(&mut self) -> Result {
        unsafe {
            (self.cmd_pipeline_barrier2)(
                current_command_buffer!(self).handle(),
                &vk::DependencyInfo::default()
                    .buffer_memory_barriers(&self.current_buffer_barriers_vk)
                    .image_memory_barriers(&self.current_image_barriers_vk),
            )
        };

        self.current_buffer_barriers_vk.clear();
        self.current_image_barriers_vk.clear();

        Ok(())
    }

    fn flush_submit(&mut self) -> Result {
        unsafe { self.flush_current_command_buffer() }?;

        self.per_submits
            .push(mem::take(&mut self.current_per_submit));

        Ok(())
    }

    fn submit(&mut self) -> Result {
        unsafe {
            self.executable
                .flush_mapped_memory_ranges(self.resource_map)
        }?;

        let submission = current_submission!(self);

        let mut submit_infos_vk = SmallVec::<[_; 4]>::with_capacity(self.per_submits.len());
        submit_infos_vk.extend(self.per_submits.iter().map(|per_submit| {
            vk::SubmitInfo2::default()
                .wait_semaphore_infos(&per_submit.wait_semaphore_infos_vk)
                .command_buffer_infos(&per_submit.command_buffer_infos_vk)
                .signal_semaphore_infos(&per_submit.signal_semaphore_infos_vk)
        }));

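        // Only the very last submission of the task graph signals the frame's fence.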
1316        let max_submission_index = self.executable.submissions.len() - 1;
1317        let fence_handle = if *self.submission_count == max_submission_index {
1318            self.current_fence.handle()
1319        } else {
1320            vk::Fence::null()
1321        };
1322
1323        submission.queue.with(|_guard| {
1324            unsafe {
1325                (self.queue_submit2)(
1326                    submission.queue.handle(),
1327                    submit_infos_vk.len() as u32,
1328                    submit_infos_vk.as_ptr(),
1329                    fence_handle,
1330                )
1331            }
1332            .result()
1333            .map_err(VulkanError::from)
1334        })?;
1335
1336        drop(submit_infos_vk);
1337        self.per_submits.clear();
1338
1339        *self.submission_count += 1;
1340
1341        Ok(())
1342    }
1343
1344    unsafe fn flush_current_command_buffer(&mut self) -> Result {
1345        let current_command_buffer = self.current_command_buffer.take().unwrap();
1346        let command_buffer = unsafe { current_command_buffer.end() }?;
1347        self.current_per_submit
1348            .command_buffer_infos_vk
1349            .push(vk::CommandBufferSubmitInfo::default().command_buffer(command_buffer.handle()));
1350        self.death_row.push(Arc::new(command_buffer));
1351
1352        Ok(())
1353    }
1354}
1355
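// Execution state for the fallback path on devices without `synchronization2`: it mirrors the
// synchronization2-based state above, but records barriers and submissions through the original
// `vkCmdPipelineBarrier` and `vkQueueSubmit` entry points.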
1356struct ExecuteState<'a, W: ?Sized + 'static> {
1357    executable: &'a ExecutableTaskGraph<W>,
1358    resource_map: &'a ResourceMap<'a>,
1359    death_row: &'a mut DeathRow,
1360    current_frame_index: u32,
1361    current_fence: &'a Fence,
1362    submission_count: &'a mut usize,
1363    world: &'a W,
1364    cmd_pipeline_barrier: vk::PFN_vkCmdPipelineBarrier,
1365    queue_submit: vk::PFN_vkQueueSubmit,
1366    per_submits: SmallVec<[PerSubmitInfo; 4]>,
1367    current_per_submit: PerSubmitInfo,
1368    current_command_buffer: Option<raw::RecordingCommandBuffer>,
1369    command_buffers: Vec<Arc<raw::CommandBuffer>>,
1370    current_buffer_barriers_vk: Vec<vk::BufferMemoryBarrier<'static>>,
1371    current_image_barriers_vk: Vec<vk::ImageMemoryBarrier<'static>>,
1372    current_src_stage_mask_vk: vk::PipelineStageFlags,
1373    current_dst_stage_mask_vk: vk::PipelineStageFlags,
1374    clear_values: LinearMap<Id, Option<ClearValue>>,
1375    clear_values_vk: Vec<vk::ClearValue>,
1376    clear_attachments_vk: Vec<vk::ClearAttachment>,
1377}
1378
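// Per-submission arrays laid out as expected by `VkSubmitInfo`, the original
// (pre-synchronization2) submission structure.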
1379#[derive(Default)]
1380struct PerSubmitInfo {
1381    wait_semaphores_vk: SmallVec<[vk::Semaphore; 4]>,
1382    wait_dst_stage_mask_vk: SmallVec<[vk::PipelineStageFlags; 4]>,
1383    command_buffers_vk: SmallVec<[vk::CommandBuffer; 1]>,
1384    signal_semaphores_vk: SmallVec<[vk::Semaphore; 4]>,
1385}
1386
1387impl<'a, W: ?Sized + 'static> ExecuteState<'a, W> {
1388    fn new(
1389        executable: &'a ExecutableTaskGraph<W>,
1390        resource_map: &'a ResourceMap<'a>,
1391        death_row: &'a mut DeathRow,
1392        current_frame_index: u32,
1393        current_fence: &'a Fence,
1394        submission_count: &'a mut usize,
1395        world: &'a W,
1396    ) -> Result<Self> {
1397        let fns = executable.device().fns();
1398        let cmd_pipeline_barrier = fns.v1_0.cmd_pipeline_barrier;
1399        let queue_submit = fns.v1_0.queue_submit;
1400
1401        Ok(ExecuteState {
1402            executable,
1403            resource_map,
1404            death_row,
1405            current_frame_index,
1406            current_fence,
1407            submission_count,
1408            world,
1409            cmd_pipeline_barrier,
1410            queue_submit,
1411            per_submits: SmallVec::new(),
1412            current_per_submit: PerSubmitInfo::default(),
1413            current_command_buffer: None,
1414            command_buffers: Vec::new(),
1415            current_buffer_barriers_vk: Vec::new(),
1416            current_image_barriers_vk: Vec::new(),
1417            current_src_stage_mask_vk: vk::PipelineStageFlags::empty(),
1418            current_dst_stage_mask_vk: vk::PipelineStageFlags::empty(),
1419            clear_values: LinearMap::new(),
1420            clear_values_vk: Vec::new(),
1421            clear_attachments_vk: Vec::new(),
1422        })
1423    }
1424
1425    fn initial_pipeline_barrier(&mut self, barrier_range: Range<BarrierIndex>) {
1426        self.convert_initial_barriers(barrier_range);
1427    }
1428
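    // Unlike the barriers compiled into the graph (see `convert_barriers`), the initial barriers
    // derive their source scope from each resource's last tracked access, and are skipped when
    // neither a hazard nor a layout transition is present.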
1429    fn convert_initial_barriers(&mut self, barrier_range: Range<BarrierIndex>) {
1430        let barrier_range = barrier_range.start as usize..barrier_range.end as usize;
1431        let queue_family_index = current_submission!(self).queue.queue_family_index();
1432
1433        for barrier in &self.executable.barriers[barrier_range] {
1434            match barrier.resource.object_type() {
1435                ObjectType::Buffer => {
1436                    let buffer_id = unsafe { barrier.resource.parametrize() };
1437                    let state = unsafe { self.resource_map.buffer_unchecked(buffer_id) };
1438                    let buffer = state.buffer();
1439                    let access = state.access();
1440
1441                    let mut src_stage_mask = PipelineStages::empty();
1442                    let mut src_access_mask = AccessFlags::empty();
1443                    let dst_stage_mask = barrier.dst_stage_mask;
1444                    let mut dst_access_mask = barrier.dst_access_mask;
1445
1446                    if access.queue_family_index() == queue_family_index {
1447                        src_stage_mask = access.stage_mask();
1448                        src_access_mask = access.access_mask();
1449                    }
1450
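                    // A read-after-write hazard requires the full memory barrier. If the new
                    // access writes, an execution-only barrier suffices, so the access masks
                    // are cleared. Otherwise (read-after-read) no barrier is needed.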
1451                    if src_access_mask.contains_writes() && dst_access_mask.contains_reads() {
1452                    } else if dst_access_mask.contains_writes() {
1453                        src_access_mask = AccessFlags::empty();
1454                        dst_access_mask = AccessFlags::empty();
1455                    } else {
1456                        continue;
1457                    }
1458
1459                    self.current_buffer_barriers_vk.push(
1460                        vk::BufferMemoryBarrier::default()
1461                            .src_access_mask(convert_access_mask(src_access_mask))
1462                            .dst_access_mask(convert_access_mask(dst_access_mask))
1463                            // FIXME: Queue family ownership transfers are not performed here.
1464                            .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
1465                            .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
1466                            .buffer(buffer.handle())
1467                            .offset(0)
1468                            .size(buffer.size()),
1469                    );
1470
1471                    self.current_src_stage_mask_vk |= convert_stage_mask(src_stage_mask);
1472                    self.current_dst_stage_mask_vk |= convert_stage_mask(dst_stage_mask);
1473                }
1474                ObjectType::Image => {
1475                    let image_id = unsafe { barrier.resource.parametrize() };
1476                    let state = unsafe { self.resource_map.image_unchecked(image_id) };
1477                    let image = state.image();
1478                    let access = state.access();
1479
1480                    let mut src_stage_mask = PipelineStages::empty();
1481                    let mut src_access_mask = AccessFlags::empty();
1482                    let dst_stage_mask = barrier.dst_stage_mask;
1483                    let mut dst_access_mask = barrier.dst_access_mask;
1484
1485                    if access.queue_family_index() == queue_family_index {
1486                        src_stage_mask = access.stage_mask();
1487                        src_access_mask = access.access_mask();
1488                    }
1489
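                    // As for buffers, except that a layout transition always requires the
                    // full barrier.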
1490                    #[allow(clippy::if_same_then_else)]
1491                    if access.image_layout() != barrier.new_layout {
1492                    } else if src_access_mask.contains_writes() && dst_access_mask.contains_reads()
1493                    {
1494                    } else if dst_access_mask.contains_writes() {
1495                        src_access_mask = AccessFlags::empty();
1496                        dst_access_mask = AccessFlags::empty();
1497                    } else {
1498                        continue;
1499                    }
1500
1501                    self.current_image_barriers_vk.push(
1502                        vk::ImageMemoryBarrier::default()
1503                            .src_access_mask(convert_access_mask(src_access_mask))
1504                            .dst_access_mask(convert_access_mask(dst_access_mask))
1505                            .old_layout(access.image_layout().into())
1506                            .new_layout(barrier.new_layout.into())
1507                            // FIXME: Queue family ownership transfers are not performed here.
1508                            .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
1509                            .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
1510                            .image(image.handle())
1511                            .subresource_range(image.subresource_range().to_vk()),
1512                    );
1513
1514                    self.current_src_stage_mask_vk |= convert_stage_mask(src_stage_mask);
1515                    self.current_dst_stage_mask_vk |= convert_stage_mask(dst_stage_mask);
1516                }
1517                ObjectType::Swapchain => {
1518                    let swapchain_id = unsafe { barrier.resource.parametrize() };
1519                    let state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
1520                    let image = state.current_image();
1521                    let access = state.access();
1522
1523                    let mut src_stage_mask = PipelineStages::empty();
1524                    let mut src_access_mask = AccessFlags::empty();
1525                    let dst_stage_mask = barrier.dst_stage_mask;
1526                    let mut dst_access_mask = barrier.dst_access_mask;
1527
1528                    if access.queue_family_index() == queue_family_index {
1529                        src_stage_mask = access.stage_mask();
1530                        src_access_mask = access.access_mask();
1531                    }
1532
1533                    #[allow(clippy::if_same_then_else)]
1534                    if access.image_layout() != barrier.new_layout {
1535                    } else if src_access_mask.contains_writes() && dst_access_mask.contains_reads()
1536                    {
1537                    } else if dst_access_mask.contains_writes() {
1538                        src_access_mask = AccessFlags::empty();
1539                        dst_access_mask = AccessFlags::empty();
1540                    } else {
1541                        continue;
1542                    }
1543
1544                    self.current_image_barriers_vk.push(
1545                        vk::ImageMemoryBarrier::default()
1546                            .src_access_mask(convert_access_mask(src_access_mask))
1547                            .dst_access_mask(convert_access_mask(dst_access_mask))
1548                            .old_layout(access.image_layout().into())
1549                            .new_layout(barrier.new_layout.into())
1550                            // FIXME: Queue family ownership transfers are not performed here.
1551                            .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
1552                            .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)
1553                            .image(image.handle())
1554                            .subresource_range(image.subresource_range().to_vk()),
1555                    );
1556
1557                    self.current_src_stage_mask_vk |= convert_stage_mask(src_stage_mask);
1558                    self.current_dst_stage_mask_vk |= convert_stage_mask(dst_stage_mask);
1559                }
1560                _ => unreachable!(),
1561            }
1562        }
1563    }
1564
1565    fn wait_acquire(&mut self, swapchain_id: Id<Swapchain>, stage_mask: PipelineStages) {
1566        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
1567        let semaphore = &swapchain_state.semaphores[self.current_frame_index as usize]
1568            .image_available_semaphore;
1569
1570        self.current_per_submit
1571            .wait_semaphores_vk
1572            .push(semaphore.handle());
1573        self.current_per_submit
1574            .wait_dst_stage_mask_vk
1575            .push(convert_stage_mask(stage_mask));
1576    }
1577
1578    fn wait_semaphore(&mut self, semaphore_index: SemaphoreIndex, stage_mask: PipelineStages) {
1579        self.current_per_submit
1580            .wait_semaphores_vk
1581            .push(self.executable.semaphores.borrow()[semaphore_index].handle());
1582        self.current_per_submit
1583            .wait_dst_stage_mask_vk
1584            .push(convert_stage_mask(stage_mask));
1585    }
1586
1587    fn execute_task(&mut self, node_index: NodeIndex) -> Result {
1588        if !self.current_buffer_barriers_vk.is_empty() || !self.current_image_barriers_vk.is_empty()
1589        {
1590            self.flush_barriers()?;
1591        }
1592
1593        let task_node = unsafe { self.executable.graph.nodes.task_node_unchecked(node_index) };
1594        let task = &task_node.task;
1595        let mut current_command_buffer = unsafe {
1596            RecordingCommandBuffer::new(
1597                current_command_buffer!(self),
1598                self.resource_map,
1599                self.death_row,
1600            )
1601        };
1602        let mut context = TaskContext {
1603            resource_map: self.resource_map,
1604            current_frame_index: self.current_frame_index,
1605            command_buffers: &mut self.command_buffers,
1606        };
1607
1608        unsafe { task.execute(&mut current_command_buffer, &mut context, self.world) }
1609            .map_err(|error| ExecuteError::Task { node_index, error })?;
1610
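        // If the task recorded its own command buffers, flush the one currently being recorded
        // first, so that the task's command buffers are submitted after it, in order.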
1611        if !self.command_buffers.is_empty() {
1612            unsafe { self.flush_current_command_buffer() }?;
1613
1614            for command_buffer in self.command_buffers.drain(..) {
1615                self.current_per_submit
1616                    .command_buffers_vk
1617                    .push(command_buffer.handle());
1618                self.death_row.push(command_buffer);
1619            }
1620        }
1621
1622        Ok(())
1623    }
1624
1625    fn pipeline_barrier(&mut self, barrier_range: Range<BarrierIndex>) -> Result {
1626        self.convert_barriers(barrier_range);
1627
1628        self.flush_barriers()
1629    }
1630
1631    fn convert_barriers(&mut self, barrier_range: Range<BarrierIndex>) {
1632        let barrier_range = barrier_range.start as usize..barrier_range.end as usize;
1633
1634        for barrier in &self.executable.barriers[barrier_range] {
1635            match barrier.resource.object_type() {
1636                ObjectType::Buffer => {
1637                    let buffer_id = unsafe { barrier.resource.parametrize() };
1638                    let state = unsafe { self.resource_map.buffer_unchecked(buffer_id) };
1639                    let buffer = state.buffer();
1640
1641                    self.current_buffer_barriers_vk.push(
1642                        vk::BufferMemoryBarrier::default()
1643                            .src_access_mask(convert_access_mask(barrier.src_access_mask))
1644                            .dst_access_mask(convert_access_mask(barrier.dst_access_mask))
1645                            .src_queue_family_index(barrier.src_queue_family_index)
1646                            .dst_queue_family_index(barrier.dst_queue_family_index)
1647                            .buffer(buffer.handle())
1648                            .offset(0)
1649                            .size(buffer.size()),
1650                    );
1651
1652                    self.current_src_stage_mask_vk |= convert_stage_mask(barrier.src_stage_mask);
1653                    self.current_dst_stage_mask_vk |= convert_stage_mask(barrier.dst_stage_mask);
1654                }
1655                ObjectType::Image => {
1656                    let image_id = unsafe { barrier.resource.parametrize() };
1657                    let image = unsafe { self.resource_map.image_unchecked(image_id) }.image();
1658
1659                    self.current_image_barriers_vk.push(
1660                        vk::ImageMemoryBarrier::default()
1661                            .src_access_mask(convert_access_mask(barrier.src_access_mask))
1662                            .dst_access_mask(convert_access_mask(barrier.dst_access_mask))
1663                            .old_layout(barrier.old_layout.into())
1664                            .new_layout(barrier.new_layout.into())
1665                            .src_queue_family_index(barrier.src_queue_family_index)
1666                            .dst_queue_family_index(barrier.dst_queue_family_index)
1667                            .image(image.handle())
1668                            .subresource_range(image.subresource_range().to_vk()),
1669                    );
1670
1671                    self.current_src_stage_mask_vk |= convert_stage_mask(barrier.src_stage_mask);
1672                    self.current_dst_stage_mask_vk |= convert_stage_mask(barrier.dst_stage_mask);
1673                }
1674                ObjectType::Swapchain => {
1675                    let swapchain_id = unsafe { barrier.resource.parametrize() };
1676                    let image = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) }
1677                        .current_image();
1678
1679                    self.current_image_barriers_vk.push(
1680                        vk::ImageMemoryBarrier::default()
1681                            .src_access_mask(convert_access_mask(barrier.src_access_mask))
1682                            .dst_access_mask(convert_access_mask(barrier.dst_access_mask))
1683                            .old_layout(barrier.old_layout.into())
1684                            .new_layout(barrier.new_layout.into())
1685                            .src_queue_family_index(barrier.src_queue_family_index)
1686                            .dst_queue_family_index(barrier.dst_queue_family_index)
1687                            .image(image.handle())
1688                            .subresource_range(image.subresource_range().to_vk()),
1689                    );
1690
1691                    self.current_src_stage_mask_vk |= convert_stage_mask(barrier.src_stage_mask);
1692                    self.current_dst_stage_mask_vk |= convert_stage_mask(barrier.dst_stage_mask);
1693                }
1694                _ => unreachable!(),
1695            }
1696        }
1697    }
1698
1699    fn begin_render_pass(&mut self, render_pass_index: RenderPassIndex) -> Result {
1700        if !self.current_buffer_barriers_vk.is_empty() || !self.current_image_barriers_vk.is_empty()
1701        {
1702            self.flush_barriers()?;
1703        }
1704
1705        let render_pass_state = &self.executable.render_passes.borrow()[render_pass_index];
1706        let framebuffer = &render_pass_state.framebuffers
1707            [unsafe { framebuffer_index(self.resource_map, &self.executable.swapchains) }];
1708
1709        // FIXME: The render area is hardcoded to the full framebuffer extent with no offset.
1710        let mut render_area_vk = vk::Rect2D::default();
1711        [render_area_vk.extent.width, render_area_vk.extent.height] = framebuffer.extent();
1712
1713        unsafe {
1714            set_clear_values(
1715                &self.executable.graph.nodes,
1716                self.resource_map,
1717                render_pass_state,
1718                &mut self.clear_values,
1719                &mut self.clear_values_vk,
1720            )
1721        };
1722
1723        let render_pass_begin_info_vk = vk::RenderPassBeginInfo::default()
1724            .render_pass(framebuffer.render_pass().handle())
1725            .framebuffer(framebuffer.handle())
1726            .render_area(render_area_vk)
1727            .clear_values(&self.clear_values_vk);
1728
1729        let fns = self.executable.device().fns();
1730        unsafe {
1731            (fns.v1_0.cmd_begin_render_pass)(
1732                current_command_buffer!(self).handle(),
1733                &render_pass_begin_info_vk,
1734                vk::SubpassContents::INLINE,
1735            )
1736        };
1737
1738        self.death_row.push(framebuffer.clone());
1739
1740        Ok(())
1741    }
1742
1743    fn next_subpass(&mut self) {
1744        let fns = self.executable.device().fns();
1745        unsafe {
1746            (fns.v1_0.cmd_next_subpass)(
1747                self.current_command_buffer.as_ref().unwrap().handle(),
1748                vk::SubpassContents::INLINE,
1749            )
1750        };
1751    }
1752
1753    fn end_render_pass(&mut self) {
1754        let fns = self.executable.device().fns();
1755        unsafe {
1756            (fns.v1_0.cmd_end_render_pass)(self.current_command_buffer.as_ref().unwrap().handle())
1757        };
1758    }
1759
1760    fn clear_attachments(
1761        &mut self,
1762        node_index: NodeIndex,
1763        render_pass_index: RenderPassIndex,
1764        clear_attachment_range: Range<ClearAttachmentIndex>,
1765    ) -> Result {
1766        if !self.current_buffer_barriers_vk.is_empty() || !self.current_image_barriers_vk.is_empty()
1767        {
1768            self.flush_barriers()?;
1769        }
1770
1771        let render_pass_state = &self.executable.render_passes.borrow()[render_pass_index];
1772
1773        let clear_attachment_range =
            clear_attachment_range.start as usize..clear_attachment_range.end as usize;
        let attachments = &self.executable.clear_attachments[clear_attachment_range];
1774
1775        unsafe {
1776            set_clear_attachments(
1777                &self.executable.graph.nodes,
1778                self.resource_map,
1779                node_index,
1780                render_pass_state,
1781                attachments,
1782                &mut self.clear_values,
1783                &mut self.clear_attachments_vk,
1784            )
1785        };
1786
        // Record the clear; otherwise the gathered `clear_attachments_vk` would go unused. The
        // clear rect here conservatively covers the full framebuffer with a single layer,
        // matching the render area used by `begin_render_pass`.
        let framebuffer = &render_pass_state.framebuffers
            [unsafe { framebuffer_index(self.resource_map, &self.executable.swapchains) }];

        let mut clear_rect_vk = vk::ClearRect::default().layer_count(1);
        [clear_rect_vk.rect.extent.width, clear_rect_vk.rect.extent.height] = framebuffer.extent();

        let fns = self.executable.device().fns();
        unsafe {
            (fns.v1_0.cmd_clear_attachments)(
                current_command_buffer!(self).handle(),
                self.clear_attachments_vk.len() as u32,
                self.clear_attachments_vk.as_ptr(),
                1,
                &clear_rect_vk,
            )
        };

1787        Ok(())
1788    }
1789
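    // With the original submit API, semaphores are always signaled at the end of the
    // submission, so the signal stage masks below are ignored.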
1790    fn signal_semaphore(&mut self, semaphore_index: SemaphoreIndex, _stage_mask: PipelineStages) {
1791        self.current_per_submit
1792            .signal_semaphores_vk
1793            .push(self.executable.semaphores.borrow()[semaphore_index].handle());
1794    }
1795
1796    fn signal_pre_present(&mut self, swapchain_id: Id<Swapchain>, _stage_mask: PipelineStages) {
1797        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
1798        let semaphore = &swapchain_state.semaphores[self.current_frame_index as usize]
1799            .pre_present_complete_semaphore;
1800
1801        self.current_per_submit
1802            .signal_semaphores_vk
1803            .push(semaphore.handle());
1804    }
1805
1806    fn wait_pre_present(&mut self, swapchain_id: Id<Swapchain>, stage_mask: PipelineStages) {
1807        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
1808        let semaphore = &swapchain_state.semaphores[self.current_frame_index as usize]
1809            .pre_present_complete_semaphore;
1810
1811        self.current_per_submit
1812            .wait_semaphores_vk
1813            .push(semaphore.handle());
1814        self.current_per_submit
1815            .wait_dst_stage_mask_vk
1816            .push(convert_stage_mask(stage_mask));
1817    }
1818
1819    fn signal_present(&mut self, swapchain_id: Id<Swapchain>, _stage_mask: PipelineStages) {
1820        let swapchain_state = unsafe { self.resource_map.swapchain_unchecked(swapchain_id) };
1821        let semaphore =
1822            &swapchain_state.semaphores[self.current_frame_index as usize].tasks_complete_semaphore;
1823
1824        self.current_per_submit
1825            .signal_semaphores_vk
1826            .push(semaphore.handle());
1827    }
1828
1829    fn flush_submit(&mut self) -> Result {
1830        unsafe { self.flush_current_command_buffer() }?;
1831
1832        self.per_submits
1833            .push(mem::take(&mut self.current_per_submit));
1834
1835        Ok(())
1836    }
1837
1838    fn flush_barriers(&mut self) -> Result {
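        // `vkCmdPipelineBarrier` requires non-empty stage masks: an empty source mask means
        // there is nothing to wait for (`TOP_OF_PIPE`), and an empty destination mask means
        // nothing needs to wait (`BOTTOM_OF_PIPE`).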
1839        if self.current_src_stage_mask_vk.is_empty() {
1840            self.current_src_stage_mask_vk = vk::PipelineStageFlags::TOP_OF_PIPE;
1841        }
1842
1843        if self.current_dst_stage_mask_vk.is_empty() {
1844            self.current_dst_stage_mask_vk = vk::PipelineStageFlags::BOTTOM_OF_PIPE;
1845        }
1846
1847        unsafe {
1848            (self.cmd_pipeline_barrier)(
1849                current_command_buffer!(self).handle(),
1850                self.current_src_stage_mask_vk,
1851                self.current_dst_stage_mask_vk,
1852                vk::DependencyFlags::empty(),
1853                0,
1854                ptr::null(),
1855                self.current_buffer_barriers_vk.len() as u32,
1856                self.current_buffer_barriers_vk.as_ptr(),
1857                self.current_image_barriers_vk.len() as u32,
1858                self.current_image_barriers_vk.as_ptr(),
1859            )
1860        };
1861
1862        self.current_buffer_barriers_vk.clear();
1863        self.current_image_barriers_vk.clear();
1864        self.current_src_stage_mask_vk = vk::PipelineStageFlags::empty();
1865        self.current_dst_stage_mask_vk = vk::PipelineStageFlags::empty();
1866
1867        Ok(())
1868    }
1869
1870    fn submit(&mut self) -> Result {
1871        unsafe {
1872            self.executable
1873                .flush_mapped_memory_ranges(self.resource_map)
1874        }?;
1875
1876        let submission = current_submission!(self);
1877
1878        let mut submit_infos_vk = SmallVec::<[_; 4]>::with_capacity(self.per_submits.len());
1879        submit_infos_vk.extend(self.per_submits.iter().map(|per_submit| {
1880            vk::SubmitInfo::default()
1881                .wait_semaphores(&per_submit.wait_semaphores_vk)
1882                .wait_dst_stage_mask(&per_submit.wait_dst_stage_mask_vk)
1883                .command_buffers(&per_submit.command_buffers_vk)
1884                .signal_semaphores(&per_submit.signal_semaphores_vk)
1885        }));
1886
1887        let max_submission_index = self.executable.submissions.len() - 1;
1888        let fence_vk = if *self.submission_count == max_submission_index {
1889            self.current_fence.handle()
1890        } else {
1891            vk::Fence::null()
1892        };
1893
1894        submission.queue.with(|_guard| {
1895            unsafe {
1896                (self.queue_submit)(
1897                    submission.queue.handle(),
1898                    submit_infos_vk.len() as u32,
1899                    submit_infos_vk.as_ptr(),
1900                    fence_vk,
1901                )
1902            }
1903            .result()
1904            .map_err(VulkanError::from)
1905        })?;
1906
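        // `submit_infos_vk` borrows from `per_submits`, so it must be dropped before the
        // per-submit infos are cleared.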
1907        drop(submit_infos_vk);
1908        self.per_submits.clear();
1909
1910        *self.submission_count += 1;
1911
1912        Ok(())
1913    }
1914
1915    unsafe fn flush_current_command_buffer(&mut self) -> Result {
1916        let current_command_buffer = self.current_command_buffer.take().unwrap();
1917        let command_buffer = unsafe { current_command_buffer.end() }?;
1918        self.current_per_submit
1919            .command_buffers_vk
1920            .push(command_buffer.handle());
1921        self.death_row.push(Arc::new(command_buffer));
1922
1923        Ok(())
1924    }
1925}
1926
1927macro_rules! current_submission {
1928    ($state:expr) => {
1929        &$state.executable.submissions[*$state.submission_count]
1930    };
1931}
1932use current_submission;
1933
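// Returns a reference to the command buffer currently being recorded, beginning a new one on
// the current submission's queue family if none is in flight.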
1934macro_rules! current_command_buffer {
1935    ($state:expr) => {{
1936        if $state.current_command_buffer.is_none() {
1937            $state.current_command_buffer = Some(create_command_buffer(
1938                $state.resource_map,
1939                &current_submission!($state).queue,
1940            )?);
1941        }
1942
1943        $state.current_command_buffer.as_mut().unwrap()
1944    }};
1945}
1946use current_command_buffer;
1947
1948fn create_command_buffer(
1949    resource_map: &ResourceMap<'_>,
1950    queue: &Queue,
1951) -> Result<raw::RecordingCommandBuffer, VulkanError> {
1952    let allocator = resource_map.physical_resources.command_buffer_allocator();
1953
1954    // SAFETY: The parameters are valid.
1955    unsafe {
1956        raw::RecordingCommandBuffer::new_unchecked(
1957            allocator.clone(),
1958            queue.queue_family_index(),
1959            raw::CommandBufferLevel::Primary,
1960            raw::CommandBufferBeginInfo {
1961                usage: raw::CommandBufferUsage::OneTimeSubmit,
1962                inheritance_info: None,
1963                ..Default::default()
1964            },
1965        )
1966    }
1967    // This can't panic because we know that the queue family index is active on the device,
1968    // otherwise we wouldn't have a reference to the `Queue`.
1969    .map_err(Validated::unwrap)
1970}
1971
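// Computes the index of the framebuffer to use for the current combination of acquired
// swapchain images. Each swapchain contributes one mixed-radix digit: the digit is its current
// image index and the radix is its image count. For example, with two swapchains of 3 and 2
// images and acquired indices 1 and 1, the framebuffer index is 1 + 1 * 3 = 4.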
1972unsafe fn framebuffer_index(resource_map: &ResourceMap<'_>, swapchains: &[Id<Swapchain>]) -> usize {
1973    let mut index = 0;
1974    let mut factor = 1;
1975
1976    for &swapchain_id in swapchains {
1977        let swapchain_state = unsafe { resource_map.swapchain_unchecked(swapchain_id) };
1978        let swapchain = swapchain_state.swapchain();
1979        index += swapchain_state.current_image_index().unwrap() * factor;
1980        factor *= swapchain.image_count();
1981    }
1982
1983    index as usize
1984}
1985
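// Collects the clear values for a render pass instance: every clearing node in the render pass
// fills in its values, which are then converted to `vk::ClearValue`s in attachment order
// (unset values fall back to the default).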
1986unsafe fn set_clear_values(
1987    nodes: &super::Nodes<impl ?Sized + 'static>,
1988    resource_map: &ResourceMap<'_>,
1989    render_pass_state: &super::RenderPassState,
1990    clear_values: &mut LinearMap<Id, Option<ClearValue>>,
1991    clear_values_vk: &mut Vec<vk::ClearValue>,
1992) {
1993    clear_values_vk.clear();
1994
1995    if render_pass_state.clear_node_indices.is_empty() {
1996        return;
1997    }
1998
1999    clear_values.clear();
2000    clear_values.extend(render_pass_state.attachments.keys().map(|&id| (id, None)));
2001
2002    for &node_index in &render_pass_state.clear_node_indices {
2003        let task_node = unsafe { nodes.task_node_unchecked(node_index) };
2004
2005        task_node.task.clear_values(&mut ClearValues {
2006            inner: clear_values,
2007            resource_map,
2008        });
2009    }
2010
2011    clear_values_vk.extend(clear_values.values().map(|clear_value| {
2012        clear_value
2013            .as_ref()
2014            .map_or_else(Default::default, ClearValue::to_vk)
2015    }));
2016}
2017
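// Collects the clear values for a clear-attachments call issued by the given node and converts
// them into `vk::ClearAttachment`s.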
2018unsafe fn set_clear_attachments(
2019    nodes: &super::Nodes<impl ?Sized + 'static>,
2020    resource_map: &ResourceMap<'_>,
2021    node_index: NodeIndex,
2022    render_pass_state: &super::RenderPassState,
2023    attachments: &[Id],
2024    clear_values: &mut LinearMap<Id, Option<ClearValue>>,
2025    clear_attachments_vk: &mut Vec<vk::ClearAttachment>,
2026) {
2027    clear_attachments_vk.clear();
2028    clear_values.clear();
2029    clear_values.extend(attachments.iter().map(|&id| (id, None)));
2030
2031    let task_node = unsafe { nodes.task_node_unchecked(node_index) };
2032
2033    task_node.task.clear_values(&mut ClearValues {
2034        inner: clear_values,
2035        resource_map,
2036    });
2037
2038    clear_attachments_vk.extend(clear_values.iter().map(|(id, clear_value)| {
2039        let attachment_state = render_pass_state.attachments.get(id).unwrap();
2040
2041        vk::ClearAttachment {
2042            aspect_mask: attachment_state.format.aspects().into(),
2043            color_attachment: attachment_state.index,
2044            clear_value: clear_value
2045                .as_ref()
2046                .map_or_else(Default::default, ClearValue::to_vk),
2047        }
2048    }));
2049}
2050
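// Converts a stage mask into a form accepted by the original synchronization commands by
// folding stages that only exist in synchronization2 (index/vertex-attribute input and the
// individual transfer stages) into their coarser equivalents.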
2051fn convert_stage_mask(mut stage_mask: PipelineStages) -> vk::PipelineStageFlags {
2052    const VERTEX_INPUT_FLAGS: PipelineStages =
2053        PipelineStages::INDEX_INPUT.union(PipelineStages::VERTEX_ATTRIBUTE_INPUT);
2054    const TRANSFER_FLAGS: PipelineStages = PipelineStages::COPY
2055        .union(PipelineStages::RESOLVE)
2056        .union(PipelineStages::BLIT)
2057        .union(PipelineStages::CLEAR);
2058
2059    if stage_mask.intersects(VERTEX_INPUT_FLAGS) {
2060        stage_mask -= VERTEX_INPUT_FLAGS;
2061        stage_mask |= PipelineStages::VERTEX_INPUT;
2062    }
2063
2064    if stage_mask.intersects(TRANSFER_FLAGS) {
2065        stage_mask -= TRANSFER_FLAGS;
2066        stage_mask |= PipelineStages::ALL_TRANSFER;
2067    }
2068
2069    stage_mask.into()
2070}
2071
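// As `convert_stage_mask`, but for access masks: the shader sampled/storage flags that only
// exist in synchronization2 are folded into `SHADER_READ`/`SHADER_WRITE`.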
2072fn convert_access_mask(mut access_mask: AccessFlags) -> vk::AccessFlags {
2073    const READ_FLAGS: AccessFlags =
2074        AccessFlags::SHADER_SAMPLED_READ.union(AccessFlags::SHADER_STORAGE_READ);
2075    const WRITE_FLAGS: AccessFlags = AccessFlags::SHADER_STORAGE_WRITE;
2076
2077    if access_mask.intersects(READ_FLAGS) {
2078        access_mask -= READ_FLAGS;
2079        access_mask |= AccessFlags::SHADER_READ;
2080    }
2081
2082    if access_mask.intersects(WRITE_FLAGS) {
2083        access_mask -= WRITE_FLAGS;
2084        access_mask |= AccessFlags::SHADER_WRITE;
2085    }
2086
2087    access_mask.into()
2088}
2089
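// Restores the task graph's invariants when execution fails partway through. On drop, it
// replaces the current fence, waits for the queues of the already-submitted work, recreates
// the semaphores (which may have pending signal operations), and rewrites the tracked resource
// accesses to reflect only the submissions that succeeded.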
2090struct StateGuard<'a, W: ?Sized + 'static> {
2091    executable: &'a ExecutableTaskGraph<W>,
2092    resource_map: &'a ResourceMap<'a>,
2093    current_fence: &'a mut Fence,
2094    submission_count: usize,
2095}
2096
2097impl<W: ?Sized + 'static> Drop for StateGuard<'_, W> {
2098    #[cold]
2099    fn drop(&mut self) {
2100        let device = self.executable.device();
2101
2102        // SAFETY: The parameters are valid.
2103        match unsafe {
2104            Fence::new_unchecked(
2105                device.clone(),
2106                FenceCreateInfo {
2107                    flags: FenceCreateFlags::SIGNALED,
2108                    ..Default::default()
2109                },
2110            )
2111        } {
2112            Ok(new_fence) => {
2113                *self.current_fence = new_fence;
2114            }
2115            Err(err) => {
2116                // Device loss is already a form of poisoning built into Vulkan; by design,
2117                // no invalid state can be observed after it.
2118                if err == VulkanError::DeviceLost {
2119                    return;
2120                }
2121
2122                eprintln!(
2123                    "failed to recreate the current fence after failed execution, rendering \
2124                    recovery impossible: {err}; aborting",
2125                );
2126                std::process::abort();
2127            }
2128        }
2129
2130        if self.submission_count == 0 {
2131            return;
2132        }
2133
2134        let submissions = &self.executable.submissions;
2135
2136        // We must make sure that invalid state cannot be observed: if at least one
2137        // submission succeeded while another failed, there are still pending semaphore
2138        // signal operations.
2139        for submission in &submissions[0..self.submission_count] {
2140            if let Err(err) = submission.queue.with(|mut guard| guard.wait_idle()) {
2141                if err == VulkanError::DeviceLost {
2142                    return;
2143                }
2144
2145                eprintln!(
2146                    "failed to wait on queue idle after partly failed submissions, rendering \
2147                    recovery impossible: {err}; aborting",
2148                );
2149                std::process::abort();
2150            }
2151        }
2152
2153        // But even after waiting for idle, the state of the graph is invalid because some
2154        // semaphores are still signaled, so we have to recreate them.
2155        for semaphore in self.executable.semaphores.borrow_mut().iter_mut() {
2156            // SAFETY: The parameters are valid.
2157            match unsafe { Semaphore::new_unchecked(device.clone(), Default::default()) } {
2158                Ok(new_semaphore) => {
2159                    *semaphore = Arc::new(new_semaphore);
2160                }
2161                Err(err) => {
2162                    if err == VulkanError::DeviceLost {
2163                        return;
2164                    }
2165
2166                    eprintln!(
2167                        "failed to recreate semaphores after partly failed submissions, rendering \
2168                        recovery impossible: {err}; aborting",
2169                    );
2170                    std::process::abort();
2171                }
2172            }
2173        }
2174
2175        let mut last_accesses =
2176            vec![ResourceAccess::default(); self.executable.graph.resources.capacity() as usize];
2177        let instruction_range = 0..submissions[self.submission_count - 1].instruction_range.end;
2178
2179        // Determine the last accesses of resources up until before the failed submission.
2180        for instruction in &self.executable.instructions[instruction_range] {
2181            let Instruction::ExecuteTask { node_index } = instruction else {
2182                continue;
2183            };
2184            let task_node = unsafe { self.executable.graph.nodes.task_node_unchecked(*node_index) };
2185
2186            for (id, access) in task_node.accesses.iter() {
2187                let prev_access = &mut last_accesses[id.index() as usize];
2188                let access = ResourceAccess {
2189                    queue_family_index: task_node.queue_family_index,
2190                    ..*access
2191                };
2192
2193                if prev_access.queue_family_index != access.queue_family_index
2194                    || prev_access.image_layout != access.image_layout
2195                    || prev_access.access_mask.contains_writes()
2196                    || access.access_mask.contains_writes()
2197                {
2198                    *prev_access = access;
2199                } else {
2200                    prev_access.stage_mask |= access.stage_mask;
2201                    prev_access.access_mask |= access.access_mask;
2202                }
2203            }
2204        }
2205
2206        // Update the resource state with the correct last accesses.
2207        unsafe {
2208            self.executable
2209                .update_resource_state(self.resource_map, &last_accesses)
2210        };
2211    }
2212}
2213
2214/// Maps [virtual resources] to physical resources.
2215pub struct ResourceMap<'a> {
2216    virtual_resources: &'a super::Resources,
2217    physical_resources: Arc<Resources>,
2218    map: Vec<*const ()>,
2219    len: u32,
2220    guard: epoch::Guard<'a>,
2221}
2222
2223impl<'a> ResourceMap<'a> {
2224    /// Creates a new `ResourceMap` mapping the virtual resources of the given `executable`.
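    ///
    /// The returned map is pre-populated with mappings for the resources that the graph was
    /// built with physical IDs for; the remaining virtual resources must be inserted before
    /// execution. A minimal sketch, where `virtual_buffer` and `physical_buffer` stand in for
    /// a hypothetical matching pair of virtual and physical buffer IDs:
    ///
    /// ```ignore
    /// let mut resource_map = ResourceMap::new(&executable)?;
    /// resource_map.insert_buffer(virtual_buffer, physical_buffer)?;
    /// assert!(resource_map.is_exhaustive());
    /// ```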
2225    pub fn new(executable: &'a ExecutableTaskGraph<impl ?Sized>) -> Result<Self, InvalidSlotError> {
2226        let virtual_resources = &executable.graph.resources;
2227        let physical_resources = virtual_resources.physical_resources.clone();
2228        let mut map = vec![ptr::null(); virtual_resources.capacity() as usize];
2229        let guard = virtual_resources.physical_resources.pin();
2230
2231        for (&physical_id, &virtual_id) in &virtual_resources.physical_map {
2232            // SAFETY: Virtual IDs inside the `physical_map` are always valid.
2233            let slot = unsafe { map.get_unchecked_mut(virtual_id.index() as usize) };
2234
2235            *slot = match physical_id.object_type() {
2236                ObjectType::Buffer => {
2237                    let physical_id = unsafe { physical_id.parametrize() };
2238
2239                    // SAFETY: We own an `epoch::Guard`.
2240                    <*const _>::cast(unsafe { physical_resources.buffer_unprotected(physical_id) }?)
2241                }
2242                ObjectType::Image => {
2243                    let physical_id = unsafe { physical_id.parametrize() };
2244
2245                    // SAFETY: We own an `epoch::Guard`.
2246                    <*const _>::cast(unsafe { physical_resources.image_unprotected(physical_id) }?)
2247                }
2248                ObjectType::Swapchain => {
2249                    let physical_id = unsafe { physical_id.parametrize() };
2250
2251                    // SAFETY: We own an `epoch::Guard`.
2252                    <*const _>::cast(unsafe {
2253                        physical_resources.swapchain_unprotected(physical_id)
2254                    }?)
2255                }
2256                _ => unreachable!(),
2257            };
2258        }
2259
2260        let len = virtual_resources.physical_map.len() as u32;
2261
2262        Ok(ResourceMap {
2263            virtual_resources,
2264            physical_resources,
2265            map,
2266            len,
2267            guard,
2268        })
2269    }
2270
2271    #[doc(hidden)]
2272    #[inline]
2273    pub fn insert<R: Resource>(
2274        &mut self,
2275        virtual_id: Id<R>,
2276        physical_id: Id<R>,
2277    ) -> Result<(), InvalidSlotError> {
2278        R::insert(self, virtual_id, physical_id)
2279    }
2280
2281    /// Inserts a mapping from the [virtual buffer resource] corresponding to `virtual_id` to the
2282    /// physical resource corresponding to `physical_id`.
2283    ///
2284    /// # Panics
2285    ///
2286    /// - Panics if the physical resource doesn't match the virtual resource.
2287    /// - Panics if the physical resource already has a mapping from another virtual resource.
2288    #[inline]
2289    pub fn insert_buffer(
2290        &mut self,
2291        virtual_id: Id<Buffer>,
2292        physical_id: Id<Buffer>,
2293    ) -> Result<(), InvalidSlotError> {
2294        self.virtual_resources.get(virtual_id.erase())?;
2295
2296        // SAFETY: We own an `epoch::Guard`.
2297        let state = unsafe { self.physical_resources.buffer_unprotected(physical_id) }?;
2298
2299        assert_eq!(
2300            state.buffer().sharing().is_exclusive(),
2301            virtual_id.is_exclusive(),
2302        );
2303
2304        let ptr = <*const _>::cast(state);
2305        let is_duplicate = self.map.iter().any(|&p| p == ptr);
2306
2307        // SAFETY: We checked that `virtual_id` is present in `self.virtual_resources` above, and
2308        // since we initialized `self.map` with a length at least that of `self.virtual_resources`,
2309        // the index must be in bounds.
2310        let slot = unsafe { self.map.get_unchecked_mut(virtual_id.index() as usize) };
2311
2312        if *slot != ptr {
2313            assert!(!is_duplicate);
2314        }
2315
2316        if slot.is_null() {
2317            self.len += 1;
2318        }
2319
2320        *slot = ptr;
2321
2322        Ok(())
2323    }
2324
2325    /// Inserts a mapping from the [virtual buffer resource] corresponding to `virtual_id` to the
2326    /// physical resource corresponding to `physical_id` without doing any checks.
2327    ///
2328    /// # Safety
2329    ///
2330    /// - `virtual_id` must be a valid virtual resource ID.
2331    /// - `physical_id` must be a valid physical resource ID.
2332    /// - The physical resource must match the virtual resource.
2333    /// - The physical resource must not have a mapping from another virtual resource.
2334    #[inline]
2335    pub unsafe fn insert_buffer_unchecked(
2336        &mut self,
2337        virtual_id: Id<Buffer>,
2338        physical_id: Id<Buffer>,
2339    ) {
2340        // SAFETY:
2341        // * The caller must ensure that `physical_id` is a valid ID.
2342        // * We own an `epoch::Guard`.
2343        let state = unsafe {
2344            self.physical_resources
2345                .buffer_unchecked_unprotected(physical_id)
2346        };
2347
2348        // SAFETY: The caller must ensure that `virtual_id` is a valid virtual ID, and since we
2349        // initialized `self.map` with a length at least that of `self.virtual_resources`, the
2350        // index must be in bounds.
2351        let slot = unsafe { self.map.get_unchecked_mut(virtual_id.index() as usize) };
2352
2353        if slot.is_null() {
2354            self.len += 1;
2355        }
2356
2357        *slot = <*const _>::cast(state);
2358    }
2359
2360    /// Inserts a mapping from the [virtual image resource] corresponding to `virtual_id` to the
2361    /// physical resource corresponding to `physical_id`.
2362    ///
2363    /// # Panics
2364    ///
2365    /// - Panics if the physical resource doesn't match the virtual resource.
2366    /// - Panics if the physical resource already has a mapping from another virtual resource.
2367    /// - Panics if `virtual_id` refers to a swapchain image.
2368    #[inline]
2369    pub fn insert_image(
2370        &mut self,
2371        virtual_id: Id<Image>,
2372        physical_id: Id<Image>,
2373    ) -> Result<(), InvalidSlotError> {
2374        assert_ne!(virtual_id.object_type(), ObjectType::Swapchain);
2375
2376        self.virtual_resources.get(virtual_id.erase())?;
2377
2378        // SAFETY: We own an `epoch::Guard`.
2379        let state = unsafe { self.physical_resources.image_unprotected(physical_id) }?;
2380
2381        assert_eq!(
2382            state.image().sharing().is_exclusive(),
2383            virtual_id.is_exclusive(),
2384        );
2385
2386        let ptr = <*const _>::cast(state);
2387        let is_duplicate = self.map.iter().any(|&p| p == ptr);
2388
2389        // SAFETY: We checked that `virtual_id` is present in `self.virtual_resources` above, and
2390        // since we initialized `self.map` with a length at least that of `self.virtual_resources`,
2391        // the index must be in bounds.
2392        let slot = unsafe { self.map.get_unchecked_mut(virtual_id.index() as usize) };
2393
2394        if *slot != ptr {
2395            assert!(!is_duplicate);
2396        }
2397
2398        if slot.is_null() {
2399            self.len += 1;
2400        }
2401
2402        *slot = ptr;
2403
2404        Ok(())
2405    }
2406
2407    /// Inserts a mapping from the [virtual image resource] corresponding to `virtual_id` to the
2408    /// physical resource corresponding to `physical_id` without doing any checks.
2409    ///
2410    /// # Safety
2411    ///
2412    /// - `virtual_id` must be a valid virtual resource ID.
2413    /// - `physical_id` must be a valid physical resource ID.
2414    /// - The physical resource must match the virtual resource.
2415    /// - The physical resource must not have a mapping from another virtual resource.
2416    #[inline]
2417    pub unsafe fn insert_image_unchecked(&mut self, virtual_id: Id<Image>, physical_id: Id<Image>) {
2418        // SAFETY:
2419        // * The caller must ensure that `physical_id` is a valid ID.
2420        // * We own an `epoch::Guard`.
2421        let state = unsafe {
2422            self.physical_resources
2423                .image_unchecked_unprotected(physical_id)
2424        };
2425
2426        // SAFETY: The caller must ensure that `virtual_id` is a valid virtual ID, and since we
2427        // initialized `self.map` with a length at least that of `self.virtual_resources`, the
2428        // index must be in bounds.
2429        let slot = unsafe { self.map.get_unchecked_mut(virtual_id.index() as usize) };
2430
2431        if slot.is_null() {
2432            self.len += 1;
2433        }
2434
2435        *slot = <*const _>::cast(state);
2436    }
2437
2438    /// Inserts a mapping from the [virtual swapchain resource] corresponding to `virtual_id` to
2439    /// the physical resource corresponding to `physical_id`.
2440    ///
2441    /// # Panics
2442    ///
2443    /// - Panics if the physical resource doesn't match the virtual resource.
2444    /// - Panics if the physical resource already has a mapping from another virtual resource.
2445    #[inline]
2446    pub fn insert_swapchain(
2447        &mut self,
2448        virtual_id: Id<Swapchain>,
2449        physical_id: Id<Swapchain>,
2450    ) -> Result<(), InvalidSlotError> {
2451        self.virtual_resources.get(virtual_id.erase())?;
2452
2453        // SAFETY: We own an `epoch::Guard`.
2454        let state = unsafe { self.physical_resources.swapchain_unprotected(physical_id) }?;
2455
2456        assert_eq!(
2457            state.swapchain().image_sharing().is_exclusive(),
2458            virtual_id.is_exclusive(),
2459        );
2460
2461        let ptr = <*const _>::cast(state);
2462        let is_duplicate = self.map.iter().any(|&p| p == ptr);
2463
2464        // SAFETY: We checked that `virtual_id` is present in `self.virtual_resources` above, and
2465        // since we initialized `self.map` with a length at least that of `self.virtual_resources`,
2466        // the index must be in bounds.
2467        let slot = unsafe { self.map.get_unchecked_mut(virtual_id.index() as usize) };
2468
2469        if *slot != ptr {
2470            assert!(!is_duplicate);
2471        }
2472
2473        if slot.is_null() {
2474            self.len += 1;
2475        }
2476
2477        *slot = ptr;
2478
2479        Ok(())
2480    }
2481
2482    /// Inserts a mapping from the [virtual swapchain resource] corresponding to `virtual_id` to
2483    /// the physical resource corresponding to `physical_id` without doing any checks.
2484    ///
2485    /// # Safety
2486    ///
2487    /// - `virtual_id` must be a valid virtual resource ID.
2488    /// - `physical_id` must be a valid physical resource ID.
2489    /// - The physical resource must match the virtual resource.
2490    /// - The physical resource must not have a mapping from another virtual resource.
2491    #[inline]
2492    pub unsafe fn insert_swapchain_unchecked(
2493        &mut self,
2494        virtual_id: Id<Swapchain>,
2495        physical_id: Id<Swapchain>,
2496    ) {
2497        // SAFETY:
2498        // * The caller must ensure that `physical_id` is a valid ID.
2499        // * We own an `epoch::Guard`.
2500        let state = unsafe {
2501            self.physical_resources
2502                .swapchain_unchecked_unprotected(physical_id)
2503        };
2504
2505        // SAFETY: The caller must ensure that `virtual_id` is a valid virtual ID, and since we
2506        // initialized `self.map` with a length at least that of `self.virtual_resources`, the
2507        // index must be in bounds.
2508        let slot = unsafe { self.map.get_unchecked_mut(virtual_id.index() as usize) };
2509
2510        if slot.is_null() {
2511            self.len += 1;
2512        }
2513
2514        *slot = <*const _>::cast(state);
2515    }
2516
2517    pub(crate) fn virtual_resources(&self) -> &super::Resources {
2518        self.virtual_resources
2519    }
2520
2521    /// Returns the `Resources` collection.
2522    #[inline]
2523    #[must_use]
2524    pub fn resources(&self) -> &Arc<Resources> {
2525        &self.physical_resources
2526    }
2527
2528    /// Returns the number of mappings in the map.
2529    #[inline]
2530    #[must_use]
2531    pub fn len(&self) -> u32 {
2532        self.len
2533    }
2534
2535    /// Returns `true` if the map maps every virtual resource.
2536    #[inline]
2537    #[must_use]
2538    pub fn is_exhaustive(&self) -> bool {
2539        // By our own invariant, the map can only contain mappings for virtual resources that are
2540        // present in `self.virtual_resources`. It follows that when the length of `self` equals
2541        // that of `self.virtual_resources`, the virtual resources are mapped exhaustively.
2542        self.len() == self.virtual_resources.len()
2543    }
2544
2545    pub(crate) unsafe fn buffer(&self, id: Id<Buffer>) -> Result<&BufferState, InvalidSlotError> {
2546        self.virtual_resources.get(id.erase())?;
2547
2548        // SAFETY: The caller must ensure that a mapping for `id` has been inserted.
2549        Ok(unsafe { self.buffer_unchecked(id) })
2550    }
2551
2552    pub(crate) unsafe fn buffer_unchecked(&self, id: Id<Buffer>) -> &BufferState {
2553        #[cfg(debug_assertions)]
2554        if self.virtual_resources.get(id.erase()).is_err() {
2555            std::process::abort();
2556        }
2557
2558        // SAFETY: The caller must ensure that `id` is a valid virtual ID.
2559        let &slot = unsafe { self.map.get_unchecked(id.index() as usize) };
2560
2561        // SAFETY: The caller must ensure that a mapping for `id` has been inserted.
2562        unsafe { &*slot.cast::<BufferState>() }
2563    }
2564
2565    pub(crate) unsafe fn image(&self, id: Id<Image>) -> Result<&ImageState, InvalidSlotError> {
2566        self.virtual_resources.get(id.erase())?;
2567
2568        // SAFETY: The caller must ensure that a mapping for `id` has been inserted.
2569        Ok(unsafe { self.image_unchecked(id) })
2570    }
2571
2572    pub(crate) unsafe fn image_unchecked(&self, id: Id<Image>) -> &ImageState {
2573        #[cfg(debug_assertions)]
2574        if self.virtual_resources.get(id.erase()).is_err() {
2575            std::process::abort();
2576        }
2577
2578        // SAFETY: The caller must ensure that `id` is a valid virtual ID.
2579        let &slot = unsafe { self.map.get_unchecked(id.index() as usize) };
2580
2581        // SAFETY: The caller must ensure that a mapping for `id` has been inserted.
2582        unsafe { &*slot.cast::<ImageState>() }
2583    }
2584
2585    pub(crate) unsafe fn swapchain(
2586        &self,
2587        id: Id<Swapchain>,
2588    ) -> Result<&SwapchainState, InvalidSlotError> {
2589        self.virtual_resources.get(id.erase())?;
2590
2591        // SAFETY: The caller must ensure that a mapping for `id` has been inserted.
2592        Ok(unsafe { self.swapchain_unchecked(id) })
2593    }
2594
2595    pub(crate) unsafe fn swapchain_unchecked(&self, id: Id<Swapchain>) -> &SwapchainState {
2596        #[cfg(debug_assertions)]
2597        if self.virtual_resources.get(id.erase()).is_err() {
2598            std::process::abort();
2599        }
2600
2601        // SAFETY: The caller must ensure that `id` is a valid virtual ID.
2602        let &slot = unsafe { self.map.get_unchecked(id.index() as usize) };
2603
2604        // SAFETY: The caller must ensure that a mapping for `id` has been inserted.
2605        unsafe { &*slot.cast::<SwapchainState>() }
2606    }
2607}
2608
2609unsafe impl DeviceOwned for ResourceMap<'_> {
2610    #[inline]
2611    fn device(&self) -> &Arc<Device> {
2612        self.physical_resources.device()
2613    }
2614}
2615
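/// A resource type that can be mapped in a [`ResourceMap`].
///
/// This is the dispatch mechanism behind [`ResourceMap::insert`] and the [`resource_map!`]
/// macro: each implementor forwards to its typed insert method.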
2616pub trait Resource: Sized {
2617    fn insert(
2618        map: &mut ResourceMap<'_>,
2619        virtual_id: Id<Self>,
2620        physical_id: Id<Self>,
2621    ) -> Result<(), InvalidSlotError>;
2622}
2623
2624impl Resource for Buffer {
2625    fn insert(
2626        map: &mut ResourceMap<'_>,
2627        virtual_id: Id<Self>,
2628        physical_id: Id<Self>,
2629    ) -> Result<(), InvalidSlotError> {
2630        map.insert_buffer(virtual_id, physical_id)
2631    }
2632}
2633
2634impl Resource for Image {
2635    fn insert(
2636        map: &mut ResourceMap<'_>,
2637        virtual_id: Id<Self>,
2638        physical_id: Id<Self>,
2639    ) -> Result<(), InvalidSlotError> {
2640        map.insert_image(virtual_id, physical_id)
2641    }
2642}
2643
2644impl Resource for Swapchain {
2645    fn insert(
2646        map: &mut ResourceMap<'_>,
2647        virtual_id: Id<Self>,
2648        physical_id: Id<Self>,
2649    ) -> Result<(), InvalidSlotError> {
2650        map.insert_swapchain(virtual_id, physical_id)
2651    }
2652}
2653
2654/// Creates a [`ResourceMap`] containing the given mappings.
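///
/// A minimal usage sketch, where `executable` is an `ExecutableTaskGraph` and
/// `virtual_buffer`/`physical_buffer` stand in for a hypothetical matching pair of virtual and
/// physical buffer IDs:
///
/// ```ignore
/// let resource_map = resource_map!(&executable, virtual_buffer => physical_buffer)?;
/// ```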
2655#[macro_export]
2656macro_rules! resource_map {
2657    ($executable:expr $(, $virtual_id:expr => $physical_id:expr)* $(,)?) => {
2658        match $crate::graph::ResourceMap::new($executable) {
2659            ::std::result::Result::Ok(mut map) => {
2660                $(if let ::std::result::Result::Err(err) = map.insert($virtual_id, $physical_id) {
2661                    ::std::result::Result::Err(err)
2662                } else)* {
2663                    ::std::result::Result::Ok::<_, $crate::InvalidSlotError>(map)
2664                }
2665            }
2666            ::std::result::Result::Err(err) => ::std::result::Result::Err(err),
2667        }
2668    };
2669}
2670
2671type Result<T = (), E = ExecuteError> = ::std::result::Result<T, E>;
2672
2673/// Error that can happen when [executing] an [`ExecutableTaskGraph`].
2674///
2675/// [executing]: ExecutableTaskGraph::execute
2676#[derive(Debug)]
2677pub enum ExecuteError {
    /// An error occurred while executing a task.
2678    Task {
2679        node_index: NodeIndex,
2680        error: TaskError,
2681    },
    /// An error occurred while using a swapchain.
2682    Swapchain {
2683        swapchain_id: Id<Swapchain>,
2684        error: Validated<VulkanError>,
2685    },
    /// A runtime error occurred.
2686    VulkanError(VulkanError),
2687}
2688
2689impl From<VulkanError> for ExecuteError {
2690    fn from(err: VulkanError) -> Self {
2691        Self::VulkanError(err)
2692    }
2693}
2694
2695impl fmt::Display for ExecuteError {
2696    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2697        match self {
2698            Self::Task { node_index, .. } => {
2699                write!(f, "an error occurred while executing task {node_index:?}")
2700            }
2701            Self::Swapchain { swapchain_id, .. } => write!(
2702                f,
2703                "an error occurred while using swapchain {swapchain_id:?}",
2704            ),
2705            Self::VulkanError(_) => f.write_str("a runtime error occurred"),
2706        }
2707    }
2708}
2709
2710impl Error for ExecuteError {
2711    fn source(&self) -> Option<&(dyn Error + 'static)> {
2712        match self {
2713            Self::Task { error, .. } => Some(error),
2714            Self::Swapchain { error, .. } => Some(error),
2715            Self::VulkanError(err) => Some(err),
2716        }
2717    }
2718}