wgpu_core/device/life.rs

#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
    device::{
        queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
        DeviceError,
    },
    hal_api::HalApi,
    hub::{Hub, Token},
    id,
    identity::GlobalIdentityHandlerFactory,
    resource,
    track::{BindGroupStates, RenderBundleScope, Tracker},
    RefCount, Stored, SubmissionIndex,
};
use smallvec::SmallVec;

use hal::Device as _;
use parking_lot::Mutex;
use thiserror::Error;

use std::mem;

/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
pub(super) struct SuspectedResources {
    pub(super) buffers: Vec<id::Valid<id::BufferId>>,
    pub(super) textures: Vec<id::Valid<id::TextureId>>,
    pub(super) texture_views: Vec<id::Valid<id::TextureViewId>>,
    pub(super) samplers: Vec<id::Valid<id::SamplerId>>,
    pub(super) bind_groups: Vec<id::Valid<id::BindGroupId>>,
    pub(super) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
    pub(super) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
    pub(super) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
    pub(super) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
    pub(super) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
    pub(super) query_sets: Vec<id::Valid<id::QuerySetId>>,
}

impl SuspectedResources {
    pub(super) fn clear(&mut self) {
        self.buffers.clear();
        self.textures.clear();
        self.texture_views.clear();
        self.samplers.clear();
        self.bind_groups.clear();
        self.compute_pipelines.clear();
        self.render_pipelines.clear();
        self.bind_group_layouts.clear();
        self.pipeline_layouts.clear();
        self.render_bundles.clear();
        self.query_sets.clear();
    }

    pub(super) fn extend(&mut self, other: &Self) {
        self.buffers.extend_from_slice(&other.buffers);
        self.textures.extend_from_slice(&other.textures);
        self.texture_views.extend_from_slice(&other.texture_views);
        self.samplers.extend_from_slice(&other.samplers);
        self.bind_groups.extend_from_slice(&other.bind_groups);
        self.compute_pipelines
            .extend_from_slice(&other.compute_pipelines);
        self.render_pipelines
            .extend_from_slice(&other.render_pipelines);
        self.bind_group_layouts
            .extend_from_slice(&other.bind_group_layouts);
        self.pipeline_layouts
            .extend_from_slice(&other.pipeline_layouts);
        self.render_bundles.extend_from_slice(&other.render_bundles);
        self.query_sets.extend_from_slice(&other.query_sets);
    }

    pub(super) fn add_render_bundle_scope<A: HalApi>(&mut self, trackers: &RenderBundleScope<A>) {
        self.buffers.extend(trackers.buffers.used());
        self.textures.extend(trackers.textures.used());
        self.bind_groups.extend(trackers.bind_groups.used());
        self.render_pipelines
            .extend(trackers.render_pipelines.used());
        self.query_sets.extend(trackers.query_sets.used());
    }

    pub(super) fn add_bind_group_states<A: HalApi>(&mut self, trackers: &BindGroupStates<A>) {
        self.buffers.extend(trackers.buffers.used());
        self.textures.extend(trackers.textures.used());
        self.texture_views.extend(trackers.views.used());
        self.samplers.extend(trackers.samplers.used());
    }
}

/// Raw backend resources that should be freed shortly.
#[derive(Debug)]
struct NonReferencedResources<A: hal::Api> {
    buffers: Vec<A::Buffer>,
    textures: Vec<A::Texture>,
    texture_views: Vec<A::TextureView>,
    samplers: Vec<A::Sampler>,
    bind_groups: Vec<A::BindGroup>,
    compute_pipes: Vec<A::ComputePipeline>,
    render_pipes: Vec<A::RenderPipeline>,
    bind_group_layouts: Vec<A::BindGroupLayout>,
    pipeline_layouts: Vec<A::PipelineLayout>,
    query_sets: Vec<A::QuerySet>,
}

impl<A: hal::Api> NonReferencedResources<A> {
    fn new() -> Self {
        Self {
            buffers: Vec::new(),
            textures: Vec::new(),
            texture_views: Vec::new(),
            samplers: Vec::new(),
            bind_groups: Vec::new(),
            compute_pipes: Vec::new(),
            render_pipes: Vec::new(),
            bind_group_layouts: Vec::new(),
            pipeline_layouts: Vec::new(),
            query_sets: Vec::new(),
        }
    }

    fn extend(&mut self, other: Self) {
        self.buffers.extend(other.buffers);
        self.textures.extend(other.textures);
        self.texture_views.extend(other.texture_views);
        self.samplers.extend(other.samplers);
        self.bind_groups.extend(other.bind_groups);
        self.compute_pipes.extend(other.compute_pipes);
        self.render_pipes.extend(other.render_pipes);
        self.query_sets.extend(other.query_sets);
        assert!(other.bind_group_layouts.is_empty());
        assert!(other.pipeline_layouts.is_empty());
    }

    unsafe fn clean(&mut self, device: &A::Device) {
        if !self.buffers.is_empty() {
            profiling::scope!("destroy_buffers");
            for raw in self.buffers.drain(..) {
                unsafe { device.destroy_buffer(raw) };
            }
        }
        if !self.textures.is_empty() {
            profiling::scope!("destroy_textures");
            for raw in self.textures.drain(..) {
                unsafe { device.destroy_texture(raw) };
            }
        }
        if !self.texture_views.is_empty() {
            profiling::scope!("destroy_texture_views");
            for raw in self.texture_views.drain(..) {
                unsafe { device.destroy_texture_view(raw) };
            }
        }
        if !self.samplers.is_empty() {
            profiling::scope!("destroy_samplers");
            for raw in self.samplers.drain(..) {
                unsafe { device.destroy_sampler(raw) };
            }
        }
        if !self.bind_groups.is_empty() {
            profiling::scope!("destroy_bind_groups");
            for raw in self.bind_groups.drain(..) {
                unsafe { device.destroy_bind_group(raw) };
            }
        }
        if !self.compute_pipes.is_empty() {
            profiling::scope!("destroy_compute_pipelines");
            for raw in self.compute_pipes.drain(..) {
                unsafe { device.destroy_compute_pipeline(raw) };
            }
        }
        if !self.render_pipes.is_empty() {
            profiling::scope!("destroy_render_pipelines");
            for raw in self.render_pipes.drain(..) {
                unsafe { device.destroy_render_pipeline(raw) };
            }
        }
        if !self.bind_group_layouts.is_empty() {
            profiling::scope!("destroy_bind_group_layouts");
            for raw in self.bind_group_layouts.drain(..) {
                unsafe { device.destroy_bind_group_layout(raw) };
            }
        }
        if !self.pipeline_layouts.is_empty() {
            profiling::scope!("destroy_pipeline_layouts");
            for raw in self.pipeline_layouts.drain(..) {
                unsafe { device.destroy_pipeline_layout(raw) };
            }
        }
        if !self.query_sets.is_empty() {
            profiling::scope!("destroy_query_sets");
            for raw in self.query_sets.drain(..) {
                unsafe { device.destroy_query_set(raw) };
            }
        }
    }
}

/// Resources used by a queue submission, and work to be done once it completes.
struct ActiveSubmission<A: hal::Api> {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Resources to be freed once this queue submission has completed.
    ///
    /// When the device is polled, for completed submissions,
    /// `triage_submissions` merges these into
    /// `LifetimeTracker::free_resources`. From there,
    /// `LifetimeTracker::cleanup` passes them to the hal to be freed.
    ///
    /// This includes things like temporary resources and resources that are
    /// used by submitted commands but have been dropped by the user (meaning that
    /// this submission is their last reference.)
    last_resources: NonReferencedResources<A>,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<id::Valid<id::BufferId>>,

    /// Command encoders used by this submission, returned to the command
    /// allocator once it completes.
    encoders: Vec<EncoderInFlight<A>>,

    /// List of queue "on_submitted_work_done" closures to be called once this
    /// submission has completed.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Tried to wait using a submission index from the wrong device. Submission index is from device {0:?}. Called poll on device {1:?}.")]
    WrongSubmissionIndex(id::QueueId, id::DeviceId),
    #[error("GPU got stuck :(")]
    StuckGpu,
}

/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// -   Each buffer's `LifeGuard::submission_index` records the index of the
///     most recent queue submission that uses that buffer.
///
/// -   Calling `map_async` adds the buffer to `self.mapped`, and changes
///     `Buffer::map_state` to prevent it from being used in any new
///     submissions.
///
/// -   When the device is polled, the following `LifetimeTracker` methods decide
///     what should happen next:
///
///     1)  `triage_mapped` drains `self.mapped`, checking the submission index
///         of each buffer against the queue submissions that have finished
///         execution. Buffers used by submissions still in flight go in
///         `self.active[index].mapped`, and the rest go into
///         `self.ready_to_map`.
///
///     2)  `triage_submissions` moves entries in `self.active[i]` for completed
///         submissions to `self.ready_to_map`.  At this point, both
///         `self.active` and `self.ready_to_map` are up to date with the given
///         submission index.
///
///     3)  `handle_mapping` drains `self.ready_to_map` and actually maps the
///         buffers, collecting a list of notification closures to call. But any
///         buffers that were dropped by the user get moved to
///         `self.free_resources`.
///
///     4)  `cleanup` frees everything in `free_resources`.
///
/// Only `self.mapped` holds a `RefCount` for the buffer; it is dropped by
/// `triage_mapped`.
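///
/// A minimal sketch of one maintenance pass over these methods, in the order of
/// the steps above (illustrative only: `life_tracker`, `hub`, `token`,
/// `raw_device`, `command_allocator`, and `last_done_index` are assumed names,
/// and the real driver of these calls is the device's poll/maintain path):
///
/// ```ignore
/// // 1) sort pending map requests into per-submission or ready-to-map lists
/// life_tracker.triage_mapped(hub, token);
/// // 2) retire submissions the GPU has finished executing
/// let work_done = life_tracker.triage_submissions(last_done_index, &command_allocator);
/// // 3) perform the actual host mappings and collect notification closures
/// let mappings = life_tracker.handle_mapping(hub, &raw_device, &trackers, token);
/// // 4) free raw resources whose last use has completed
/// life_tracker.cleanup(&raw_device);
/// ```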
pub(super) struct LifetimeTracker<A: hal::Api> {
    /// Resources that the user has requested be mapped, but which are used by
    /// queue submissions still in flight.
    mapped: Vec<Stored<id::BufferId>>,

    /// Buffers can be used in a submission that is yet to be made, by means of
    /// `write_buffer()`, so we have a special place for them.
    pub future_suspected_buffers: Vec<Stored<id::BufferId>>,

    /// Textures can be used in the upcoming submission by `write_texture`.
    pub future_suspected_textures: Vec<Stored<id::TextureId>>,

    /// Resources whose user handle has died (i.e. drop/destroy has been called)
    /// and will likely be ready for destruction soon.
    pub suspected_resources: SuspectedResources,

    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission<A>>,

    /// Raw backend resources that are neither referenced nor used.
    ///
    /// These are freed by `LifetimeTracker::cleanup`, which is called from periodic
    /// maintenance functions like `Global::device_poll`, and when a device is
    /// destroyed.
    free_resources: NonReferencedResources<A>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<id::Valid<id::BufferId>>,

    /// Queue "on_submitted_work_done" closures that were initiated while there
    /// were no pending submissions. These cannot be invoked immediately, as they
    /// must run _after_ all mapped-buffer callbacks have fired, so we defer them
    /// here until the next time the device is maintained.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

impl<A: hal::Api> LifetimeTracker<A> {
    pub fn new() -> Self {
        Self {
            mapped: Vec::new(),
            future_suspected_buffers: Vec::new(),
            future_suspected_textures: Vec::new(),
            suspected_resources: SuspectedResources::default(),
            active: Vec::new(),
            free_resources: NonReferencedResources::new(),
            ready_to_map: Vec::new(),
            work_done_closures: SmallVec::new(),
        }
    }

    /// Return true if there are no queue submissions still in flight.
    pub fn queue_empty(&self) -> bool {
        self.active.is_empty()
    }

    /// Start tracking resources associated with a new queue submission.
    pub fn track_submission(
        &mut self,
        index: SubmissionIndex,
        temp_resources: impl Iterator<Item = TempResource<A>>,
        encoders: Vec<EncoderInFlight<A>>,
    ) {
        let mut last_resources = NonReferencedResources::new();
        for res in temp_resources {
            match res {
                TempResource::Buffer(raw) => last_resources.buffers.push(raw),
                TempResource::Texture(raw, views) => {
                    last_resources.textures.push(raw);
                    last_resources.texture_views.extend(views);
                }
            }
        }

        self.active.push(ActiveSubmission {
            index,
            last_resources,
            mapped: Vec::new(),
            encoders,
            work_done_closures: SmallVec::new(),
        });
    }

    pub fn post_submit(&mut self) {
        self.suspected_resources.buffers.extend(
            self.future_suspected_buffers
                .drain(..)
                .map(|stored| stored.value),
        );
        self.suspected_resources.textures.extend(
            self.future_suspected_textures
                .drain(..)
                .map(|stored| stored.value),
        );
    }

    pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
        self.mapped.push(Stored { value, ref_count });
    }

    /// Sort out the consequences of completed submissions.
    ///
    /// Assume that all submissions up through `last_done` have completed.
    ///
    /// -   Buffers used by those submissions are now ready to map, if
    ///     requested. Add any buffers in the submission's [`mapped`] list to
    ///     [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`] will find
    ///     them.
    ///
    /// -   Resources whose final use was in those submissions are now ready to
    ///     free. Add any resources in the submission's [`last_resources`] table
    ///     to [`self.free_resources`], where [`LifetimeTracker::cleanup`] will find
    ///     them.
    ///
    /// Return a list of [`SubmittedWorkDoneClosure`]s to run.
    ///
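    /// A hedged usage sketch (illustrative only; `device`, `life_tracker`, and
    /// the fence read are assumed names, not the actual polling code):
    ///
    /// ```ignore
    /// // Ask the hal how far the GPU has progressed, then retire submissions.
    /// let last_done = unsafe { device.raw.get_fence_value(&device.fence)? };
    /// let closures = life_tracker.triage_submissions(last_done, &device.command_allocator);
    /// for closure in closures {
    ///     closure.call(); // fire "on_submitted_work_done" callbacks
    /// }
    /// ```
    ///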
    /// [`mapped`]: ActiveSubmission::mapped
    /// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
    /// [`last_resources`]: ActiveSubmission::last_resources
    /// [`self.free_resources`]: LifetimeTracker::free_resources
    /// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
    #[must_use]
    pub fn triage_submissions(
        &mut self,
        last_done: SubmissionIndex,
        command_allocator: &Mutex<super::CommandAllocator<A>>,
    ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
        profiling::scope!("triage_submissions");

        //TODO: enable when `is_sorted_by_key` is stable
        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
        let done_count = self
            .active
            .iter()
            .position(|a| a.index > last_done)
            .unwrap_or(self.active.len());

        let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect();
        for a in self.active.drain(..done_count) {
            log::trace!("Active submission {} is done", a.index);
            self.free_resources.extend(a.last_resources);
            self.ready_to_map.extend(a.mapped);
            for encoder in a.encoders {
                let raw = unsafe { encoder.land() };
                command_allocator.lock().release_encoder(raw);
            }
            work_done_closures.extend(a.work_done_closures);
        }
        work_done_closures
    }

    pub fn cleanup(&mut self, device: &A::Device) {
        profiling::scope!("LifetimeTracker::cleanup");
        unsafe {
            self.free_resources.clean(device);
        }
    }

    pub fn schedule_resource_destruction(
        &mut self,
        temp_resource: TempResource<A>,
        last_submit_index: SubmissionIndex,
    ) {
        let resources = self
            .active
            .iter_mut()
            .find(|a| a.index == last_submit_index)
            .map_or(&mut self.free_resources, |a| &mut a.last_resources);
        match temp_resource {
            TempResource::Buffer(raw) => resources.buffers.push(raw),
            TempResource::Texture(raw, views) => {
                resources.texture_views.extend(views);
                resources.textures.push(raw);
            }
        }
    }

    pub fn add_work_done_closure(&mut self, closure: SubmittedWorkDoneClosure) {
        match self.active.last_mut() {
            Some(active) => {
                active.work_done_closures.push(closure);
            }
            // We must defer the closure until all previously occurring map_async closures
            // have fired. This is required by the spec.
            None => {
                self.work_done_closures.push(closure);
            }
        }
    }
}

impl<A: HalApi> LifetimeTracker<A> {
    /// Identify resources to free, according to `trackers` and `self.suspected_resources`.
    ///
    /// Given `trackers`, the [`Tracker`] belonging to the same [`Device`] as
    /// `self`, and `hub`, the [`Hub`] to which that `Device` belongs:
    ///
    /// Remove from `trackers` each resource mentioned in
    /// [`self.suspected_resources`]. If `trackers` held the final reference to
    /// that resource, add it to the appropriate free list, to be destroyed by
    /// the hal:
    ///
    /// -   Add resources used by queue submissions still in flight to the
    ///     [`last_resources`] table of the last such submission's entry in
    ///     [`self.active`]. When that submission has finished execution, the
    ///     [`triage_submissions`] method will move them to
    ///     [`self.free_resources`].
    ///
    /// -   Add resources that can be freed right now to [`self.free_resources`]
    ///     directly. [`LifetimeTracker::cleanup`] will take care of them as
    ///     part of this poll.
    ///
    /// ## Entrained resources
    ///
    /// This function finds resources that are used only by other resources
    /// ready to be freed, and adds those to the free lists as well. For
    /// example, if there's some texture `T` used only by some texture view
    /// `TV`, then if `TV` can be freed, `T` gets added to the free lists too.
    ///
    /// Since `wgpu-core` resource ownership patterns are acyclic, we can visit
    /// each type that can be owned after all types that could possibly own
    /// it. This way, we can detect all free-able objects in a single pass,
    /// simply by starting with types that are roots of the ownership DAG (like
    /// render bundles) and working our way towards leaf types (like buffers).
    ///
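    /// Concretely, the body below visits the suspect lists in roughly
    /// owner-before-owned order: render bundles, bind groups, texture views,
    /// textures, samplers, buffers, compute and render pipelines, pipeline
    /// layouts, bind group layouts, and finally query sets.
    ///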
    /// [`Device`]: super::Device
    /// [`self.suspected_resources`]: LifetimeTracker::suspected_resources
    /// [`last_resources`]: ActiveSubmission::last_resources
    /// [`self.active`]: LifetimeTracker::active
    /// [`triage_submissions`]: LifetimeTracker::triage_submissions
    /// [`self.free_resources`]: LifetimeTracker::free_resources
    pub(super) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
        &mut self,
        hub: &Hub<A, G>,
        trackers: &Mutex<Tracker<A>>,
        #[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
        token: &mut Token<super::Device<A>>,
    ) {
        profiling::scope!("triage_suspected");

        if !self.suspected_resources.render_bundles.is_empty() {
            let (mut guard, _) = hub.render_bundles.write(token);
            let mut trackers = trackers.lock();

            while let Some(id) = self.suspected_resources.render_bundles.pop() {
                if trackers.bundles.remove_abandoned(id) {
                    log::debug!("Bundle {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyRenderBundle(id.0));
                    }

                    if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources.add_render_bundle_scope(&res.used);
                    }
                }
            }
        }

        if !self.suspected_resources.bind_groups.is_empty() {
            let (mut guard, _) = hub.bind_groups.write(token);
            let mut trackers = trackers.lock();

            while let Some(id) = self.suspected_resources.bind_groups.pop() {
                if trackers.bind_groups.remove_abandoned(id) {
                    log::debug!("Bind group {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyBindGroup(id.0));
                    }

                    if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources.add_bind_group_states(&res.used);

                        self.suspected_resources
                            .bind_group_layouts
                            .push(res.layout_id);

                        let submit_index = res.life_guard.life_count();
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .bind_groups
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.texture_views.is_empty() {
            let (mut guard, _) = hub.texture_views.write(token);
            let mut trackers = trackers.lock();

            let mut list = mem::take(&mut self.suspected_resources.texture_views);
            for id in list.drain(..) {
                if trackers.views.remove_abandoned(id) {
                    log::debug!("Texture view {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyTextureView(id.0));
                    }

                    if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources.textures.push(res.parent_id.value);
                        let submit_index = res.life_guard.life_count();
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .texture_views
                            .push(res.raw);
                    }
                }
            }
            self.suspected_resources.texture_views = list;
        }

        if !self.suspected_resources.textures.is_empty() {
            let (mut guard, _) = hub.textures.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.textures.drain(..) {
                if trackers.textures.remove_abandoned(id) {
                    log::debug!("Texture {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyTexture(id.0));
                    }

                    if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.life_count();
                        let raw = match res.inner {
                            resource::TextureInner::Native { raw: Some(raw) } => raw,
                            _ => continue,
                        };
                        let non_referenced_resources = self
                            .active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources);

                        non_referenced_resources.textures.push(raw);
                        if let resource::TextureClearMode::RenderPass { clear_views, .. } =
                            res.clear_mode
                        {
                            non_referenced_resources
                                .texture_views
                                .extend(clear_views.into_iter());
                        }
                    }
                }
            }
        }

        if !self.suspected_resources.samplers.is_empty() {
            let (mut guard, _) = hub.samplers.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.samplers.drain(..) {
                if trackers.samplers.remove_abandoned(id) {
                    log::debug!("Sampler {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroySampler(id.0));
                    }

                    if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.life_count();
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .samplers
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.buffers.is_empty() {
            let (mut guard, _) = hub.buffers.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.buffers.drain(..) {
                if trackers.buffers.remove_abandoned(id) {
                    log::debug!("Buffer {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyBuffer(id.0));
                    }

                    if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.life_count();
                        if let resource::BufferMapState::Init { stage_buffer, .. } = res.map_state {
                            self.free_resources.buffers.push(stage_buffer);
                        }
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .buffers
                            .extend(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.compute_pipelines.is_empty() {
            let (mut guard, _) = hub.compute_pipelines.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.compute_pipelines.drain(..) {
                if trackers.compute_pipelines.remove_abandoned(id) {
                    log::debug!("Compute pipeline {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyComputePipeline(id.0));
                    }

                    if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.life_count();
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .compute_pipes
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.render_pipelines.is_empty() {
            let (mut guard, _) = hub.render_pipelines.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.render_pipelines.drain(..) {
                if trackers.render_pipelines.remove_abandoned(id) {
                    log::debug!("Render pipeline {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyRenderPipeline(id.0));
                    }

                    if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.life_count();
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .render_pipes
                            .push(res.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.pipeline_layouts.is_empty() {
            let (mut guard, _) = hub.pipeline_layouts.write(token);

            for Stored {
                value: id,
                ref_count,
            } in self.suspected_resources.pipeline_layouts.drain(..)
            {
                //Note: this has to happen after all the suspected pipelines are destroyed
                if ref_count.load() == 1 {
                    log::debug!("Pipeline layout {:?} will be destroyed", id);
                    #[cfg(feature = "trace")]
                    if let Some(t) = trace {
                        t.lock().add(trace::Action::DestroyPipelineLayout(id.0));
                    }

                    if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
                        self.suspected_resources
                            .bind_group_layouts
                            .extend_from_slice(&lay.bind_group_layout_ids);
                        self.free_resources.pipeline_layouts.push(lay.raw);
                    }
                }
            }
        }

        if !self.suspected_resources.bind_group_layouts.is_empty() {
            let (mut guard, _) = hub.bind_group_layouts.write(token);

            for id in self.suspected_resources.bind_group_layouts.drain(..) {
                //Note: this has to happen after all the suspected pipelines are destroyed
                //Note: nothing else can bump the refcount since the guard is locked exclusively
                //Note: same BGL can appear multiple times in the list, but only the last
                // encounter could drop the refcount to 0.
                let mut bgl_to_check = Some(id);
                while let Some(id) = bgl_to_check.take() {
                    let bgl = &guard[id];
                    if bgl.multi_ref_count.dec_and_check_empty() {
                        // If this layout points to a compatible one, go over the latter
                        // to decrement the ref count and potentially destroy it.
                        bgl_to_check = bgl.as_duplicate();

                        log::debug!("Bind group layout {:?} will be destroyed", id);
                        #[cfg(feature = "trace")]
                        if let Some(t) = trace {
                            t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
                        }
                        if let Some(lay) =
                            hub.bind_group_layouts.unregister_locked(id.0, &mut *guard)
                        {
                            if let Some(inner) = lay.into_inner() {
                                self.free_resources.bind_group_layouts.push(inner.raw);
                            }
                        }
                    }
                }
            }
        }

        if !self.suspected_resources.query_sets.is_empty() {
            let (mut guard, _) = hub.query_sets.write(token);
            let mut trackers = trackers.lock();

            for id in self.suspected_resources.query_sets.drain(..) {
                if trackers.query_sets.remove_abandoned(id) {
                    log::debug!("Query set {:?} will be destroyed", id);
                    // #[cfg(feature = "trace")]
                    // trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
                    if let Some(res) = hub.query_sets.unregister_locked(id.0, &mut *guard) {
                        let submit_index = res.life_guard.life_count();
                        self.active
                            .iter_mut()
                            .find(|a| a.index == submit_index)
                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
                            .query_sets
                            .push(res.raw);
                    }
                }
            }
        }
    }

    /// Determine which buffers are ready to map, and which must wait for the
    /// GPU.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    pub(super) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
        &mut self,
        hub: &Hub<A, G>,
        token: &mut Token<super::Device<A>>,
    ) {
        if self.mapped.is_empty() {
            return;
        }
        let (buffer_guard, _) = hub.buffers.read(token);

        for stored in self.mapped.drain(..) {
            let resource_id = stored.value;
            let buf = &buffer_guard[resource_id];

            let submit_index = buf.life_guard.life_count();
            log::trace!(
                "Mapping of {:?} at submission {:?} gets assigned to active {:?}",
                resource_id,
                submit_index,
                self.active.iter().position(|a| a.index == submit_index)
            );

            self.active
                .iter_mut()
                .find(|a| a.index == submit_index)
                .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
                .push(resource_id);
        }
    }

    /// Map the buffers in `self.ready_to_map`.
    ///
    /// Return a list of mapping notifications to send.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    #[must_use]
    pub(super) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
        &mut self,
        hub: &Hub<A, G>,
        raw: &A::Device,
        trackers: &Mutex<Tracker<A>>,
        token: &mut Token<super::Device<A>>,
    ) -> Vec<super::BufferMapPendingClosure> {
        if self.ready_to_map.is_empty() {
            return Vec::new();
        }
        let (mut buffer_guard, _) = hub.buffers.write(token);
        let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
            Vec::with_capacity(self.ready_to_map.len());
        let mut trackers = trackers.lock();
        for buffer_id in self.ready_to_map.drain(..) {
            let buffer = &mut buffer_guard[buffer_id];
            if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
            {
                buffer.map_state = resource::BufferMapState::Idle;
                log::debug!("Mapping request is dropped because the buffer is destroyed.");
                if let Some(buf) = hub
                    .buffers
                    .unregister_locked(buffer_id.0, &mut *buffer_guard)
                {
                    self.free_resources.buffers.extend(buf.raw);
                }
            } else {
                let mapping = match std::mem::replace(
                    &mut buffer.map_state,
                    resource::BufferMapState::Idle,
                ) {
                    resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
                    // Mapping cancelled
                    resource::BufferMapState::Idle => continue,
                    // Mapping queued at least twice by map -> unmap -> map
                    // and was already successfully mapped below
                    active @ resource::BufferMapState::Active { .. } => {
                        buffer.map_state = active;
                        continue;
                    }
                    _ => panic!("No pending mapping."),
                };
                let status = if mapping.range.start != mapping.range.end {
                    log::debug!("Buffer {:?} map state -> Active", buffer_id);
                    let host = mapping.op.host;
                    let size = mapping.range.end - mapping.range.start;
                    match super::map_buffer(raw, buffer, mapping.range.start, size, host) {
                        Ok(ptr) => {
                            buffer.map_state = resource::BufferMapState::Active {
                                ptr,
                                range: mapping.range.start..mapping.range.start + size,
                                host,
                            };
                            Ok(())
                        }
                        Err(e) => {
                            log::error!("Mapping failed {:?}", e);
                            Err(e)
                        }
                    }
                } else {
                    buffer.map_state = resource::BufferMapState::Active {
                        ptr: std::ptr::NonNull::dangling(),
                        range: mapping.range,
                        host: mapping.op.host,
                    };
                    Ok(())
                };
                pending_callbacks.push((mapping.op, status));
            }
        }
        pending_callbacks
    }
}