wgpu_core/device/resource.rs

use alloc::{
    borrow::Cow,
    boxed::Box,
    string::{String, ToString as _},
    sync::{Arc, Weak},
    vec::Vec,
};
use core::{
    fmt,
    mem::{self, ManuallyDrop},
    num::NonZeroU32,
    sync::atomic::{AtomicBool, Ordering},
};

use arrayvec::ArrayVec;
use bitflags::Flags;
use smallvec::SmallVec;
use wgt::{
    math::align_to, DeviceLostReason, TextureFormat, TextureSampleType, TextureSelector,
    TextureViewDimension,
};

#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
    binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError},
    command, conv,
    device::{
        bgl, create_validator, life::WaitIdleError, map_buffer, AttachmentData,
        DeviceLostInvocation, HostMap, MissingDownlevelFlags, MissingFeatures, RenderPassContext,
        CLEANUP_WAIT_MS,
    },
    hal_label,
    init_tracker::{
        BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
        TextureInitTrackerAction,
    },
    instance::{Adapter, RequestDeviceError},
    lock::{rank, Mutex, RwLock},
    pipeline,
    pool::ResourcePool,
    resource::{
        self, AccelerationStructure, Buffer, Fallible, Labeled, ParentDevice, QuerySet, Sampler,
        StagingBuffer, Texture, TextureView, TextureViewNotRenderableReason, Tlas, TrackingData,
    },
    resource_log,
    snatch::{SnatchGuard, SnatchLock, Snatchable},
    timestamp_normalization::TIMESTAMP_NORMALIZATION_BUFFER_USES,
    track::{BindGroupStates, DeviceTracker, TrackerIndexAllocators, UsageScope, UsageScopePool},
    validation::{self, validate_color_attachment_bytes_per_sample},
    weak_vec::WeakVec,
    FastHashMap, LabelHelpers, OnceCellOrLock,
};

use super::{
    queue::Queue, DeviceDescriptor, DeviceError, DeviceLostClosure, UserClosures,
    ENTRYPOINT_FAILURE_ERROR, ZERO_BUFFER_SIZE,
};

#[cfg(supports_64bit_atomics)]
use core::sync::atomic::AtomicU64;
#[cfg(not(supports_64bit_atomics))]
use portable_atomic::AtomicU64;

pub(crate) struct CommandIndices {
    /// The index of the last command submission that was attempted.
    ///
    /// Note that `fence` may never be signalled with this value, if the command
    /// submission failed. If you need to wait for everything running on a
    /// `Queue` to complete, wait for [`last_successful_submission_index`].
    ///
    /// [`last_successful_submission_index`]: Device::last_successful_submission_index
    pub(crate) active_submission_index: hal::FenceValue,
    pub(crate) next_acceleration_structure_build_command_index: u64,
}

/// Structure describing a logical device. Some members are internally mutable,
/// stored behind mutexes.
pub struct Device {
    raw: Box<dyn hal::DynDevice>,
    pub(crate) adapter: Arc<Adapter>,
    pub(crate) queue: OnceCellOrLock<Weak<Queue>>,
    pub(crate) zero_buffer: ManuallyDrop<Box<dyn hal::DynBuffer>>,
    /// The `label` from the descriptor used to create the resource.
    label: String,

    pub(crate) command_allocator: command::CommandAllocator,

    pub(crate) command_indices: RwLock<CommandIndices>,

    /// The index of the last successful submission to this device's
    /// [`hal::Queue`].
    ///
    /// Unlike [`active_submission_index`], which is incremented each time
    /// submission is attempted, this is updated only when submission succeeds,
    /// so waiting for this value won't hang waiting for work that was never
    /// submitted.
    ///
    /// [`active_submission_index`]: CommandIndices::active_submission_index
    pub(crate) last_successful_submission_index: hal::AtomicFenceValue,

    // NOTE: if both are needed, the `snatchable_lock` must be consistently acquired before the
    // `fence` lock to avoid deadlocks.
    pub(crate) fence: RwLock<ManuallyDrop<Box<dyn hal::DynFence>>>,
    pub(crate) snatchable_lock: SnatchLock,

    /// Is this device valid? Valid is closely associated with "lose the device",
    /// which can be triggered by various methods, including at the end of device
    /// destroy, and by any GPU errors that cause us to no longer trust the state
    /// of the device. Ideally we would like to fold valid into the storage of
    /// the device itself (for example as an Error enum), but unfortunately we
    /// need to continue to be able to retrieve the device in poll_devices to
    /// determine if it can be dropped. If our internal accesses of devices were
    /// done through ref-counted references and external accesses checked for
    /// Error enums, we wouldn't need this. For now, we need it. All the call
    /// sites where we check it are areas that should be revisited if we start
    /// using ref-counted references for internal access.
    pub(crate) valid: AtomicBool,

    /// Closure to be called on "lose the device". This is invoked directly by
    /// device.lose or by the UserCallbacks returned from maintain when the device
    /// has been destroyed and its queues are empty.
    pub(crate) device_lost_closure: Mutex<Option<DeviceLostClosure>>,

    /// Stores the state of buffers and textures.
    pub(crate) trackers: Mutex<DeviceTracker>,
    pub(crate) tracker_indices: TrackerIndexAllocators,
    /// Pool of bind group layouts, allowing deduplication.
    pub(crate) bgl_pool: ResourcePool<bgl::EntryMap, BindGroupLayout>,
    pub(crate) alignments: hal::Alignments,
    pub(crate) limits: wgt::Limits,
    pub(crate) features: wgt::Features,
    pub(crate) downlevel: wgt::DownlevelCapabilities,
    pub(crate) instance_flags: wgt::InstanceFlags,
    pub(crate) deferred_destroy: Mutex<Vec<DeferredDestroy>>,
    pub(crate) usage_scopes: UsageScopePool,
    pub(crate) indirect_validation: Option<crate::indirect_validation::IndirectValidation>,
    // Optional so that we can late-initialize this after the queue is created.
    pub(crate) timestamp_normalizer:
        OnceCellOrLock<crate::timestamp_normalization::TimestampNormalizer>,
    // needs to be dropped last
    #[cfg(feature = "trace")]
    pub(crate) trace: Mutex<Option<trace::Trace>>,
}

pub(crate) enum DeferredDestroy {
    TextureViews(WeakVec<TextureView>),
    BindGroups(WeakVec<BindGroup>),
}

impl fmt::Debug for Device {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Device")
            .field("label", &self.label())
            .field("limits", &self.limits)
            .field("features", &self.features)
            .field("downlevel", &self.downlevel)
            .finish()
    }
}

impl Drop for Device {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        // SAFETY: We are in the Drop impl and we don't use self.zero_buffer anymore after this point.
        let zero_buffer = unsafe { ManuallyDrop::take(&mut self.zero_buffer) };
        // SAFETY: We are in the Drop impl and we don't use self.fence anymore after this point.
        let fence = unsafe { ManuallyDrop::take(&mut self.fence.write()) };
        if let Some(indirect_validation) = self.indirect_validation.take() {
            indirect_validation.dispose(self.raw.as_ref());
        }
        if let Some(timestamp_normalizer) = self.timestamp_normalizer.take() {
            timestamp_normalizer.dispose(self.raw.as_ref());
        }
        unsafe {
            self.raw.destroy_buffer(zero_buffer);
            self.raw.destroy_fence(fence);
        }
    }
}

impl Device {
    pub(crate) fn raw(&self) -> &dyn hal::DynDevice {
        self.raw.as_ref()
    }
    pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
        if self.features.contains(feature) {
            Ok(())
        } else {
            Err(MissingFeatures(feature))
        }
    }

    pub(crate) fn require_downlevel_flags(
        &self,
        flags: wgt::DownlevelFlags,
    ) -> Result<(), MissingDownlevelFlags> {
        if self.downlevel.flags.contains(flags) {
            Ok(())
        } else {
            Err(MissingDownlevelFlags(flags))
        }
    }
}

impl Device {
    pub(crate) fn new(
        raw_device: Box<dyn hal::DynDevice>,
        adapter: &Arc<Adapter>,
        desc: &DeviceDescriptor,
        instance_flags: wgt::InstanceFlags,
    ) -> Result<Self, DeviceError> {
        #[cfg(not(feature = "trace"))]
        match &desc.trace {
            wgt::Trace::Off => {}
            _ => {
                log::error!("wgpu-core feature 'trace' is not enabled");
            }
        };
        #[cfg(feature = "trace")]
        let trace_dir_name: Option<&std::path::PathBuf> = match &desc.trace {
            wgt::Trace::Off => None,
            wgt::Trace::Directory(d) => Some(d),
            // The enum is non_exhaustive, so we must have a fallback arm (that should be
            // unreachable in practice).
            t => {
                log::error!("unimplemented wgpu_types::Trace variant {t:?}");
                None
            }
        };

        let fence = unsafe { raw_device.create_fence() }.map_err(DeviceError::from_hal)?;

        let command_allocator = command::CommandAllocator::new();

        let rt_uses = if desc
            .required_features
            .contains(wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)
        {
            wgt::BufferUses::TOP_LEVEL_ACCELERATION_STRUCTURE_INPUT
        } else {
            wgt::BufferUses::empty()
        };

        // Create zeroed buffer used for texture clears (and raytracing if required).
        let zero_buffer = unsafe {
            raw_device.create_buffer(&hal::BufferDescriptor {
                label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags),
                size: ZERO_BUFFER_SIZE,
                usage: wgt::BufferUses::COPY_SRC | wgt::BufferUses::COPY_DST | rt_uses,
                memory_flags: hal::MemoryFlags::empty(),
            })
        }
        .map_err(DeviceError::from_hal)?;

        let alignments = adapter.raw.capabilities.alignments.clone();
        let downlevel = adapter.raw.capabilities.downlevel.clone();

        let enable_indirect_validation = instance_flags
            .contains(wgt::InstanceFlags::VALIDATION_INDIRECT_CALL)
            && downlevel
                .flags
                .contains(wgt::DownlevelFlags::INDIRECT_EXECUTION);

        let indirect_validation = if enable_indirect_validation {
            Some(crate::indirect_validation::IndirectValidation::new(
                raw_device.as_ref(),
                &desc.required_limits,
                &desc.required_features,
            )?)
        } else {
            None
        };

        Ok(Self {
            raw: raw_device,
            adapter: adapter.clone(),
            queue: OnceCellOrLock::new(),
            zero_buffer: ManuallyDrop::new(zero_buffer),
            label: desc.label.to_string(),
            command_allocator,
            command_indices: RwLock::new(
                rank::DEVICE_COMMAND_INDICES,
                CommandIndices {
                    active_submission_index: 0,
                    // By starting at one, we can put the result in a NonZeroU64.
                    next_acceleration_structure_build_command_index: 1,
                },
            ),
            last_successful_submission_index: AtomicU64::new(0),
            fence: RwLock::new(rank::DEVICE_FENCE, ManuallyDrop::new(fence)),
            snatchable_lock: unsafe { SnatchLock::new(rank::DEVICE_SNATCHABLE_LOCK) },
            valid: AtomicBool::new(true),
            device_lost_closure: Mutex::new(rank::DEVICE_LOST_CLOSURE, None),
            trackers: Mutex::new(rank::DEVICE_TRACKERS, DeviceTracker::new()),
            tracker_indices: TrackerIndexAllocators::new(),
            bgl_pool: ResourcePool::new(),
            #[cfg(feature = "trace")]
            trace: Mutex::new(
                rank::DEVICE_TRACE,
                trace_dir_name.and_then(|path| match trace::Trace::new(path.clone()) {
                    Ok(mut trace) => {
                        trace.add(trace::Action::Init {
                            desc: wgt::DeviceDescriptor {
                                trace: wgt::Trace::Off,
                                ..desc.clone()
                            },
                            backend: adapter.backend(),
                        });
                        Some(trace)
                    }
                    Err(e) => {
                        log::error!("Unable to start a trace in '{path:?}': {e}");
                        None
                    }
                }),
            ),
            alignments,
            limits: desc.required_limits.clone(),
            features: desc.required_features,
            downlevel,
            instance_flags,
            deferred_destroy: Mutex::new(rank::DEVICE_DEFERRED_DESTROY, Vec::new()),
            usage_scopes: Mutex::new(rank::DEVICE_USAGE_SCOPES, Default::default()),
            timestamp_normalizer: OnceCellOrLock::new(),
            indirect_validation,
        })
    }

    pub fn late_init_resources_with_queue(&self) -> Result<(), RequestDeviceError> {
        let queue = self.get_queue().unwrap();

        let timestamp_normalizer = crate::timestamp_normalization::TimestampNormalizer::new(
            self,
            queue.get_timestamp_period(),
        )?;

        self.timestamp_normalizer
            .set(timestamp_normalizer)
            .unwrap_or_else(|_| panic!("Called late_init_resources_with_queue twice"));

        Ok(())
    }

    /// Returns the backend this device is using.
    pub fn backend(&self) -> wgt::Backend {
        self.adapter.backend()
    }

    pub fn is_valid(&self) -> bool {
        self.valid.load(Ordering::Acquire)
    }

    pub fn check_is_valid(&self) -> Result<(), DeviceError> {
        if self.is_valid() {
            Ok(())
        } else {
            Err(DeviceError::Invalid(self.error_ident()))
        }
    }

    pub fn handle_hal_error(&self, error: hal::DeviceError) -> DeviceError {
        match error {
            hal::DeviceError::OutOfMemory => {}
            hal::DeviceError::Lost
            | hal::DeviceError::ResourceCreationFailed
            | hal::DeviceError::Unexpected => {
                self.lose(&error.to_string());
            }
        }
        DeviceError::from_hal(error)
    }

    /// Run some destroy operations that were deferred.
    ///
    /// Destroying the resources requires taking a write lock on the device's snatch lock,
    /// so a good reason for deferring resource destruction is when we don't know for sure
    /// how risky it is to take the lock (typically, it shouldn't be taken from the drop
    /// implementation of a reference-counted structure).
    /// The snatch lock must not be held while this function is called.
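    ///
    /// A hedged sketch of the deferral pattern (hypothetical caller code; the
    /// actual pushes happen in the `Drop` implementations of the corresponding
    /// resources, and `bind_groups` here stands in for a `WeakVec<BindGroup>`):
    ///
    /// ```ignore
    /// // Instead of snatching the raw handle from a `Drop` impl, queue it up:
    /// device
    ///     .deferred_destroy
    ///     .lock()
    ///     .push(DeferredDestroy::BindGroups(bind_groups));
    /// // Later, outside of any `Drop`, the device destroys the queued handles:
    /// device.deferred_resource_destruction();
    /// ```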
    pub(crate) fn deferred_resource_destruction(&self) {
        let deferred_destroy = mem::take(&mut *self.deferred_destroy.lock());
        for item in deferred_destroy {
            match item {
                DeferredDestroy::TextureViews(views) => {
                    for view in views {
                        let Some(view) = view.upgrade() else {
                            continue;
                        };
                        let Some(raw_view) = view.raw.snatch(&mut self.snatchable_lock.write())
                        else {
                            continue;
                        };

                        resource_log!("Destroy raw {}", view.error_ident());

                        unsafe {
                            self.raw().destroy_texture_view(raw_view);
                        }
                    }
                }
                DeferredDestroy::BindGroups(bind_groups) => {
                    for bind_group in bind_groups {
                        let Some(bind_group) = bind_group.upgrade() else {
                            continue;
                        };
                        let Some(raw_bind_group) =
                            bind_group.raw.snatch(&mut self.snatchable_lock.write())
                        else {
                            continue;
                        };

                        resource_log!("Destroy raw {}", bind_group.error_ident());

                        unsafe {
                            self.raw().destroy_bind_group(raw_bind_group);
                        }
                    }
                }
            }
        }
    }

    pub fn get_queue(&self) -> Option<Arc<Queue>> {
        self.queue.get().as_ref()?.upgrade()
    }

    pub fn set_queue(&self, queue: &Arc<Queue>) {
        assert!(self.queue.set(Arc::downgrade(queue)).is_ok());
    }

    /// Check the current status of the GPU and process any submissions that have
    /// finished.
    ///
    /// The `poll_type` argument tells if this function should wait for a particular
    /// submission index to complete, or if it should just poll the current status.
    ///
    /// This will process _all_ completed submissions, even if the caller only asked
    /// us to poll to a given submission index.
    ///
    /// Return a pair `(closures, result)`, where:
    ///
    /// - `closures` is a list of callbacks that need to be invoked informing the user
    ///   about various things occurring. These happen and should be handled even if
    ///   this function returns an error, hence they are outside of the result.
    ///
    /// - `result` reports the outcome of the wait operation, including whether
    ///   there was a timeout or a validation error.
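    ///
    /// A minimal usage sketch (hypothetical caller code, assuming the documented
    /// lock order of taking the snatch guard before the fence read lock; the real
    /// call sites live in the device polling paths):
    ///
    /// ```ignore
    /// let snatch_guard = device.snatchable_lock.read();
    /// let fence = device.fence.read();
    /// let (closures, result) = device.maintain(fence, wgt::PollType::Poll, snatch_guard);
    /// // Fire the user callbacks even when `result` is an error...
    /// closures.fire();
    /// // ...then inspect `result` for the `PollStatus` or a `WaitIdleError`.
    /// ```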
    pub(crate) fn maintain<'this>(
        &'this self,
        fence: crate::lock::RwLockReadGuard<ManuallyDrop<Box<dyn hal::DynFence>>>,
        poll_type: wgt::PollType<crate::SubmissionIndex>,
        snatch_guard: SnatchGuard,
    ) -> (UserClosures, Result<wgt::PollStatus, WaitIdleError>) {
        profiling::scope!("Device::maintain");

        let mut user_closures = UserClosures::default();

        // If a wait was requested, determine which submission index to wait for.
        let wait_submission_index = match poll_type {
            wgt::PollType::WaitForSubmissionIndex(submission_index) => {
                let last_successful_submission_index = self
                    .last_successful_submission_index
                    .load(Ordering::Acquire);

                if submission_index > last_successful_submission_index {
                    let result = Err(WaitIdleError::WrongSubmissionIndex(
                        submission_index,
                        last_successful_submission_index,
                    ));

                    return (user_closures, result);
                }

                Some(submission_index)
            }
            wgt::PollType::Wait => Some(
                self.last_successful_submission_index
                    .load(Ordering::Acquire),
            ),
            wgt::PollType::Poll => None,
        };

        // Wait for the submission index if requested.
        if let Some(target_submission_index) = wait_submission_index {
            log::trace!("Device::maintain: waiting for submission index {target_submission_index}");

            let wait_result = unsafe {
                self.raw()
                    .wait(fence.as_ref(), target_submission_index, CLEANUP_WAIT_MS)
            };

            // This error match is only about `DeviceError`s. At this stage we do not care if
            // the wait succeeded or not, and the `Ok(bool)` variant is ignored.
            if let Err(e) = wait_result {
                let hal_error: WaitIdleError = self.handle_hal_error(e).into();
                return (user_closures, Err(hal_error));
            }
        }

        // Get the currently finished submission index. This may be higher than the requested
        // wait, or it may be less than the requested wait if the wait failed.
        let fence_value_result = unsafe { self.raw().get_fence_value(fence.as_ref()) };
        let current_finished_submission = match fence_value_result {
            Ok(fence_value) => fence_value,
            Err(e) => {
                let hal_error: WaitIdleError = self.handle_hal_error(e).into();
                return (user_closures, Err(hal_error));
            }
        };

        // Maintain all finished submissions on the queue, updating the relevant user closures and recording whether the queue is empty.
        //
        // We don't use the result of the wait here, as we want to progress forward as far as possible
        // and the wait could have been for submissions that finished long ago.
        let mut queue_empty = false;
        if let Some(queue) = self.get_queue() {
            let queue_result = queue.maintain(current_finished_submission, &snatch_guard);
            (
                user_closures.submissions,
                user_closures.mappings,
                queue_empty,
            ) = queue_result
        };

        // Based on the queue empty status, and the current finished submission index, determine the result of the poll.
        let result = if queue_empty {
            if let Some(wait_submission_index) = wait_submission_index {
                // Assert to ensure that if we received a queue empty status, the fence shows the correct value.
                // This is defensive, as this should never be hit.
                assert!(
                    current_finished_submission >= wait_submission_index,
                    "If the queue is empty, the current submission index ({}) should be at least the wait submission index ({})",
                    current_finished_submission,
                    wait_submission_index
                );
            }

            Ok(wgt::PollStatus::QueueEmpty)
        } else if let Some(wait_submission_index) = wait_submission_index {
            // This check can succeed even when the wait itself timed out, as
            // submissions could have finished in the time between the timeout resolving,
            // the thread getting scheduled again, and us checking the fence value.
            if current_finished_submission >= wait_submission_index {
                Ok(wgt::PollStatus::WaitSucceeded)
            } else {
                Err(WaitIdleError::Timeout)
            }
        } else {
            Ok(wgt::PollStatus::Poll)
        };

        // Detect if we have been destroyed and now need to lose the device.
        //
        // If we are invalid (set at start of destroy) and our queue is empty,
        // and we have a DeviceLostClosure, return the closure to be called by
        // our caller. This will complete the steps for both destroy and for
        // "lose the device".
        let mut should_release_gpu_resource = false;
        if !self.is_valid() && queue_empty {
            // We can release gpu resources associated with this device (but not
            // while holding the life_tracker lock).
            should_release_gpu_resource = true;

            // If we have a DeviceLostClosure, build an invocation with the
            // reason DeviceLostReason::Destroyed and no message.
            if let Some(device_lost_closure) = self.device_lost_closure.lock().take() {
                user_closures
                    .device_lost_invocations
                    .push(DeviceLostInvocation {
                        closure: device_lost_closure,
                        reason: DeviceLostReason::Destroyed,
                        message: String::new(),
                    });
            }
        }

        // Don't hold the locks while calling release_gpu_resources.
        drop(fence);
        drop(snatch_guard);

        if should_release_gpu_resource {
            self.release_gpu_resources();
        }

        (user_closures, result)
    }

    pub(crate) fn create_buffer(
        self: &Arc<Self>,
        desc: &resource::BufferDescriptor,
    ) -> Result<Arc<Buffer>, resource::CreateBufferError> {
        self.check_is_valid()?;

        if desc.size > self.limits.max_buffer_size {
            return Err(resource::CreateBufferError::MaxBufferSize {
                requested: desc.size,
                maximum: self.limits.max_buffer_size,
            });
        }

        if desc.usage.contains(wgt::BufferUsages::INDEX)
            && desc.usage.contains(
                wgt::BufferUsages::VERTEX
                    | wgt::BufferUsages::UNIFORM
                    | wgt::BufferUsages::INDIRECT
                    | wgt::BufferUsages::STORAGE,
            )
        {
            self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?;
        }

        if desc.usage.is_empty() || desc.usage.contains_unknown_bits() {
            return Err(resource::CreateBufferError::InvalidUsage(desc.usage));
        }

        if !self
            .features
            .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
        {
            use wgt::BufferUsages as Bu;
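            // Without MAPPABLE_PRIMARY_BUFFERS, MAP_WRITE may only be combined
            // with COPY_SRC, and MAP_READ only with COPY_DST. For example
            // (illustrative): MAP_WRITE | COPY_SRC passes this check, while
            // MAP_WRITE | VERTEX is rejected below.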
            let write_mismatch = desc.usage.contains(Bu::MAP_WRITE)
                && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage);
            let read_mismatch = desc.usage.contains(Bu::MAP_READ)
                && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage);
            if write_mismatch || read_mismatch {
                return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
            }
        }

        let mut usage = conv::map_buffer_usage(desc.usage);

        if desc.usage.contains(wgt::BufferUsages::INDIRECT) {
            self.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)?;
            // We are going to be reading from it internally
            // when validating the contents of the buffer.
            usage |= wgt::BufferUses::STORAGE_READ_ONLY | wgt::BufferUses::STORAGE_READ_WRITE;
        }

        if desc.usage.contains(wgt::BufferUsages::QUERY_RESOLVE) {
            usage |= TIMESTAMP_NORMALIZATION_BUFFER_USES;
        }

        if desc.mapped_at_creation {
            if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
                return Err(resource::CreateBufferError::UnalignedSize);
            }
            if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
                // we are going to be copying into it, internally
                usage |= wgt::BufferUses::COPY_DST;
            }
        } else {
            // We are required to zero out (initialize) all memory. This is done
            // on demand using clear_buffer which requires write transfer usage!
            usage |= wgt::BufferUses::COPY_DST;
        }

        let actual_size = if desc.size == 0 {
            wgt::COPY_BUFFER_ALIGNMENT
        } else if desc.usage.contains(wgt::BufferUsages::VERTEX) {
            // Bumping the size by 1 so that we can bind an empty range at the
            // end of the buffer.
            desc.size + 1
        } else {
            desc.size
        };
        let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT;
        let aligned_size = if clear_remainder != 0 {
            actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder
        } else {
            actual_size
        };
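        // Illustrative example of the rounding above: with a copy alignment of
        // 4 bytes, an `actual_size` of 13 leaves a remainder of 1, so
        // `aligned_size` becomes 13 + 4 - 1 = 16.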

        let hal_desc = hal::BufferDescriptor {
            label: desc.label.to_hal(self.instance_flags),
            size: aligned_size,
            usage,
            memory_flags: hal::MemoryFlags::empty(),
        };
        let buffer =
            unsafe { self.raw().create_buffer(&hal_desc) }.map_err(|e| self.handle_hal_error(e))?;

        let timestamp_normalization_bind_group = Snatchable::new(
            self.timestamp_normalizer
                .get()
                .unwrap()
                .create_normalization_bind_group(
                    self,
                    &*buffer,
                    desc.label.as_deref(),
                    desc.size,
                    desc.usage,
                )?,
        );

        let indirect_validation_bind_groups =
            self.create_indirect_validation_bind_groups(buffer.as_ref(), desc.size, desc.usage)?;

        let buffer = Buffer {
            raw: Snatchable::new(buffer),
            device: self.clone(),
            usage: desc.usage,
            size: desc.size,
            initialization_status: RwLock::new(
                rank::BUFFER_INITIALIZATION_STATUS,
                BufferInitTracker::new(aligned_size),
            ),
            map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
            label: desc.label.to_string(),
            tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
            bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
            timestamp_normalization_bind_group,
            indirect_validation_bind_groups,
        };

        let buffer = Arc::new(buffer);

        let buffer_use = if !desc.mapped_at_creation {
            wgt::BufferUses::empty()
        } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
            // the buffer is host-mappable, so we simply map it at creation
            let map_size = buffer.size;
            let mapping = if map_size == 0 {
                hal::BufferMapping {
                    ptr: core::ptr::NonNull::dangling(),
                    is_coherent: true,
                }
            } else {
                let snatch_guard: SnatchGuard = self.snatchable_lock.read();
                map_buffer(&buffer, 0, map_size, HostMap::Write, &snatch_guard)?
            };
            *buffer.map_state.lock() = resource::BufferMapState::Active {
                mapping,
                range: 0..map_size,
                host: HostMap::Write,
            };
            wgt::BufferUses::MAP_WRITE
        } else {
            let mut staging_buffer =
                StagingBuffer::new(self, wgt::BufferSize::new(aligned_size).unwrap())?;

            // Zero initialize memory and then mark the buffer as initialized
            // (it's guaranteed that this is the case by the time the buffer is usable)
            staging_buffer.write_zeros();
            buffer.initialization_status.write().drain(0..aligned_size);

            *buffer.map_state.lock() = resource::BufferMapState::Init { staging_buffer };
            wgt::BufferUses::COPY_DST
        };

        self.trackers
            .lock()
            .buffers
            .insert_single(&buffer, buffer_use);

        Ok(buffer)
    }

    pub(crate) fn create_texture_from_hal(
        self: &Arc<Self>,
        hal_texture: Box<dyn hal::DynTexture>,
        desc: &resource::TextureDescriptor,
    ) -> Result<Arc<Texture>, resource::CreateTextureError> {
        let format_features = self
            .describe_format_features(desc.format)
            .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?;

        unsafe { self.raw().add_raw_texture(&*hal_texture) };

        let texture = Texture::new(
            self,
            resource::TextureInner::Native { raw: hal_texture },
            conv::map_texture_usage(desc.usage, desc.format.into(), format_features.flags),
            desc,
            format_features,
            resource::TextureClearMode::None,
            false,
        );

        let texture = Arc::new(texture);

        self.trackers
            .lock()
            .textures
            .insert_single(&texture, wgt::TextureUses::UNINITIALIZED);

        Ok(texture)
    }

    pub(crate) fn create_buffer_from_hal(
        self: &Arc<Self>,
        hal_buffer: Box<dyn hal::DynBuffer>,
        desc: &resource::BufferDescriptor,
    ) -> (Fallible<Buffer>, Option<resource::CreateBufferError>) {
        let timestamp_normalization_bind_group = match self
            .timestamp_normalizer
            .get()
            .unwrap()
            .create_normalization_bind_group(
                self,
                &*hal_buffer,
                desc.label.as_deref(),
                desc.size,
                desc.usage,
            ) {
            Ok(bg) => Snatchable::new(bg),
            Err(e) => {
                return (
                    Fallible::Invalid(Arc::new(desc.label.to_string())),
                    Some(e.into()),
                )
            }
        };

        let indirect_validation_bind_groups = match self.create_indirect_validation_bind_groups(
            hal_buffer.as_ref(),
            desc.size,
            desc.usage,
        ) {
            Ok(ok) => ok,
            Err(e) => return (Fallible::Invalid(Arc::new(desc.label.to_string())), Some(e)),
        };

        unsafe { self.raw().add_raw_buffer(&*hal_buffer) };

        let buffer = Buffer {
            raw: Snatchable::new(hal_buffer),
            device: self.clone(),
            usage: desc.usage,
            size: desc.size,
            initialization_status: RwLock::new(
                rank::BUFFER_INITIALIZATION_STATUS,
                BufferInitTracker::new(0),
            ),
            map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
            label: desc.label.to_string(),
            tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
            bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
            timestamp_normalization_bind_group,
            indirect_validation_bind_groups,
        };

        let buffer = Arc::new(buffer);

        self.trackers
            .lock()
            .buffers
            .insert_single(&buffer, wgt::BufferUses::empty());

        (Fallible::Valid(buffer), None)
    }

    fn create_indirect_validation_bind_groups(
        &self,
        raw_buffer: &dyn hal::DynBuffer,
        buffer_size: u64,
        usage: wgt::BufferUsages,
    ) -> Result<Snatchable<crate::indirect_validation::BindGroups>, resource::CreateBufferError>
    {
        if !usage.contains(wgt::BufferUsages::INDIRECT) {
            return Ok(Snatchable::empty());
        }

        let Some(ref indirect_validation) = self.indirect_validation else {
            return Ok(Snatchable::empty());
        };

        let bind_groups = crate::indirect_validation::BindGroups::new(
            indirect_validation,
            self,
            buffer_size,
            raw_buffer,
        )
        .map_err(resource::CreateBufferError::IndirectValidationBindGroup)?;

        if let Some(bind_groups) = bind_groups {
            Ok(Snatchable::new(bind_groups))
        } else {
            Ok(Snatchable::empty())
        }
    }

    pub(crate) fn create_texture(
        self: &Arc<Self>,
        desc: &resource::TextureDescriptor,
    ) -> Result<Arc<Texture>, resource::CreateTextureError> {
        use resource::{CreateTextureError, TextureDimensionError};

        self.check_is_valid()?;

        if desc.usage.is_empty() || desc.usage.contains_unknown_bits() {
            return Err(CreateTextureError::InvalidUsage(desc.usage));
        }

        conv::check_texture_dimension_size(
            desc.dimension,
            desc.size,
            desc.sample_count,
            &self.limits,
        )?;

        if desc.dimension != wgt::TextureDimension::D2 {
            // Depth textures can only be 2D
            if desc.format.is_depth_stencil_format() {
                return Err(CreateTextureError::InvalidDepthDimension(
                    desc.dimension,
                    desc.format,
                ));
            }
            // Renderable textures can only be 2D
            if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
                return Err(CreateTextureError::InvalidDimensionUsages(
                    wgt::TextureUsages::RENDER_ATTACHMENT,
                    desc.dimension,
                ));
            }
        }

        if desc.dimension != wgt::TextureDimension::D2
            && desc.dimension != wgt::TextureDimension::D3
        {
            // Compressed textures can only be 2D or 3D
            if desc.format.is_compressed() {
                return Err(CreateTextureError::InvalidCompressedDimension(
                    desc.dimension,
                    desc.format,
                ));
            }
        }

        if desc.format.is_compressed() {
            let (block_width, block_height) = desc.format.block_dimensions();

            if desc.size.width % block_width != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::NotMultipleOfBlockWidth {
                        width: desc.size.width,
                        block_width,
                        format: desc.format,
                    },
                ));
            }

            if desc.size.height % block_height != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::NotMultipleOfBlockHeight {
                        height: desc.size.height,
                        block_height,
                        format: desc.format,
                    },
                ));
            }

            if desc.dimension == wgt::TextureDimension::D3 {
                // Only BCn formats with the Sliced 3D feature can be used for 3D textures
                if desc.format.is_bcn() {
                    self.require_features(wgt::Features::TEXTURE_COMPRESSION_BC_SLICED_3D)
                        .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
                } else {
                    return Err(CreateTextureError::InvalidCompressedDimension(
                        desc.dimension,
                        desc.format,
                    ));
                }
            }
        }

        {
            let (width_multiple, height_multiple) = desc.format.size_multiple_requirement();

            if desc.size.width % width_multiple != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::WidthNotMultipleOf {
                        width: desc.size.width,
                        multiple: width_multiple,
                        format: desc.format,
                    },
                ));
            }

            if desc.size.height % height_multiple != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::HeightNotMultipleOf {
                        height: desc.size.height,
                        multiple: height_multiple,
                        format: desc.format,
                    },
                ));
            }
        }

        let format_features = self
            .describe_format_features(desc.format)
            .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;

        if desc.sample_count > 1 {
            if desc.mip_level_count != 1 {
                return Err(CreateTextureError::InvalidMipLevelCount {
                    requested: desc.mip_level_count,
                    maximum: 1,
                });
            }

            if desc.size.depth_or_array_layers != 1 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::MultisampledDepthOrArrayLayer(
                        desc.size.depth_or_array_layers,
                    ),
                ));
            }

            if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) {
                return Err(CreateTextureError::InvalidMultisampledStorageBinding);
            }

            if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
                return Err(CreateTextureError::MultisampledNotRenderAttachment);
            }

            if !format_features.flags.intersects(
                wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4
                    | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2
                    | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8
                    | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16,
            ) {
                return Err(CreateTextureError::InvalidMultisampledFormat(desc.format));
            }

            if !format_features
                .flags
                .sample_count_supported(desc.sample_count)
            {
                return Err(CreateTextureError::InvalidSampleCount(
                    desc.sample_count,
                    desc.format,
                    desc.format
                        .guaranteed_format_features(self.features)
                        .flags
                        .supported_sample_counts(),
                    self.adapter
                        .get_texture_format_features(desc.format)
                        .flags
                        .supported_sample_counts(),
                ));
            };
        }

        let mips = desc.mip_level_count;
        let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS);
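        // Illustrative example: a 1024x1024 2D texture allows at most
        // floor(log2(1024)) + 1 = 11 mip levels (subject to hal::MAX_MIP_LEVELS).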
        if mips == 0 || mips > max_levels_allowed {
            return Err(CreateTextureError::InvalidMipLevelCount {
                requested: mips,
                maximum: max_levels_allowed,
            });
        }

        let missing_allowed_usages = desc.usage - format_features.allowed_usages;
        if !missing_allowed_usages.is_empty() {
            // detect downlevel incompatibilities
            let wgpu_allowed_usages = desc
                .format
                .guaranteed_format_features(self.features)
                .allowed_usages;
            let wgpu_missing_usages = desc.usage - wgpu_allowed_usages;
            return Err(CreateTextureError::InvalidFormatUsages(
                missing_allowed_usages,
                desc.format,
                wgpu_missing_usages.is_empty(),
            ));
        }

        let mut hal_view_formats = Vec::new();
        for format in desc.view_formats.iter() {
            if desc.format == *format {
                continue;
            }
            if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() {
                return Err(CreateTextureError::InvalidViewFormat(*format, desc.format));
            }
            hal_view_formats.push(*format);
        }
        if !hal_view_formats.is_empty() {
            self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?;
        }

        let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features);

        let hal_desc = hal::TextureDescriptor {
            label: desc.label.to_hal(self.instance_flags),
            size: desc.size,
            mip_level_count: desc.mip_level_count,
            sample_count: desc.sample_count,
            dimension: desc.dimension,
            format: desc.format,
            usage: hal_usage,
            memory_flags: hal::MemoryFlags::empty(),
            view_formats: hal_view_formats,
        };

        let raw_texture = unsafe { self.raw().create_texture(&hal_desc) }
            .map_err(|e| self.handle_hal_error(e))?;

        let clear_mode = if hal_usage
            .intersects(wgt::TextureUses::DEPTH_STENCIL_WRITE | wgt::TextureUses::COLOR_TARGET)
        {
            let (is_color, usage) = if desc.format.is_depth_stencil_format() {
                (false, wgt::TextureUses::DEPTH_STENCIL_WRITE)
            } else {
                (true, wgt::TextureUses::COLOR_TARGET)
            };
            let dimension = match desc.dimension {
                wgt::TextureDimension::D1 => TextureViewDimension::D1,
                wgt::TextureDimension::D2 => TextureViewDimension::D2,
                wgt::TextureDimension::D3 => unreachable!(),
            };

            let clear_label = hal_label(
                Some("(wgpu internal) clear texture view"),
                self.instance_flags,
            );

            let mut clear_views = SmallVec::new();
            for mip_level in 0..desc.mip_level_count {
                for array_layer in 0..desc.size.depth_or_array_layers {
                    macro_rules! push_clear_view {
                        ($format:expr, $aspect:expr) => {
                            let desc = hal::TextureViewDescriptor {
                                label: clear_label,
                                format: $format,
                                dimension,
                                usage,
                                range: wgt::ImageSubresourceRange {
                                    aspect: $aspect,
                                    base_mip_level: mip_level,
                                    mip_level_count: Some(1),
                                    base_array_layer: array_layer,
                                    array_layer_count: Some(1),
                                },
                            };
                            clear_views.push(ManuallyDrop::new(
                                unsafe {
                                    self.raw().create_texture_view(raw_texture.as_ref(), &desc)
                                }
                                .map_err(|e| self.handle_hal_error(e))?,
                            ));
                        };
                    }

                    if let Some(planes) = desc.format.planes() {
                        for plane in 0..planes {
                            let aspect = wgt::TextureAspect::from_plane(plane).unwrap();
                            let format = desc.format.aspect_specific_format(aspect).unwrap();
                            push_clear_view!(format, aspect);
                        }
                    } else {
                        push_clear_view!(desc.format, wgt::TextureAspect::All);
                    }
                }
            }
            resource::TextureClearMode::RenderPass {
                clear_views,
                is_color,
            }
        } else {
            resource::TextureClearMode::BufferCopy
        };

        let texture = Texture::new(
            self,
            resource::TextureInner::Native { raw: raw_texture },
            hal_usage,
            desc,
            format_features,
            clear_mode,
            true,
        );

        let texture = Arc::new(texture);

        self.trackers
            .lock()
            .textures
            .insert_single(&texture, wgt::TextureUses::UNINITIALIZED);

        Ok(texture)
    }

    pub(crate) fn create_texture_view(
        self: &Arc<Self>,
        texture: &Arc<Texture>,
        desc: &resource::TextureViewDescriptor,
    ) -> Result<Arc<TextureView>, resource::CreateTextureViewError> {
        self.check_is_valid()?;

        let snatch_guard = texture.device.snatchable_lock.read();

        let texture_raw = texture.try_raw(&snatch_guard)?;

        // resolve TextureViewDescriptor defaults
        // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults
        let resolved_format = desc.format.unwrap_or_else(|| {
            texture
                .desc
                .format
                .aspect_specific_format(desc.range.aspect)
                .unwrap_or(texture.desc.format)
        });

        let resolved_dimension = desc
            .dimension
            .unwrap_or_else(|| match texture.desc.dimension {
                wgt::TextureDimension::D1 => TextureViewDimension::D1,
                wgt::TextureDimension::D2 => {
                    if texture.desc.array_layer_count() == 1 {
                        TextureViewDimension::D2
                    } else {
                        TextureViewDimension::D2Array
                    }
                }
                wgt::TextureDimension::D3 => TextureViewDimension::D3,
            });
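        // Illustrative defaults: a 2D texture with a single array layer resolves
        // to `D2`, one with several layers resolves to `D2Array`, and 1D and 3D
        // textures resolve to `D1` and `D3` respectively.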
1223
1224        let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| {
1225            texture
1226                .desc
1227                .mip_level_count
1228                .saturating_sub(desc.range.base_mip_level)
1229        });
1230
1231        let resolved_array_layer_count =
1232            desc.range
1233                .array_layer_count
1234                .unwrap_or_else(|| match resolved_dimension {
1235                    TextureViewDimension::D1
1236                    | TextureViewDimension::D2
1237                    | TextureViewDimension::D3 => 1,
1238                    TextureViewDimension::Cube => 6,
1239                    TextureViewDimension::D2Array | TextureViewDimension::CubeArray => texture
1240                        .desc
1241                        .array_layer_count()
1242                        .saturating_sub(desc.range.base_array_layer),
1243                });
1244
1245        let resolved_usage = {
1246            let usage = desc.usage.unwrap_or(wgt::TextureUsages::empty());
1247            if usage.is_empty() {
1248                texture.desc.usage
1249            } else if texture.desc.usage.contains(usage) {
1250                usage
1251            } else {
1252                return Err(resource::CreateTextureViewError::InvalidTextureViewUsage {
1253                    view: usage,
1254                    texture: texture.desc.usage,
1255                });
1256            }
1257        };
1258
1259        let format_features = self.describe_format_features(resolved_format)?;
1260        let allowed_format_usages = format_features.allowed_usages;
1261        if resolved_usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
1262            && !allowed_format_usages.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
1263        {
1264            return Err(
1265                resource::CreateTextureViewError::TextureViewFormatNotRenderable(resolved_format),
1266            );
1267        }
1268
1269        if resolved_usage.contains(wgt::TextureUsages::STORAGE_BINDING)
1270            && !allowed_format_usages.contains(wgt::TextureUsages::STORAGE_BINDING)
1271        {
1272            return Err(
1273                resource::CreateTextureViewError::TextureViewFormatNotStorage(resolved_format),
1274            );
1275        }
1276
1277        // validate TextureViewDescriptor
1278
1279        let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect);
1280        if aspects.is_empty() {
1281            return Err(resource::CreateTextureViewError::InvalidAspect {
1282                texture_format: texture.desc.format,
1283                requested_aspect: desc.range.aspect,
1284            });
1285        }
1286
1287        let format_is_good = if desc.range.aspect == wgt::TextureAspect::All {
1288            resolved_format == texture.desc.format
1289                || texture.desc.view_formats.contains(&resolved_format)
1290        } else {
1291            Some(resolved_format)
1292                == texture
1293                    .desc
1294                    .format
1295                    .aspect_specific_format(desc.range.aspect)
1296        };
1297        if !format_is_good {
1298            return Err(resource::CreateTextureViewError::FormatReinterpretation {
1299                texture: texture.desc.format,
1300                view: resolved_format,
1301            });
1302        }
1303
1304        // check if multisampled texture is seen as anything but 2D
1305        if texture.desc.sample_count > 1 && resolved_dimension != TextureViewDimension::D2 {
1306            return Err(
1307                resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension(
1308                    resolved_dimension,
1309                ),
1310            );
1311        }
1312
1313        // check if the dimension is compatible with the texture
1314        if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() {
1315            return Err(
1316                resource::CreateTextureViewError::InvalidTextureViewDimension {
1317                    view: resolved_dimension,
1318                    texture: texture.desc.dimension,
1319                },
1320            );
1321        }
1322
1323        match resolved_dimension {
1324            TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => {
1325                if resolved_array_layer_count != 1 {
1326                    return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
1327                        requested: resolved_array_layer_count,
1328                        dim: resolved_dimension,
1329                    });
1330                }
1331            }
1332            TextureViewDimension::Cube => {
1333                if resolved_array_layer_count != 6 {
1334                    return Err(
1335                        resource::CreateTextureViewError::InvalidCubemapTextureDepth {
1336                            depth: resolved_array_layer_count,
1337                        },
1338                    );
1339                }
1340            }
1341            TextureViewDimension::CubeArray => {
1342                if resolved_array_layer_count % 6 != 0 {
1343                    return Err(
1344                        resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
1345                            depth: resolved_array_layer_count,
1346                        },
1347                    );
1348                }
1349            }
1350            _ => {}
1351        }
1352
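        // Cube and cube-array views additionally require square layers.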
1353        match resolved_dimension {
1354            TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
1355                if texture.desc.size.width != texture.desc.size.height {
1356                    return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize);
1357                }
1358            }
1359            _ => {}
1360        }
1361
1362        if resolved_mip_level_count == 0 {
1363            return Err(resource::CreateTextureViewError::ZeroMipLevelCount);
1364        }
1365
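        // `saturating_add` avoids overflow for absurd `base_mip_level`
        // values; the range check below still rejects them.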
1366        let mip_level_end = desc
1367            .range
1368            .base_mip_level
1369            .saturating_add(resolved_mip_level_count);
1370
1371        let level_end = texture.desc.mip_level_count;
1372        if mip_level_end > level_end {
1373            return Err(resource::CreateTextureViewError::TooManyMipLevels {
1374                requested: mip_level_end,
1375                total: level_end,
1376            });
1377        }
1378
1379        if resolved_array_layer_count == 0 {
1380            return Err(resource::CreateTextureViewError::ZeroArrayLayerCount);
1381        }
1382
1383        let array_layer_end = desc
1384            .range
1385            .base_array_layer
1386            .saturating_add(resolved_array_layer_count);
1387
1388        let layer_end = texture.desc.array_layer_count();
1389        if array_layer_end > layer_end {
1390            return Err(resource::CreateTextureViewError::TooManyArrayLayers {
1391                requested: array_layer_end,
1392                total: layer_end,
1393            });
1394        };
1395
1396        // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view
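        // Determine whether this view can serve as a render attachment. On
        // failure, record the reason instead of erroring, so later attachment
        // validation can produce a specific error message.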
1397        let render_extent = 'error: {
1398            if !resolved_usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
1399                break 'error Err(TextureViewNotRenderableReason::Usage(resolved_usage));
1400            }
1401
1402            if !(resolved_dimension == TextureViewDimension::D2
1403                || (self.features.contains(wgt::Features::MULTIVIEW)
1404                    && resolved_dimension == TextureViewDimension::D2Array))
1405            {
1406                break 'error Err(TextureViewNotRenderableReason::Dimension(
1407                    resolved_dimension,
1408                ));
1409            }
1410
1411            if resolved_mip_level_count != 1 {
1412                break 'error Err(TextureViewNotRenderableReason::MipLevelCount(
1413                    resolved_mip_level_count,
1414                ));
1415            }
1416
1417            if resolved_array_layer_count != 1
1418                && !(self.features.contains(wgt::Features::MULTIVIEW))
1419            {
1420                break 'error Err(TextureViewNotRenderableReason::ArrayLayerCount(
1421                    resolved_array_layer_count,
1422                ));
1423            }
1424
1425            if aspects != hal::FormatAspects::from(texture.desc.format) {
1426                break 'error Err(TextureViewNotRenderableReason::Aspects(aspects));
1427            }
1428
1429            Ok(texture
1430                .desc
1431                .compute_render_extent(desc.range.base_mip_level))
1432        };
1433
1434        // filter the usages based on the other criteria
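        // Cube and cube-array views can only be sampled; 3D views can be
        // sampled or used as storage; views spanning more than one mip level
        // can only be sampled. Copy usages never apply to views.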
1435        let usage = {
1436            let resolved_hal_usage = conv::map_texture_usage(
1437                resolved_usage,
1438                resolved_format.into(),
1439                format_features.flags,
1440            );
1441            let mask_copy = !(wgt::TextureUses::COPY_SRC | wgt::TextureUses::COPY_DST);
1442            let mask_dimension = match resolved_dimension {
1443                TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
1444                    wgt::TextureUses::RESOURCE
1445                }
1446                TextureViewDimension::D3 => {
1447                    wgt::TextureUses::RESOURCE
1448                        | wgt::TextureUses::STORAGE_READ_ONLY
1449                        | wgt::TextureUses::STORAGE_WRITE_ONLY
1450                        | wgt::TextureUses::STORAGE_READ_WRITE
1451                }
1452                _ => wgt::TextureUses::all(),
1453            };
1454            let mask_mip_level = if resolved_mip_level_count == 1 {
1455                wgt::TextureUses::all()
1456            } else {
1457                wgt::TextureUses::RESOURCE
1458            };
1459            resolved_hal_usage & mask_copy & mask_dimension & mask_mip_level
1460        };
1461
1462        // use the combined depth-stencil format for the view
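        // (the chosen aspect is still narrowed via the subresource range
        // below; only the HAL view's format uses the combined format)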
1463        let format = if resolved_format.is_depth_stencil_component(texture.desc.format) {
1464            texture.desc.format
1465        } else {
1466            resolved_format
1467        };
1468
1469        let resolved_range = wgt::ImageSubresourceRange {
1470            aspect: desc.range.aspect,
1471            base_mip_level: desc.range.base_mip_level,
1472            mip_level_count: Some(resolved_mip_level_count),
1473            base_array_layer: desc.range.base_array_layer,
1474            array_layer_count: Some(resolved_array_layer_count),
1475        };
1476
1477        let hal_desc = hal::TextureViewDescriptor {
1478            label: desc.label.to_hal(self.instance_flags),
1479            format,
1480            dimension: resolved_dimension,
1481            usage,
1482            range: resolved_range,
1483        };
1484
1485        let raw = unsafe { self.raw().create_texture_view(texture_raw, &hal_desc) }
1486            .map_err(|e| self.handle_hal_error(e))?;
1487
1488        let selector = TextureSelector {
1489            mips: desc.range.base_mip_level..mip_level_end,
1490            layers: desc.range.base_array_layer..array_layer_end,
1491        };
1492
1493        let view = TextureView {
1494            raw: Snatchable::new(raw),
1495            parent: texture.clone(),
1496            device: self.clone(),
1497            desc: resource::HalTextureViewDescriptor {
1498                texture_format: texture.desc.format,
1499                format: resolved_format,
1500                dimension: resolved_dimension,
1501                usage: resolved_usage,
1502                range: resolved_range,
1503            },
1504            format_features: texture.format_features,
1505            render_extent,
1506            samples: texture.desc.sample_count,
1507            selector,
1508            label: desc.label.to_string(),
1509            tracking_data: TrackingData::new(self.tracker_indices.texture_views.clone()),
1510        };
1511
1512        let view = Arc::new(view);
1513
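        // Register this view with its parent texture so the texture can later
        // find the views created from it.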
1514        {
1515            let mut views = texture.views.lock();
1516            views.push(Arc::downgrade(&view));
1517        }
1518
1519        Ok(view)
1520    }
1521
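    /// Validates `desc` and creates a new [`Sampler`].
    ///
    /// This checks feature requirements for `ClampToBorder` addressing and
    /// the `Zero` border color, that the LOD clamp range is non-negative and
    /// ordered, and that anisotropic filtering is only requested together
    /// with linear min/mag/mipmap filters.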
1522    pub(crate) fn create_sampler(
1523        self: &Arc<Self>,
1524        desc: &resource::SamplerDescriptor,
1525    ) -> Result<Arc<Sampler>, resource::CreateSamplerError> {
1526        self.check_is_valid()?;
1527
1528        if desc
1529            .address_modes
1530            .iter()
1531            .any(|am| am == &wgt::AddressMode::ClampToBorder)
1532        {
1533            self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?;
1534        }
1535
1536        if desc.border_color == Some(wgt::SamplerBorderColor::Zero) {
1537            self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?;
1538        }
1539
1540        if desc.lod_min_clamp < 0.0 {
1541            return Err(resource::CreateSamplerError::InvalidLodMinClamp(
1542                desc.lod_min_clamp,
1543            ));
1544        }
1545        if desc.lod_max_clamp < desc.lod_min_clamp {
1546            return Err(resource::CreateSamplerError::InvalidLodMaxClamp {
1547                lod_min_clamp: desc.lod_min_clamp,
1548                lod_max_clamp: desc.lod_max_clamp,
1549            });
1550        }
1551
1552        if desc.anisotropy_clamp < 1 {
1553            return Err(resource::CreateSamplerError::InvalidAnisotropy(
1554                desc.anisotropy_clamp,
1555            ));
1556        }
1557
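        // Anisotropic filtering requires linear filtering, so any
        // `anisotropy_clamp` above 1 must be paired with linear min, mag and
        // mipmap filters.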
1558        if desc.anisotropy_clamp != 1 {
1559            if !matches!(desc.min_filter, wgt::FilterMode::Linear) {
1560                return Err(
1561                    resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
1562                        filter_type: resource::SamplerFilterErrorType::MinFilter,
1563                        filter_mode: desc.min_filter,
1564                        anisotropic_clamp: desc.anisotropy_clamp,
1565                    },
1566                );
1567            }
1568            if !matches!(desc.mag_filter, wgt::FilterMode::Linear) {
1569                return Err(
1570                    resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
1571                        filter_type: resource::SamplerFilterErrorType::MagFilter,
1572                        filter_mode: desc.mag_filter,
1573                        anisotropic_clamp: desc.anisotropy_clamp,
1574                    },
1575                );
1576            }
1577            if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) {
1578                return Err(
1579                    resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
1580                        filter_type: resource::SamplerFilterErrorType::MipmapFilter,
1581                        filter_mode: desc.mipmap_filter,
1582                        anisotropic_clamp: desc.anisotropy_clamp,
1583                    },
1584                );
1585            }
1586        }
1587
1588        let anisotropy_clamp = if self
1589            .downlevel
1590            .flags
1591            .contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING)
1592        {
1593            // Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface
1594            desc.anisotropy_clamp.min(16)
1595        } else {
1596            // If it isn't supported, set this unconditionally to 1
1597            1
1598        };
1599
1600        //TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS
1601
1602        let hal_desc = hal::SamplerDescriptor {
1603            label: desc.label.to_hal(self.instance_flags),
1604            address_modes: desc.address_modes,
1605            mag_filter: desc.mag_filter,
1606            min_filter: desc.min_filter,
1607            mipmap_filter: desc.mipmap_filter,
1608            lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp,
1609            compare: desc.compare,
1610            anisotropy_clamp,
1611            border_color: desc.border_color,
1612        };
1613
1614        let raw = unsafe { self.raw().create_sampler(&hal_desc) }
1615            .map_err(|e| self.handle_hal_error(e))?;
1616
1617        let sampler = Sampler {
1618            raw: ManuallyDrop::new(raw),
1619            device: self.clone(),
1620            label: desc.label.to_string(),
1621            tracking_data: TrackingData::new(self.tracker_indices.samplers.clone()),
1622            comparison: desc.compare.is_some(),
1623            filtering: desc.min_filter == wgt::FilterMode::Linear
1624                || desc.mag_filter == wgt::FilterMode::Linear
1625                || desc.mipmap_filter == wgt::FilterMode::Linear,
1626        };
1627
1628        let sampler = Arc::new(sampler);
1629
1630        Ok(sampler)
1631    }
1632
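    /// Parses and validates a shader module from `source` (WGSL, SPIR-V, GLSL
    /// or a pre-built Naga module, depending on enabled crate features) and
    /// hands the validated Naga IR to the HAL backend for compilation.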
1633    pub(crate) fn create_shader_module<'a>(
1634        self: &Arc<Self>,
1635        desc: &pipeline::ShaderModuleDescriptor<'a>,
1636        source: pipeline::ShaderModuleSource<'a>,
1637    ) -> Result<Arc<pipeline::ShaderModule>, pipeline::CreateShaderModuleError> {
1638        self.check_is_valid()?;
1639
1640        let (module, source) = match source {
1641            #[cfg(feature = "wgsl")]
1642            pipeline::ShaderModuleSource::Wgsl(code) => {
1643                profiling::scope!("naga::front::wgsl::parse_str");
1644                let module = naga::front::wgsl::parse_str(&code).map_err(|inner| {
1645                    pipeline::CreateShaderModuleError::Parsing(naga::error::ShaderError {
1646                        source: code.to_string(),
1647                        label: desc.label.as_ref().map(|l| l.to_string()),
1648                        inner: Box::new(inner),
1649                    })
1650                })?;
1651                (Cow::Owned(module), code.into_owned())
1652            }
1653            #[cfg(feature = "spirv")]
1654            pipeline::ShaderModuleSource::SpirV(spv, options) => {
1655                let parser = naga::front::spv::Frontend::new(spv.iter().cloned(), &options);
1656                profiling::scope!("naga::front::spv::Frontend");
1657                let module = parser.parse().map_err(|inner| {
1658                    pipeline::CreateShaderModuleError::ParsingSpirV(naga::error::ShaderError {
1659                        source: String::new(),
1660                        label: desc.label.as_ref().map(|l| l.to_string()),
1661                        inner: Box::new(inner),
1662                    })
1663                })?;
1664                (Cow::Owned(module), String::new())
1665            }
1666            #[cfg(feature = "glsl")]
1667            pipeline::ShaderModuleSource::Glsl(code, options) => {
1668                let mut parser = naga::front::glsl::Frontend::default();
1669                profiling::scope!("naga::front::glsl::Frontend.parse");
1670                let module = parser.parse(&options, &code).map_err(|inner| {
1671                    pipeline::CreateShaderModuleError::ParsingGlsl(naga::error::ShaderError {
1672                        source: code.to_string(),
1673                        label: desc.label.as_ref().map(|l| l.to_string()),
1674                        inner: Box::new(inner),
1675                    })
1676                })?;
1677                (Cow::Owned(module), code.into_owned())
1678            }
1679            pipeline::ShaderModuleSource::Naga(module) => (module, String::new()),
1680            pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"),
1681        };
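        // Reject resource bindings whose group index is at or above the
        // device's `max_bind_groups` limit.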
1682        for (_, var) in module.global_variables.iter() {
1683            match var.binding {
1684                Some(br) if br.group >= self.limits.max_bind_groups => {
1685                    return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex {
1686                        bind: br,
1687                        group: br.group,
1688                        limit: self.limits.max_bind_groups,
1689                    });
1690                }
1691                _ => continue,
1692            };
1693        }
1694
1695        profiling::scope!("naga::validate");
1696        let debug_source =
1697            if self.instance_flags.contains(wgt::InstanceFlags::DEBUG) && !source.is_empty() {
1698                Some(hal::DebugSource {
1699                    file_name: Cow::Owned(
1700                        desc.label
1701                            .as_ref()
1702                            .map_or("shader".to_string(), |l| l.to_string()),
1703                    ),
1704                    source_code: Cow::Owned(source.clone()),
1705                })
1706            } else {
1707                None
1708            };
1709
1710        let info = create_validator(
1711            self.features,
1712            self.downlevel.flags,
1713            naga::valid::ValidationFlags::all(),
1714        )
1715        .validate(&module)
1716        .map_err(|inner| {
1717            pipeline::CreateShaderModuleError::Validation(naga::error::ShaderError {
1718                source,
1719                label: desc.label.as_ref().map(|l| l.to_string()),
1720                inner: Box::new(inner),
1721            })
1722        })?;
1723
1724        let interface = validation::Interface::new(&module, &info, self.limits.clone());
1725        let hal_shader = hal::ShaderInput::Naga(hal::NagaShader {
1726            module,
1727            info,
1728            debug_source,
1729        });
1730        let hal_desc = hal::ShaderModuleDescriptor {
1731            label: desc.label.to_hal(self.instance_flags),
1732            runtime_checks: desc.runtime_checks,
1733        };
1734        let raw = match unsafe { self.raw().create_shader_module(&hal_desc, hal_shader) } {
1735            Ok(raw) => raw,
1736            Err(error) => {
1737                return Err(match error {
1738                    hal::ShaderError::Device(error) => {
1739                        pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error))
1740                    }
1741                    hal::ShaderError::Compilation(ref msg) => {
1742                        log::error!("Shader error: {}", msg);
1743                        pipeline::CreateShaderModuleError::Generation
1744                    }
1745                })
1746            }
1747        };
1748
1749        let module = pipeline::ShaderModule {
1750            raw: ManuallyDrop::new(raw),
1751            device: self.clone(),
1752            interface: Some(interface),
1753            label: desc.label.to_string(),
1754        };
1755
1756        let module = Arc::new(module);
1757
1758        Ok(module)
1759    }
1760
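    /// Creates a shader module from backend-specific source (SPIR-V or MSL)
    /// without running it through naga parsing or validation. Requires the
    /// corresponding shader passthrough feature, and runtime checks are
    /// disabled for modules created this way.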
1761    #[allow(unused_unsafe)]
1762    pub(crate) unsafe fn create_shader_module_passthrough<'a>(
1763        self: &Arc<Self>,
1764        descriptor: &pipeline::ShaderModuleDescriptorPassthrough<'a>,
1765    ) -> Result<Arc<pipeline::ShaderModule>, pipeline::CreateShaderModuleError> {
1766        self.check_is_valid()?;
1767        let hal_shader = match descriptor {
1768            pipeline::ShaderModuleDescriptorPassthrough::SpirV(inner) => {
1769                self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?;
1770                hal::ShaderInput::SpirV(&inner.source)
1771            }
1772            pipeline::ShaderModuleDescriptorPassthrough::Msl(inner) => {
1773                self.require_features(wgt::Features::MSL_SHADER_PASSTHROUGH)?;
1774                hal::ShaderInput::Msl {
1775                    shader: inner.source.to_string(),
1776                    entry_point: inner.entry_point.to_string(),
1777                    num_workgroups: inner.num_workgroups,
1778                }
1779            }
1780        };
1781
1782        let hal_desc = hal::ShaderModuleDescriptor {
1783            label: descriptor.label().to_hal(self.instance_flags),
1784            runtime_checks: wgt::ShaderRuntimeChecks::unchecked(),
1785        };
1786
1787        let raw = match unsafe { self.raw().create_shader_module(&hal_desc, hal_shader) } {
1788            Ok(raw) => raw,
1789            Err(error) => {
1790                return Err(match error {
1791                    hal::ShaderError::Device(error) => {
1792                        pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error))
1793                    }
1794                    hal::ShaderError::Compilation(ref msg) => {
1795                        log::error!("Shader error: {}", msg);
1796                        pipeline::CreateShaderModuleError::Generation
1797                    }
1798                })
1799            }
1800        };
1801
1802        let module = pipeline::ShaderModule {
1803            raw: ManuallyDrop::new(raw),
1804            device: self.clone(),
1805            interface: None,
1806            label: descriptor.label().to_string(),
1807        };
1808
1809        Ok(Arc::new(module))
1810    }
1811
1812    pub(crate) fn create_command_encoder(
1813        self: &Arc<Self>,
1814        label: &crate::Label,
1815    ) -> Result<Arc<command::CommandBuffer>, DeviceError> {
1816        self.check_is_valid()?;
1817
1818        let queue = self.get_queue().unwrap();
1819
1820        let encoder = self
1821            .command_allocator
1822            .acquire_encoder(self.raw(), queue.raw())
1823            .map_err(|e| self.handle_hal_error(e))?;
1824
1825        let command_buffer = command::CommandBuffer::new(encoder, self, label);
1826
1827        let command_buffer = Arc::new(command_buffer);
1828
1829        Ok(command_buffer)
1830    }
1831
1832    /// Generate information about late-validated buffer bindings for pipelines.
1833    //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way?
1834    fn make_late_sized_buffer_groups(
1835        shader_binding_sizes: &FastHashMap<naga::ResourceBinding, wgt::BufferSize>,
1836        layout: &binding_model::PipelineLayout,
1837    ) -> ArrayVec<pipeline::LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }> {
1838        // Given the binding sizes required by the shader and the pipeline layout,
1839        // return those sizes in layout order, keeping only buffer bindings whose
1840        // layout entry has no `min_binding_size` (these are the ones validated late).
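        // For example, if group 0 has two such bindings and the shader
        // requires 16 and 32 bytes for them, that group's `shader_sizes` is
        // `[16, 32]` (in layout entry order); a binding the shader never
        // references contributes 0.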
1841        layout
1842            .bind_group_layouts
1843            .iter()
1844            .enumerate()
1845            .map(|(group_index, bgl)| pipeline::LateSizedBufferGroup {
1846                shader_sizes: bgl
1847                    .entries
1848                    .values()
1849                    .filter_map(|entry| match entry.ty {
1850                        wgt::BindingType::Buffer {
1851                            min_binding_size: None,
1852                            ..
1853                        } => {
1854                            let rb = naga::ResourceBinding {
1855                                group: group_index as u32,
1856                                binding: entry.binding,
1857                            };
1858                            let shader_size =
1859                                shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get());
1860                            Some(shader_size)
1861                        }
1862                        _ => None,
1863                    })
1864                    .collect(),
1865            })
1866            .collect()
1867    }
1868
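    /// Validates every entry in `entry_map` (required features and downlevel
    /// flags, binding-array support, binding counts against device limits)
    /// and creates a [`BindGroupLayout`].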
1869    pub(crate) fn create_bind_group_layout(
1870        self: &Arc<Self>,
1871        label: &crate::Label,
1872        entry_map: bgl::EntryMap,
1873        origin: bgl::Origin,
1874    ) -> Result<Arc<BindGroupLayout>, binding_model::CreateBindGroupLayoutError> {
1875        #[derive(PartialEq)]
1876        enum WritableStorage {
1877            Yes,
1878            No,
1879        }
1880
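        // For each entry, determine which feature (if any) is needed to use
        // it as a binding array and whether it exposes writable storage, then
        // verify the features and downlevel flags this entry requires (some
        // depend on its shader visibility).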
1881        for entry in entry_map.values() {
1882            use wgt::BindingType as Bt;
1883
1884            let mut required_features = wgt::Features::empty();
1885            let mut required_downlevel_flags = wgt::DownlevelFlags::empty();
1886            let (array_feature, writable_storage) = match entry.ty {
1887                Bt::Buffer {
1888                    ty: wgt::BufferBindingType::Uniform,
1889                    has_dynamic_offset: false,
1890                    min_binding_size: _,
1891                } => (
1892                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
1893                    WritableStorage::No,
1894                ),
1895                Bt::Buffer {
1896                    ty: wgt::BufferBindingType::Uniform,
1897                    has_dynamic_offset: true,
1898                    min_binding_size: _,
1899                } => (
1900                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
1901                    WritableStorage::No,
1902                ),
1903                Bt::Buffer {
1904                    ty: wgt::BufferBindingType::Storage { read_only },
1905                    ..
1906                } => (
1907                    Some(
1908                        wgt::Features::BUFFER_BINDING_ARRAY
1909                            | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
1910                    ),
1911                    match read_only {
1912                        true => WritableStorage::No,
1913                        false => WritableStorage::Yes,
1914                    },
1915                ),
1916                Bt::Sampler { .. } => (
1917                    Some(wgt::Features::TEXTURE_BINDING_ARRAY),
1918                    WritableStorage::No,
1919                ),
1920                Bt::Texture {
1921                    multisampled: true,
1922                    sample_type: TextureSampleType::Float { filterable: true },
1923                    ..
1924                } => {
1925                    return Err(binding_model::CreateBindGroupLayoutError::Entry {
1926                        binding: entry.binding,
1927                        error:
1928                            BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled,
1929                    });
1930                }
1931                Bt::Texture {
1932                    multisampled,
1933                    view_dimension,
1934                    ..
1935                } => {
1936                    if multisampled && view_dimension != TextureViewDimension::D2 {
1937                        return Err(binding_model::CreateBindGroupLayoutError::Entry {
1938                            binding: entry.binding,
1939                            error: BindGroupLayoutEntryError::Non2DMultisampled(view_dimension),
1940                        });
1941                    }
1942
1943                    (
1944                        Some(wgt::Features::TEXTURE_BINDING_ARRAY),
1945                        WritableStorage::No,
1946                    )
1947                }
1948                Bt::StorageTexture {
1949                    access,
1950                    view_dimension,
1951                    format: _,
1952                } => {
1953                    match view_dimension {
1954                        TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
1955                            return Err(binding_model::CreateBindGroupLayoutError::Entry {
1956                                binding: entry.binding,
1957                                error: BindGroupLayoutEntryError::StorageTextureCube,
1958                            })
1959                        }
1960                        _ => (),
1961                    }
1962                    match access {
1963                        wgt::StorageTextureAccess::Atomic
1964                            if !self.features.contains(wgt::Features::TEXTURE_ATOMIC) =>
1965                        {
1966                            return Err(binding_model::CreateBindGroupLayoutError::Entry {
1967                                binding: entry.binding,
1968                                error: BindGroupLayoutEntryError::StorageTextureAtomic,
1969                            });
1970                        }
1971                        wgt::StorageTextureAccess::ReadOnly
1972                        | wgt::StorageTextureAccess::ReadWrite
1973                            if !self.features.contains(
1974                                wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
1975                            ) =>
1976                        {
1977                            return Err(binding_model::CreateBindGroupLayoutError::Entry {
1978                                binding: entry.binding,
1979                                error: BindGroupLayoutEntryError::StorageTextureReadWrite,
1980                            });
1981                        }
1982                        _ => (),
1983                    }
1984                    (
1985                        Some(
1986                            wgt::Features::TEXTURE_BINDING_ARRAY
1987                                | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
1988                        ),
1989                        match access {
1990                            wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes,
1991                            wgt::StorageTextureAccess::ReadOnly => {
1992                                required_features |=
1993                                    wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
1994                                WritableStorage::No
1995                            }
1996                            wgt::StorageTextureAccess::ReadWrite => {
1997                                required_features |=
1998                                    wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
1999                                WritableStorage::Yes
2000                            }
2001                            wgt::StorageTextureAccess::Atomic => {
2002                                required_features |= wgt::Features::TEXTURE_ATOMIC;
2003                                WritableStorage::Yes
2004                            }
2005                        },
2006                    )
2007                }
2008                Bt::AccelerationStructure { .. } => (None, WritableStorage::No),
2009            };
2010
2011            // Validate the count parameter
2012            if entry.count.is_some() {
2013                required_features |= array_feature
2014                    .ok_or(BindGroupLayoutEntryError::ArrayUnsupported)
2015                    .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
2016                        binding: entry.binding,
2017                        error,
2018                    })?;
2019            }
2020
2021            if entry.visibility.contains_unknown_bits() {
2022                return Err(
2023                    binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility),
2024                );
2025            }
2026
2027            if entry.visibility.contains(wgt::ShaderStages::VERTEX) {
2028                if writable_storage == WritableStorage::Yes {
2029                    required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE;
2030                }
2031                if let Bt::Buffer {
2032                    ty: wgt::BufferBindingType::Storage { .. },
2033                    ..
2034                } = entry.ty
2035                {
2036                    required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE;
2037                }
2038            }
2039            if writable_storage == WritableStorage::Yes
2040                && entry.visibility.contains(wgt::ShaderStages::FRAGMENT)
2041            {
2042                required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE;
2043            }
2044
2045            self.require_features(required_features)
2046                .map_err(BindGroupLayoutEntryError::MissingFeatures)
2047                .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
2048                    binding: entry.binding,
2049                    error,
2050                })?;
2051            self.require_downlevel_flags(required_downlevel_flags)
2052                .map_err(BindGroupLayoutEntryError::MissingDownlevelFlags)
2053                .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
2054                    binding: entry.binding,
2055                    error,
2056                })?;
2057        }
2058
2059        let bgl_flags = conv::bind_group_layout_flags(self.features);
2060
2061        let hal_bindings = entry_map.values().copied().collect::<Vec<_>>();
2062        let hal_desc = hal::BindGroupLayoutDescriptor {
2063            label: label.to_hal(self.instance_flags),
2064            flags: bgl_flags,
2065            entries: &hal_bindings,
2066        };
2067
2068        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
2069        for entry in entry_map.values() {
2070            count_validator.add_binding(entry);
2071        }
2072        // If a single bind group layout violates limits, the pipeline layout is
2073        // definitely going to violate limits too, so let's catch it now.
2074        count_validator
2075            .validate(&self.limits)
2076            .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
2077
2078        // Validate that binding arrays don't conflict with dynamic offsets.
2079        count_validator.validate_binding_arrays()?;
2080
2081        let raw = unsafe { self.raw().create_bind_group_layout(&hal_desc) }
2082            .map_err(|e| self.handle_hal_error(e))?;
2083
2084        let bgl = BindGroupLayout {
2085            raw: ManuallyDrop::new(raw),
2086            device: self.clone(),
2087            entries: entry_map,
2088            origin,
2089            exclusive_pipeline: OnceCellOrLock::new(),
2090            binding_count_validator: count_validator,
2091            label: label.to_string(),
2092        };
2093
2094        let bgl = Arc::new(bgl);
2095
2096        Ok(bgl)
2097    }
2098
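    /// Validates a single buffer binding against its layout entry (binding
    /// type, alignment, bound range and size limits) and returns the HAL
    /// binding, recording usage, lazy-initialization and dynamic-offset
    /// information as a side effect.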
2099    fn create_buffer_binding<'a>(
2100        &self,
2101        bb: &'a binding_model::ResolvedBufferBinding,
2102        binding: u32,
2103        decl: &wgt::BindGroupLayoutEntry,
2104        used_buffer_ranges: &mut Vec<BufferInitTrackerAction>,
2105        dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
2106        late_buffer_binding_sizes: &mut FastHashMap<u32, wgt::BufferSize>,
2107        used: &mut BindGroupStates,
2108        snatch_guard: &'a SnatchGuard<'a>,
2109    ) -> Result<hal::BufferBinding<'a, dyn hal::DynBuffer>, binding_model::CreateBindGroupError>
2110    {
2111        use crate::binding_model::CreateBindGroupError as Error;
2112
2113        let (binding_ty, dynamic, min_size) = match decl.ty {
2114            wgt::BindingType::Buffer {
2115                ty,
2116                has_dynamic_offset,
2117                min_binding_size,
2118            } => (ty, has_dynamic_offset, min_binding_size),
2119            _ => {
2120                return Err(Error::WrongBindingType {
2121                    binding,
2122                    actual: decl.ty,
2123                    expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
2124                })
2125            }
2126        };
2127
2128        let (pub_usage, internal_use, range_limit) = match binding_ty {
2129            wgt::BufferBindingType::Uniform => (
2130                wgt::BufferUsages::UNIFORM,
2131                wgt::BufferUses::UNIFORM,
2132                self.limits.max_uniform_buffer_binding_size,
2133            ),
2134            wgt::BufferBindingType::Storage { read_only } => (
2135                wgt::BufferUsages::STORAGE,
2136                if read_only {
2137                    wgt::BufferUses::STORAGE_READ_ONLY
2138                } else {
2139                    wgt::BufferUses::STORAGE_READ_WRITE
2140                },
2141                self.limits.max_storage_buffer_binding_size,
2142            ),
2143        };
2144
2145        let (align, align_limit_name) =
2146            binding_model::buffer_binding_type_alignment(&self.limits, binding_ty);
2147        if bb.offset % align as u64 != 0 {
2148            return Err(Error::UnalignedBufferOffset(
2149                bb.offset,
2150                align_limit_name,
2151                align,
2152            ));
2153        }
2154
2155        let buffer = &bb.buffer;
2156
2157        used.buffers.insert_single(buffer.clone(), internal_use);
2158
2159        buffer.same_device(self)?;
2160
2161        buffer.check_usage(pub_usage)?;
2162        let raw_buffer = buffer.try_raw(snatch_guard)?;
2163
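        // Resolve the bound range: an explicit size must fit within the
        // buffer, while `None` binds from `offset` to the end of the buffer
        // (e.g. offset 256 on a 1024-byte buffer binds bytes 256..1024).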
2164        let (bind_size, bind_end) = match bb.size {
2165            Some(size) => {
2166                let end = bb.offset + size.get();
2167                if end > buffer.size {
2168                    return Err(Error::BindingRangeTooLarge {
2169                        buffer: buffer.error_ident(),
2170                        range: bb.offset..end,
2171                        size: buffer.size,
2172                    });
2173                }
2174                (size.get(), end)
2175            }
2176            None => {
2177                if buffer.size < bb.offset {
2178                    return Err(Error::BindingRangeTooLarge {
2179                        buffer: buffer.error_ident(),
2180                        range: bb.offset..bb.offset,
2181                        size: buffer.size,
2182                    });
2183                }
2184                (buffer.size - bb.offset, buffer.size)
2185            }
2186        };
2187
2188        if bind_size > range_limit as u64 {
2189            return Err(Error::BufferRangeTooLarge {
2190                binding,
2191                given: bind_size as u32,
2192                limit: range_limit,
2193            });
2194        }
2195
2196        // Record binding info for validating dynamic offsets
2197        if dynamic {
2198            dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
2199                binding_idx: binding,
2200                buffer_size: buffer.size,
2201                binding_range: bb.offset..bind_end,
2202                maximum_dynamic_offset: buffer.size - bind_end,
2203                binding_type: binding_ty,
2204            });
2205        }
2206
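        // A layout-provided `min_binding_size` is checked here; without one,
        // the actual bound size is recorded so it can be validated later
        // against the size the shader requires.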
2207        if let Some(non_zero) = min_size {
2208            let min_size = non_zero.get();
2209            if min_size > bind_size {
2210                return Err(Error::BindingSizeTooSmall {
2211                    buffer: buffer.error_ident(),
2212                    actual: bind_size,
2213                    min: min_size,
2214                });
2215            }
2216        } else {
2217            let late_size = wgt::BufferSize::new(bind_size)
2218                .ok_or_else(|| Error::BindingZeroSize(buffer.error_ident()))?;
2219            late_buffer_binding_sizes.insert(binding, late_size);
2220        }
2221
2222        // This was checked against the device's alignment requirements above,
2223        // which should always be a multiple of `COPY_BUFFER_ALIGNMENT`.
2224        assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
2225
2226        // `wgpu_hal` only restricts shader access to bound buffer regions with
2227        // a certain resolution. For the sake of lazy initialization, round up
2228        // the size of the bound range to reflect how much of the buffer is
2229        // actually going to be visible to the shader.
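        // (e.g. a 20-byte binding with a 32-byte bounds-check alignment marks
        // bytes `offset..offset + 32` as needing initialization)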
2230        let bounds_check_alignment =
2231            binding_model::buffer_binding_type_bounds_check_alignment(&self.alignments, binding_ty);
2232        let visible_size = align_to(bind_size, bounds_check_alignment);
2233
2234        used_buffer_ranges.extend(buffer.initialization_status.read().create_action(
2235            buffer,
2236            bb.offset..bb.offset + visible_size,
2237            MemoryInitKind::NeedsInitializedMemory,
2238        ));
2239
2240        Ok(hal::BufferBinding {
2241            buffer: raw_buffer,
2242            offset: bb.offset,
2243            size: bb.size,
2244        })
2245    }
2246
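    /// Checks that `sampler` is compatible with the sampler class declared in
    /// the layout entry (filtering vs. non-filtering vs. comparison) and
    /// returns the raw HAL sampler.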
2247    fn create_sampler_binding<'a>(
2248        &self,
2249        used: &mut BindGroupStates,
2250        binding: u32,
2251        decl: &wgt::BindGroupLayoutEntry,
2252        sampler: &'a Arc<Sampler>,
2253    ) -> Result<&'a dyn hal::DynSampler, binding_model::CreateBindGroupError> {
2254        use crate::binding_model::CreateBindGroupError as Error;
2255
2256        used.samplers.insert_single(sampler.clone());
2257
2258        sampler.same_device(self)?;
2259
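        // `allowed_filtering: None` means the layout accepts both filtering
        // and non-filtering samplers, while `Some(false)` requires a
        // non-filtering sampler; comparison-ness must always match exactly.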
2260        match decl.ty {
2261            wgt::BindingType::Sampler(ty) => {
2262                let (allowed_filtering, allowed_comparison) = match ty {
2263                    wgt::SamplerBindingType::Filtering => (None, false),
2264                    wgt::SamplerBindingType::NonFiltering => (Some(false), false),
2265                    wgt::SamplerBindingType::Comparison => (None, true),
2266                };
2267                if let Some(allowed_filtering) = allowed_filtering {
2268                    if allowed_filtering != sampler.filtering {
2269                        return Err(Error::WrongSamplerFiltering {
2270                            binding,
2271                            layout_flt: allowed_filtering,
2272                            sampler_flt: sampler.filtering,
2273                        });
2274                    }
2275                }
2276                if allowed_comparison != sampler.comparison {
2277                    return Err(Error::WrongSamplerComparison {
2278                        binding,
2279                        layout_cmp: allowed_comparison,
2280                        sampler_cmp: sampler.comparison,
2281                    });
2282                }
2283            }
2284            _ => {
2285                return Err(Error::WrongBindingType {
2286                    binding,
2287                    actual: decl.ty,
2288                    expected: "Sampler",
2289                })
2290            }
2291        }
2292
2293        Ok(sampler.raw())
2294    }
2295
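    /// Validates `view` against its layout entry, records the view's
    /// subresources as needing initialization, and returns the HAL texture
    /// binding with the resolved internal usage.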
2296    fn create_texture_binding<'a>(
2297        &self,
2298        binding: u32,
2299        decl: &wgt::BindGroupLayoutEntry,
2300        view: &'a Arc<TextureView>,
2301        used: &mut BindGroupStates,
2302        used_texture_ranges: &mut Vec<TextureInitTrackerAction>,
2303        snatch_guard: &'a SnatchGuard<'a>,
2304    ) -> Result<hal::TextureBinding<'a, dyn hal::DynTextureView>, binding_model::CreateBindGroupError>
2305    {
2306        view.same_device(self)?;
2307
2308        let internal_use = self.texture_use_parameters(
2309            binding,
2310            decl,
2311            view,
2312            "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
2313        )?;
2314
2315        used.views.insert_single(view.clone(), internal_use);
2316
2317        let texture = &view.parent;
2318
2319        used_texture_ranges.push(TextureInitTrackerAction {
2320            texture: texture.clone(),
2321            range: TextureInitRange {
2322                mip_range: view.desc.range.mip_range(texture.desc.mip_level_count),
2323                layer_range: view
2324                    .desc
2325                    .range
2326                    .layer_range(texture.desc.array_layer_count()),
2327            },
2328            kind: MemoryInitKind::NeedsInitializedMemory,
2329        });
2330
2331        Ok(hal::TextureBinding {
2332            view: view.try_raw(snatch_guard)?,
2333            usage: internal_use,
2334        })
2335    }
2336
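    /// Validates a TLAS binding against its layout entry, checking that
    /// `vertex_return` bindings use a TLAS built with
    /// `ALLOW_RAY_HIT_VERTEX_RETURN`, and returns the raw HAL acceleration
    /// structure.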
2337    fn create_tlas_binding<'a>(
2338        self: &Arc<Self>,
2339        used: &mut BindGroupStates,
2340        binding: u32,
2341        decl: &wgt::BindGroupLayoutEntry,
2342        tlas: &'a Arc<Tlas>,
2343        snatch_guard: &'a SnatchGuard<'a>,
2344    ) -> Result<&'a dyn hal::DynAccelerationStructure, binding_model::CreateBindGroupError> {
2345        use crate::binding_model::CreateBindGroupError as Error;
2346
2347        used.acceleration_structures.insert_single(tlas.clone());
2348
2349        tlas.same_device(self)?;
2350
2351        match decl.ty {
2352            wgt::BindingType::AccelerationStructure { vertex_return } => {
2353                if vertex_return
2354                    && !tlas.flags.contains(
2355                        wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN,
2356                    )
2357                {
2358                    return Err(Error::MissingTLASVertexReturn { binding });
2359                }
2360            }
2361            _ => {
2362                return Err(Error::WrongBindingType {
2363                    binding,
2364                    actual: decl.ty,
2365                    expected: "Tlas",
2366                });
2367            }
2368        }
2369
2370        Ok(tlas.try_raw(snatch_guard)?)
2371    }
2372
2373    // This function expects the caller to have already resolved the provided
2374    // bind group layout (i.e. deduplicated it) beforehand.
2375    pub(crate) fn create_bind_group(
2376        self: &Arc<Self>,
2377        desc: binding_model::ResolvedBindGroupDescriptor,
2378    ) -> Result<Arc<BindGroup>, binding_model::CreateBindGroupError> {
2379        use crate::binding_model::{CreateBindGroupError as Error, ResolvedBindingResource as Br};
2380
2381        let layout = desc.layout;
2382
2383        self.check_is_valid()?;
2384        layout.same_device(self)?;
2385
2386        {
2387            // Check that the number of entries in the descriptor matches
2388            // the number of entries in the layout.
2389            let actual = desc.entries.len();
2390            let expected = layout.entries.len();
2391            if actual != expected {
2392                return Err(Error::BindingsNumMismatch { expected, actual });
2393            }
2394        }
2395
2396        // TODO: arrayvec/smallvec, or re-use allocations
2397        // Record binding info for dynamic offset validation
2398        let mut dynamic_binding_info = Vec::new();
2399        // Map of binding -> shader reflected size
2400        //Note: we can't collect into a vector right away because
2401        // it needs to be in BGL iteration order, not BG entry order.
2402        let mut late_buffer_binding_sizes = FastHashMap::default();
2403        // fill out the descriptors
2404        let mut used = BindGroupStates::new();
2405
2406        let mut used_buffer_ranges = Vec::new();
2407        let mut used_texture_ranges = Vec::new();
2408        let mut hal_entries = Vec::with_capacity(desc.entries.len());
2409        let mut hal_buffers = Vec::new();
2410        let mut hal_samplers = Vec::new();
2411        let mut hal_textures = Vec::new();
2412        let mut hal_tlas_s = Vec::new();
2413        let snatch_guard = self.snatchable_lock.read();
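        // Translate each descriptor entry into HAL resources, validating it
        // against the matching layout entry. `res_index` and `count` describe
        // the entry's slice within the per-kind resource arrays below.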
2414        for entry in desc.entries.iter() {
2415            let binding = entry.binding;
2416            // Find the corresponding declaration in the layout
2417            let decl = layout
2418                .entries
2419                .get(binding)
2420                .ok_or(Error::MissingBindingDeclaration(binding))?;
2421            let (res_index, count) = match entry.resource {
2422                Br::Buffer(ref bb) => {
2423                    let bb = self.create_buffer_binding(
2424                        bb,
2425                        binding,
2426                        decl,
2427                        &mut used_buffer_ranges,
2428                        &mut dynamic_binding_info,
2429                        &mut late_buffer_binding_sizes,
2430                        &mut used,
2431                        &snatch_guard,
2432                    )?;
2433
2434                    let res_index = hal_buffers.len();
2435                    hal_buffers.push(bb);
2436                    (res_index, 1)
2437                }
2438                Br::BufferArray(ref bindings_array) => {
2439                    let num_bindings = bindings_array.len();
2440                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
2441
2442                    let res_index = hal_buffers.len();
2443                    for bb in bindings_array.iter() {
2444                        let bb = self.create_buffer_binding(
2445                            bb,
2446                            binding,
2447                            decl,
2448                            &mut used_buffer_ranges,
2449                            &mut dynamic_binding_info,
2450                            &mut late_buffer_binding_sizes,
2451                            &mut used,
2452                            &snatch_guard,
2453                        )?;
2454                        hal_buffers.push(bb);
2455                    }
2456                    (res_index, num_bindings)
2457                }
2458                Br::Sampler(ref sampler) => {
2459                    let sampler = self.create_sampler_binding(&mut used, binding, decl, sampler)?;
2460
2461                    let res_index = hal_samplers.len();
2462                    hal_samplers.push(sampler);
2463                    (res_index, 1)
2464                }
2465                Br::SamplerArray(ref samplers) => {
2466                    let num_bindings = samplers.len();
2467                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
2468
2469                    let res_index = hal_samplers.len();
2470                    for sampler in samplers.iter() {
2471                        let sampler =
2472                            self.create_sampler_binding(&mut used, binding, decl, sampler)?;
2473
2474                        hal_samplers.push(sampler);
2475                    }
2476
2477                    (res_index, num_bindings)
2478                }
2479                Br::TextureView(ref view) => {
2480                    let tb = self.create_texture_binding(
2481                        binding,
2482                        decl,
2483                        view,
2484                        &mut used,
2485                        &mut used_texture_ranges,
2486                        &snatch_guard,
2487                    )?;
2488                    let res_index = hal_textures.len();
2489                    hal_textures.push(tb);
2490                    (res_index, 1)
2491                }
2492                Br::TextureViewArray(ref views) => {
2493                    let num_bindings = views.len();
2494                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
2495
2496                    let res_index = hal_textures.len();
2497                    for view in views.iter() {
2498                        let tb = self.create_texture_binding(
2499                            binding,
2500                            decl,
2501                            view,
2502                            &mut used,
2503                            &mut used_texture_ranges,
2504                            &snatch_guard,
2505                        )?;
2506
2507                        hal_textures.push(tb);
2508                    }
2509
2510                    (res_index, num_bindings)
2511                }
2512                Br::AccelerationStructure(ref tlas) => {
2513                    let tlas =
2514                        self.create_tlas_binding(&mut used, binding, decl, tlas, &snatch_guard)?;
2515                    let res_index = hal_tlas_s.len();
2516                    hal_tlas_s.push(tlas);
2517                    (res_index, 1)
2518                }
2519            };
2520
2521            hal_entries.push(hal::BindGroupEntry {
2522                binding,
2523                resource_index: res_index as u32,
2524                count: count as u32,
2525            });
2526        }
2527
2528        used.optimize();
2529
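        // Sorting by binding number makes duplicate bindings adjacent, which
        // the pairwise check below detects.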
2530        hal_entries.sort_by_key(|entry| entry.binding);
2531        for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) {
2532            if a.binding == b.binding {
2533                return Err(Error::DuplicateBinding(a.binding));
2534            }
2535        }
2536        let hal_desc = hal::BindGroupDescriptor {
2537            label: desc.label.to_hal(self.instance_flags),
2538            layout: layout.raw(),
2539            entries: &hal_entries,
2540            buffers: &hal_buffers,
2541            samplers: &hal_samplers,
2542            textures: &hal_textures,
2543            acceleration_structures: &hal_tlas_s,
2544        };
2545        let raw = unsafe { self.raw().create_bind_group(&hal_desc) }
2546            .map_err(|e| self.handle_hal_error(e))?;
2547
2548        // collect in the order of BGL iteration
2549        let late_buffer_binding_sizes = layout
2550            .entries
2551            .indices()
2552            .flat_map(|binding| late_buffer_binding_sizes.get(&binding).cloned())
2553            .collect();
2554
2555        let bind_group = BindGroup {
2556            raw: Snatchable::new(raw),
2557            device: self.clone(),
2558            layout,
2559            label: desc.label.to_string(),
2560            tracking_data: TrackingData::new(self.tracker_indices.bind_groups.clone()),
2561            used,
2562            used_buffer_ranges,
2563            used_texture_ranges,
2564            dynamic_binding_info,
2565            late_buffer_binding_sizes,
2566        };
2567
2568        let bind_group = Arc::new(bind_group);
2569
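        // Record a weak reference to this bind group on every buffer and
        // texture it uses, so those resources can find the bind groups that
        // depend on them.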
2570        let weak_ref = Arc::downgrade(&bind_group);
2571        for range in &bind_group.used_texture_ranges {
2572            let mut bind_groups = range.texture.bind_groups.lock();
2573            bind_groups.push(weak_ref.clone());
2574        }
2575        for range in &bind_group.used_buffer_ranges {
2576            let mut bind_groups = range.buffer.bind_groups.lock();
2577            bind_groups.push(weak_ref.clone());
2578        }
2579
2580        Ok(bind_group)
2581    }
2582
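    /// Checks the number of resources supplied for a binding array against
    /// the `count` declared in the layout: more than `count` is always an
    /// error, fewer is only allowed with `PARTIALLY_BOUND_BINDING_ARRAY`, and
    /// an empty array is never allowed. A `count` of `None` means the layout
    /// entry is not an array, so supplying an array of resources is an error.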
2583    fn check_array_binding(
2584        features: wgt::Features,
2585        count: Option<NonZeroU32>,
2586        num_bindings: usize,
2587    ) -> Result<(), binding_model::CreateBindGroupError> {
2588        use super::binding_model::CreateBindGroupError as Error;
2589
2590        if let Some(count) = count {
2591            let count = count.get() as usize;
2592            if count < num_bindings {
2593                return Err(Error::BindingArrayPartialLengthMismatch {
2594                    actual: num_bindings,
2595                    expected: count,
2596                });
2597            }
2598            if count != num_bindings
2599                && !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY)
2600            {
2601                return Err(Error::BindingArrayLengthMismatch {
2602                    actual: num_bindings,
2603                    expected: count,
2604                });
2605            }
2606            if num_bindings == 0 {
2607                return Err(Error::BindingArrayZeroLength);
2608            }
2609        } else {
2610            return Err(Error::SingleBindingExpected);
2611        };
2612
2613        Ok(())
2614    }
2615
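    /// Determines the internal `TextureUses` implied by binding `view` at a
    /// texture or storage-texture layout entry, validating sample type, view
    /// dimension, multisampling and storage access support along the way.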
2616    fn texture_use_parameters(
2617        &self,
2618        binding: u32,
2619        decl: &wgt::BindGroupLayoutEntry,
2620        view: &TextureView,
2621        expected: &'static str,
2622    ) -> Result<wgt::TextureUses, binding_model::CreateBindGroupError> {
2623        use crate::binding_model::CreateBindGroupError as Error;
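        // A view used in a bind group must not expose both the depth and
        // stencil aspects at once.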
2624        if view
2625            .desc
2626            .aspects()
2627            .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL)
2628        {
2629            return Err(Error::DepthStencilAspect);
2630        }
2631        match decl.ty {
2632            wgt::BindingType::Texture {
2633                sample_type,
2634                view_dimension,
2635                multisampled,
2636            } => {
2637                use wgt::TextureSampleType as Tst;
2638                if multisampled != (view.samples != 1) {
2639                    return Err(Error::InvalidTextureMultisample {
2640                        binding,
2641                        layout_multisampled: multisampled,
2642                        view_samples: view.samples,
2643                    });
2644                }
2645                let compat_sample_type = view
2646                    .desc
2647                    .format
2648                    .sample_type(Some(view.desc.range.aspect), Some(self.features))
2649                    .unwrap();
2650                match (sample_type, compat_sample_type) {
2651                    (Tst::Uint, Tst::Uint) |
2652                        (Tst::Sint, Tst::Sint) |
2653                        (Tst::Depth, Tst::Depth) |
2654                        // if we expect non-filterable, accept anything float
2655                        (Tst::Float { filterable: false }, Tst::Float { .. }) |
2656                        // if we expect filterable, require it
2657                        (Tst::Float { filterable: true }, Tst::Float { filterable: true }) |
2658                        // if we expect non-filterable, also accept depth
2659                        (Tst::Float { filterable: false }, Tst::Depth) => {}
2660                    // if we expect filterable, also accept Float that is defined as
2661                    // unfilterable if filterable feature is explicitly enabled (only hit
2662                    // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is
2663                    // enabled)
2664                    (Tst::Float { filterable: true }, Tst::Float { .. }) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {}
2665                    _ => {
2666                        return Err(Error::InvalidTextureSampleType {
2667                            binding,
2668                            layout_sample_type: sample_type,
2669                            view_format: view.desc.format,
2670                            view_sample_type: compat_sample_type,
2671                        })
2672                    }
2673                }
2674                if view_dimension != view.desc.dimension {
2675                    return Err(Error::InvalidTextureDimension {
2676                        binding,
2677                        layout_dimension: view_dimension,
2678                        view_dimension: view.desc.dimension,
2679                    });
2680                }
2681                view.check_usage(wgt::TextureUsages::TEXTURE_BINDING)?;
2682                Ok(wgt::TextureUses::RESOURCE)
2683            }
2684            wgt::BindingType::StorageTexture {
2685                access,
2686                format,
2687                view_dimension,
2688            } => {
2689                if format != view.desc.format {
2690                    return Err(Error::InvalidStorageTextureFormat {
2691                        binding,
2692                        layout_format: format,
2693                        view_format: view.desc.format,
2694                    });
2695                }
2696                if view_dimension != view.desc.dimension {
2697                    return Err(Error::InvalidTextureDimension {
2698                        binding,
2699                        layout_dimension: view_dimension,
2700                        view_dimension: view.desc.dimension,
2701                    });
2702                }
2703
2704                let mip_level_count = view.selector.mips.end - view.selector.mips.start;
2705                if mip_level_count != 1 {
2706                    return Err(Error::InvalidStorageTextureMipLevelCount {
2707                        binding,
2708                        mip_level_count,
2709                    });
2710                }
2711
2712                let internal_use = match access {
2713                    wgt::StorageTextureAccess::WriteOnly => {
2714                        if !view
2715                            .format_features
2716                            .flags
2717                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_WRITE_ONLY)
2718                        {
2719                            return Err(Error::StorageWriteNotSupported(view.desc.format));
2720                        }
2721                        wgt::TextureUses::STORAGE_WRITE_ONLY
2722                    }
2723                    wgt::StorageTextureAccess::ReadOnly => {
2724                        if !view
2725                            .format_features
2726                            .flags
2727                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_ONLY)
2728                        {
2729                            return Err(Error::StorageReadNotSupported(view.desc.format));
2730                        }
2731                        wgt::TextureUses::STORAGE_READ_ONLY
2732                    }
2733                    wgt::StorageTextureAccess::ReadWrite => {
2734                        if !view
2735                            .format_features
2736                            .flags
2737                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
2738                        {
2739                            return Err(Error::StorageReadWriteNotSupported(view.desc.format));
2740                        }
2741
2742                        wgt::TextureUses::STORAGE_READ_WRITE
2743                    }
2744                    wgt::StorageTextureAccess::Atomic => {
2745                        if !view
2746                            .format_features
2747                            .flags
2748                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_ATOMIC)
2749                        {
2750                            return Err(Error::StorageAtomicNotSupported(view.desc.format));
2751                        }
2752
2753                        wgt::TextureUses::STORAGE_ATOMIC
2754                    }
2755                };
2756                view.check_usage(wgt::TextureUsages::STORAGE_BINDING)?;
2757                Ok(internal_use)
2758            }
2759            _ => Err(Error::WrongBindingType {
2760                binding,
2761                actual: decl.ty,
2762                expected,
2763            }),
2764        }
2765    }
2766
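        /// Validates `desc` against this device's limits and features and creates the
        /// backing `hal` pipeline layout.
        ///
        /// Checks the number of bind group layouts against `max_bind_groups`, validates
        /// every push constant range (see the loop below), verifies that all bind group
        /// layouts belong to this device, and enforces the per-type binding count limits.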
2767    pub(crate) fn create_pipeline_layout(
2768        self: &Arc<Self>,
2769        desc: &binding_model::ResolvedPipelineLayoutDescriptor,
2770    ) -> Result<Arc<binding_model::PipelineLayout>, binding_model::CreatePipelineLayoutError> {
2771        use crate::binding_model::CreatePipelineLayoutError as Error;
2772
2773        self.check_is_valid()?;
2774
2775        let bind_group_layouts_count = desc.bind_group_layouts.len();
2776        let device_max_bind_groups = self.limits.max_bind_groups as usize;
2777        if bind_group_layouts_count > device_max_bind_groups {
2778            return Err(Error::TooManyGroups {
2779                actual: bind_group_layouts_count,
2780                max: device_max_bind_groups,
2781            });
2782        }
2783
2784        if !desc.push_constant_ranges.is_empty() {
2785            self.require_features(wgt::Features::PUSH_CONSTANTS)?;
2786        }
2787
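            // Each push constant range must target a disjoint set of shader stages, end
            // within `max_push_constant_size`, and start and end on
            // `wgt::PUSH_CONSTANT_ALIGNMENT` boundaries. For illustration, a pair of
            // ranges that would pass these checks (hypothetical values, assuming
            // `max_push_constant_size` is at least 32):
            //
            //     wgt::PushConstantRange { stages: wgt::ShaderStages::VERTEX,   range: 0..16 }
            //     wgt::PushConstantRange { stages: wgt::ShaderStages::FRAGMENT, range: 16..32 }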
2788        let mut used_stages = wgt::ShaderStages::empty();
2789        for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
2790            if pc.stages.intersects(used_stages) {
2791                return Err(Error::MoreThanOnePushConstantRangePerStage {
2792                    index,
2793                    provided: pc.stages,
2794                    intersected: pc.stages & used_stages,
2795                });
2796            }
2797            used_stages |= pc.stages;
2798
2799            let device_max_pc_size = self.limits.max_push_constant_size;
2800            if device_max_pc_size < pc.range.end {
2801                return Err(Error::PushConstantRangeTooLarge {
2802                    index,
2803                    range: pc.range.clone(),
2804                    max: device_max_pc_size,
2805                });
2806            }
2807
2808            if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
2809                return Err(Error::MisalignedPushConstantRange {
2810                    index,
2811                    bound: pc.range.start,
2812                });
2813            }
2814            if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
2815                return Err(Error::MisalignedPushConstantRange {
2816                    index,
2817                    bound: pc.range.end,
2818                });
2819            }
2820        }
2821
2822        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
2823
2824        for bgl in desc.bind_group_layouts.iter() {
2825            bgl.same_device(self)?;
2826            count_validator.merge(&bgl.binding_count_validator);
2827        }
2828
2829        count_validator
2830            .validate(&self.limits)
2831            .map_err(Error::TooManyBindings)?;
2832
2833        let bind_group_layouts = desc
2834            .bind_group_layouts
2835            .iter()
2836            .cloned()
2837            .collect::<ArrayVec<_, { hal::MAX_BIND_GROUPS }>>();
2838
2839        let raw_bind_group_layouts = desc
2840            .bind_group_layouts
2841            .iter()
2842            .map(|bgl| bgl.raw())
2843            .collect::<ArrayVec<_, { hal::MAX_BIND_GROUPS }>>();
2844
2845        let additional_flags = if self.indirect_validation.is_some() {
2846            hal::PipelineLayoutFlags::INDIRECT_BUILTIN_UPDATE
2847        } else {
2848            hal::PipelineLayoutFlags::empty()
2849        };
2850
2851        let hal_desc = hal::PipelineLayoutDescriptor {
2852            label: desc.label.to_hal(self.instance_flags),
2853            flags: hal::PipelineLayoutFlags::FIRST_VERTEX_INSTANCE
2854                | hal::PipelineLayoutFlags::NUM_WORK_GROUPS
2855                | additional_flags,
2856            bind_group_layouts: &raw_bind_group_layouts,
2857            push_constant_ranges: desc.push_constant_ranges.as_ref(),
2858        };
2859
2860        let raw = unsafe { self.raw().create_pipeline_layout(&hal_desc) }
2861            .map_err(|e| self.handle_hal_error(e))?;
2862
2863        drop(raw_bind_group_layouts);
2864
2865        let layout = binding_model::PipelineLayout {
2866            raw: ManuallyDrop::new(raw),
2867            device: self.clone(),
2868            label: desc.label.to_string(),
2869            bind_group_layouts,
2870            push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
2871        };
2872
2873        let layout = Arc::new(layout);
2874
2875        Ok(layout)
2876    }
2877
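        /// Builds an implicit pipeline layout from the bind group layout entry maps
        /// gathered during shader reflection.
        ///
        /// Trailing empty groups are dropped, identical entry maps are deduplicated so
        /// they share a single derived [`BindGroupLayout`], and no push constant ranges
        /// are used.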
2878    pub(crate) fn derive_pipeline_layout(
2879        self: &Arc<Self>,
2880        mut derived_group_layouts: Box<ArrayVec<bgl::EntryMap, { hal::MAX_BIND_GROUPS }>>,
2881    ) -> Result<Arc<binding_model::PipelineLayout>, pipeline::ImplicitLayoutError> {
2882        while derived_group_layouts
2883            .last()
2884            .is_some_and(|map| map.is_empty())
2885        {
2886            derived_group_layouts.pop();
2887        }
2888
2889        let mut unique_bind_group_layouts = FastHashMap::default();
2890
2891        let bind_group_layouts = derived_group_layouts
2892            .into_iter()
2893            .map(|mut bgl_entry_map| {
2894                bgl_entry_map.sort();
2895                match unique_bind_group_layouts.entry(bgl_entry_map) {
2896                    hashbrown::hash_map::Entry::Occupied(v) => Ok(Arc::clone(v.get())),
2897                    hashbrown::hash_map::Entry::Vacant(e) => {
2898                        match self.create_bind_group_layout(
2899                            &None,
2900                            e.key().clone(),
2901                            bgl::Origin::Derived,
2902                        ) {
2903                            Ok(bgl) => {
2904                                e.insert(bgl.clone());
2905                                Ok(bgl)
2906                            }
2907                            Err(e) => Err(e),
2908                        }
2909                    }
2910                }
2911            })
2912            .collect::<Result<Vec<_>, _>>()?;
2913
2914        let layout_desc = binding_model::ResolvedPipelineLayoutDescriptor {
2915            label: None,
2916            bind_group_layouts: Cow::Owned(bind_group_layouts),
2917            push_constant_ranges: Cow::Borrowed(&[]), //TODO?
2918        };
2919
2920        let layout = self.create_pipeline_layout(&layout_desc)?;
2921        Ok(layout)
2922    }
2923
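        /// Creates a compute pipeline after validating the shader stage against either
        /// the provided pipeline layout or a layout derived from shader reflection.
        ///
        /// When the layout is derived (`desc.layout` is `None`), each derived bind group
        /// layout additionally records this pipeline as its exclusive pipeline.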
2924    pub(crate) fn create_compute_pipeline(
2925        self: &Arc<Self>,
2926        desc: pipeline::ResolvedComputePipelineDescriptor,
2927    ) -> Result<Arc<pipeline::ComputePipeline>, pipeline::CreateComputePipelineError> {
2928        self.check_is_valid()?;
2929
2930        self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;
2931
2932        let shader_module = desc.stage.module;
2933
2934        shader_module.same_device(self)?;
2935
2936        let is_auto_layout = desc.layout.is_none();
2937
2938        // Get the pipeline layout from the desc if it is provided.
2939        let pipeline_layout = match desc.layout {
2940            Some(pipeline_layout) => {
2941                pipeline_layout.same_device(self)?;
2942                Some(pipeline_layout)
2943            }
2944            None => None,
2945        };
2946
2947        let mut binding_layout_source = match pipeline_layout {
2948            Some(ref pipeline_layout) => {
2949                validation::BindingLayoutSource::Provided(pipeline_layout.get_binding_maps())
2950            }
2951            None => validation::BindingLayoutSource::new_derived(&self.limits),
2952        };
2953        let mut shader_binding_sizes = FastHashMap::default();
2954        let io = validation::StageIo::default();
2955
2956        let final_entry_point_name;
2957
2958        {
2959            let stage = wgt::ShaderStages::COMPUTE;
2960
2961            final_entry_point_name = shader_module.finalize_entry_point_name(
2962                stage,
2963                desc.stage.entry_point.as_ref().map(|ep| ep.as_ref()),
2964            )?;
2965
2966            if let Some(ref interface) = shader_module.interface {
2967                let _ = interface.check_stage(
2968                    &mut binding_layout_source,
2969                    &mut shader_binding_sizes,
2970                    &final_entry_point_name,
2971                    stage,
2972                    io,
2973                    None,
2974                )?;
2975            }
2976        }
2977
2978        let pipeline_layout = match binding_layout_source {
2979            validation::BindingLayoutSource::Provided(_) => {
2980                drop(binding_layout_source);
2981                pipeline_layout.unwrap()
2982            }
2983            validation::BindingLayoutSource::Derived(entries) => {
2984                self.derive_pipeline_layout(entries)?
2985            }
2986        };
2987
2988        let late_sized_buffer_groups =
2989            Device::make_late_sized_buffer_groups(&shader_binding_sizes, &pipeline_layout);
2990
2991        let cache = match desc.cache {
2992            Some(cache) => {
2993                cache.same_device(self)?;
2994                Some(cache)
2995            }
2996            None => None,
2997        };
2998
2999        let pipeline_desc = hal::ComputePipelineDescriptor {
3000            label: desc.label.to_hal(self.instance_flags),
3001            layout: pipeline_layout.raw(),
3002            stage: hal::ProgrammableStage {
3003                module: shader_module.raw(),
3004                entry_point: final_entry_point_name.as_ref(),
3005                constants: &desc.stage.constants,
3006                zero_initialize_workgroup_memory: desc.stage.zero_initialize_workgroup_memory,
3007            },
3008            cache: cache.as_ref().map(|it| it.raw()),
3009        };
3010
3011        let raw =
3012            unsafe { self.raw().create_compute_pipeline(&pipeline_desc) }.map_err(
3013                |err| match err {
3014                    hal::PipelineError::Device(error) => {
3015                        pipeline::CreateComputePipelineError::Device(self.handle_hal_error(error))
3016                    }
3017                    hal::PipelineError::Linkage(_stages, msg) => {
3018                        pipeline::CreateComputePipelineError::Internal(msg)
3019                    }
3020                    hal::PipelineError::EntryPoint(_stage) => {
3021                        pipeline::CreateComputePipelineError::Internal(
3022                            ENTRYPOINT_FAILURE_ERROR.to_string(),
3023                        )
3024                    }
3025                    hal::PipelineError::PipelineConstants(_stages, msg) => {
3026                        pipeline::CreateComputePipelineError::PipelineConstants(msg)
3027                    }
3028                },
3029            )?;
3030
3031        let pipeline = pipeline::ComputePipeline {
3032            raw: ManuallyDrop::new(raw),
3033            layout: pipeline_layout,
3034            device: self.clone(),
3035            _shader_module: shader_module,
3036            late_sized_buffer_groups,
3037            label: desc.label.to_string(),
3038            tracking_data: TrackingData::new(self.tracker_indices.compute_pipelines.clone()),
3039        };
3040
3041        let pipeline = Arc::new(pipeline);
3042
3043        if is_auto_layout {
3044            for bgl in pipeline.layout.bind_group_layouts.iter() {
3045                // `bind_group_layouts` might contain duplicate entries, so we need to ignore the result.
3046                let _ = bgl
3047                    .exclusive_pipeline
3048                    .set(binding_model::ExclusivePipeline::Compute(Arc::downgrade(
3049                        &pipeline,
3050                    )));
3051            }
3052        }
3053
3054        Ok(pipeline)
3055    }
3056
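        /// Creates a render pipeline, validating color targets, depth/stencil state,
        /// vertex buffers and attributes, multisampling, and the vertex/fragment shader
        /// interfaces against this device's limits and features before handing the
        /// descriptor to the backend.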
3057    pub(crate) fn create_render_pipeline(
3058        self: &Arc<Self>,
3059        desc: pipeline::ResolvedRenderPipelineDescriptor,
3060    ) -> Result<Arc<pipeline::RenderPipeline>, pipeline::CreateRenderPipelineError> {
3061        use wgt::TextureFormatFeatureFlags as Tfff;
3062
3063        self.check_is_valid()?;
3064
3065        let mut shader_binding_sizes = FastHashMap::default();
3066
3067        let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
3068        let max_attachments = self.limits.max_color_attachments as usize;
3069        if num_attachments > max_attachments {
3070            return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
3071                command::ColorAttachmentError::TooMany {
3072                    given: num_attachments,
3073                    limit: max_attachments,
3074                },
3075            ));
3076        }
3077
3078        let color_targets = desc
3079            .fragment
3080            .as_ref()
3081            .map_or(&[][..], |fragment| &fragment.targets);
3082        let depth_stencil_state = desc.depth_stencil.as_ref();
3083
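            // Using different blend states or write masks across color targets requires
            // the `INDEPENDENT_BLEND` downlevel flag.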
3084        {
3085            let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> =
3086                color_targets.iter().filter_map(|x| x.as_ref()).collect();
3087            if !cts.is_empty() && {
3088                let first = &cts[0];
3089                cts[1..]
3090                    .iter()
3091                    .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend)
3092            } {
3093                self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?;
3094            }
3095        }
3096
3097        let mut io = validation::StageIo::default();
3098        let mut validated_stages = wgt::ShaderStages::empty();
3099
3100        let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len());
3101        let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len());
3102        let mut total_attributes = 0;
3103        let mut shader_expects_dual_source_blending = false;
3104        let mut pipeline_expects_dual_source_blending = false;
3105        for (i, vb_state) in desc.vertex.buffers.iter().enumerate() {
3106            // https://gpuweb.github.io/gpuweb/#abstract-opdef-validating-gpuvertexbufferlayout
3107
3108            if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 {
3109                return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge {
3110                    index: i as u32,
3111                    given: vb_state.array_stride as u32,
3112                    limit: self.limits.max_vertex_buffer_array_stride,
3113                });
3114            }
3115            if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
3116                return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
3117                    index: i as u32,
3118                    stride: vb_state.array_stride,
3119                });
3120            }
3121
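                // When `array_stride` is zero, attribute offsets are only bounded by the
                // device's maximum stride; otherwise every attribute must fit within the
                // declared stride.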
3122            let max_stride = if vb_state.array_stride == 0 {
3123                self.limits.max_vertex_buffer_array_stride as u64
3124            } else {
3125                vb_state.array_stride
3126            };
3127            let mut last_stride = 0;
3128            for attribute in vb_state.attributes.iter() {
3129                let attribute_stride = attribute.offset + attribute.format.size();
3130                if attribute_stride > max_stride {
3131                    return Err(
3132                        pipeline::CreateRenderPipelineError::VertexAttributeStrideTooLarge {
3133                            location: attribute.shader_location,
3134                            given: attribute_stride as u32,
3135                            limit: max_stride as u32,
3136                        },
3137                    );
3138                }
3139
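                    // Offsets must be aligned to `min(format size, 4)`: for example, a
                    // `Float32x2` attribute (8 bytes) needs 4-byte alignment, while a
                    // `Unorm8x2` attribute (2 bytes) needs 2-byte alignment.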
3140                let required_offset_alignment = attribute.format.size().min(4);
3141                if attribute.offset % required_offset_alignment != 0 {
3142                    return Err(
3143                        pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
3144                            location: attribute.shader_location,
3145                            offset: attribute.offset,
3146                        },
3147                    );
3148                }
3149
3150                if attribute.shader_location >= self.limits.max_vertex_attributes {
3151                    return Err(
3152                        pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
3153                            given: attribute.shader_location,
3154                            limit: self.limits.max_vertex_attributes,
3155                        },
3156                    );
3157                }
3158
3159                last_stride = last_stride.max(attribute_stride);
3160            }
3161            vertex_steps.push(pipeline::VertexStep {
3162                stride: vb_state.array_stride,
3163                last_stride,
3164                mode: vb_state.step_mode,
3165            });
3166            if vb_state.attributes.is_empty() {
3167                continue;
3168            }
3169            vertex_buffers.push(hal::VertexBufferLayout {
3170                array_stride: vb_state.array_stride,
3171                step_mode: vb_state.step_mode,
3172                attributes: vb_state.attributes.as_ref(),
3173            });
3174
3175            for attribute in vb_state.attributes.iter() {
3176                if attribute.offset >= 0x10000000 {
3177                    return Err(
3178                        pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
3179                            location: attribute.shader_location,
3180                            offset: attribute.offset,
3181                        },
3182                    );
3183                }
3184
3185                if let wgt::VertexFormat::Float64
3186                | wgt::VertexFormat::Float64x2
3187                | wgt::VertexFormat::Float64x3
3188                | wgt::VertexFormat::Float64x4 = attribute.format
3189                {
3190                    self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?;
3191                }
3192
3193                let previous = io.insert(
3194                    attribute.shader_location,
3195                    validation::InterfaceVar::vertex_attribute(attribute.format),
3196                );
3197
3198                if previous.is_some() {
3199                    return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash(
3200                        attribute.shader_location,
3201                    ));
3202                }
3203            }
3204            total_attributes += vb_state.attributes.len();
3205        }
3206
3207        if vertex_buffers.len() > self.limits.max_vertex_buffers as usize {
3208            return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers {
3209                given: vertex_buffers.len() as u32,
3210                limit: self.limits.max_vertex_buffers,
3211            });
3212        }
3213        if total_attributes > self.limits.max_vertex_attributes as usize {
3214            return Err(
3215                pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
3216                    given: total_attributes as u32,
3217                    limit: self.limits.max_vertex_attributes,
3218                },
3219            );
3220        }
3221
3222        if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() {
3223            return Err(
3224                pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology {
3225                    strip_index_format: desc.primitive.strip_index_format,
3226                    topology: desc.primitive.topology,
3227                },
3228            );
3229        }
3230
3231        if desc.primitive.unclipped_depth {
3232            self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?;
3233        }
3234
3235        if desc.primitive.polygon_mode == wgt::PolygonMode::Line {
3236            self.require_features(wgt::Features::POLYGON_MODE_LINE)?;
3237        }
3238        if desc.primitive.polygon_mode == wgt::PolygonMode::Point {
3239            self.require_features(wgt::Features::POLYGON_MODE_POINT)?;
3240        }
3241
3242        if desc.primitive.conservative {
3243            self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?;
3244        }
3245
3246        if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
3247            return Err(
3248                pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode,
3249            );
3250        }
3251
3252        let mut target_specified = false;
3253
3254        for (i, cs) in color_targets.iter().enumerate() {
3255            if let Some(cs) = cs.as_ref() {
3256                target_specified = true;
3257                let error = 'error: {
3258                    if cs.write_mask.contains_unknown_bits() {
3259                        break 'error Some(pipeline::ColorStateError::InvalidWriteMask(
3260                            cs.write_mask,
3261                        ));
3262                    }
3263
3264                    let format_features = self.describe_format_features(cs.format)?;
3265                    if !format_features
3266                        .allowed_usages
3267                        .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
3268                    {
3269                        break 'error Some(pipeline::ColorStateError::FormatNotRenderable(
3270                            cs.format,
3271                        ));
3272                    }
3273                    let blendable = format_features.flags.contains(Tfff::BLENDABLE);
3274                    let filterable = format_features.flags.contains(Tfff::FILTERABLE);
3275                    let adapter_specific = self
3276                        .features
3277                        .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
3278                    // According to the WebGPU specification, the format must also be
3279                    // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is enabled; use
3280                    // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to lift
3281                    // this restriction.
3282                    if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) {
3283                        break 'error Some(pipeline::ColorStateError::FormatNotBlendable(
3284                            cs.format,
3285                        ));
3286                    }
3287                    if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) {
3288                        break 'error Some(pipeline::ColorStateError::FormatNotColor(cs.format));
3289                    }
3290
3291                    if desc.multisample.count > 1
3292                        && !format_features
3293                            .flags
3294                            .sample_count_supported(desc.multisample.count)
3295                    {
3296                        break 'error Some(pipeline::ColorStateError::InvalidSampleCount(
3297                            desc.multisample.count,
3298                            cs.format,
3299                            cs.format
3300                                .guaranteed_format_features(self.features)
3301                                .flags
3302                                .supported_sample_counts(),
3303                            self.adapter
3304                                .get_texture_format_features(cs.format)
3305                                .flags
3306                                .supported_sample_counts(),
3307                        ));
3308                    }
3309
3310                    if let Some(blend_mode) = cs.blend {
3311                        for factor in [
3312                            blend_mode.color.src_factor,
3313                            blend_mode.color.dst_factor,
3314                            blend_mode.alpha.src_factor,
3315                            blend_mode.alpha.dst_factor,
3316                        ] {
3317                            if factor.ref_second_blend_source() {
3318                                self.require_features(wgt::Features::DUAL_SOURCE_BLENDING)?;
3319                                if i == 0 {
3320                                    pipeline_expects_dual_source_blending = true;
3321                                    break;
3322                                } else {
3323                                    return Err(pipeline::CreateRenderPipelineError
3324                                        ::BlendFactorOnUnsupportedTarget { factor, target: i as u32 });
3325                                }
3326                            }
3327                        }
3328                    }
3329
3330                    break 'error None;
3331                };
3332                if let Some(e) = error {
3333                    return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e));
3334                }
3335            }
3336        }
3337
3338        let limit = self.limits.max_color_attachment_bytes_per_sample;
3339        let formats = color_targets
3340            .iter()
3341            .map(|cs| cs.as_ref().map(|cs| cs.format));
3342        if let Err(total) = validate_color_attachment_bytes_per_sample(formats, limit) {
3343            return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
3344                command::ColorAttachmentError::TooManyBytesPerSample { total, limit },
3345            ));
3346        }
3347
3348        if let Some(ds) = depth_stencil_state {
3349            target_specified = true;
3350            let error = 'error: {
3351                let format_features = self.describe_format_features(ds.format)?;
3352                if !format_features
3353                    .allowed_usages
3354                    .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
3355                {
3356                    break 'error Some(pipeline::DepthStencilStateError::FormatNotRenderable(
3357                        ds.format,
3358                    ));
3359                }
3360
3361                let aspect = hal::FormatAspects::from(ds.format);
3362                if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) {
3363                    break 'error Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format));
3364                }
3365                if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) {
3366                    break 'error Some(pipeline::DepthStencilStateError::FormatNotStencil(
3367                        ds.format,
3368                    ));
3369                }
3370                if desc.multisample.count > 1
3371                    && !format_features
3372                        .flags
3373                        .sample_count_supported(desc.multisample.count)
3374                {
3375                    break 'error Some(pipeline::DepthStencilStateError::InvalidSampleCount(
3376                        desc.multisample.count,
3377                        ds.format,
3378                        ds.format
3379                            .guaranteed_format_features(self.features)
3380                            .flags
3381                            .supported_sample_counts(),
3382                        self.adapter
3383                            .get_texture_format_features(ds.format)
3384                            .flags
3385                            .supported_sample_counts(),
3386                    ));
3387                }
3388
3389                break 'error None;
3390            };
3391            if let Some(e) = error {
3392                return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e));
3393            }
3394
3395            if ds.bias.clamp != 0.0 {
3396                self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?;
3397            }
3398        }
3399
3400        if !target_specified {
3401            return Err(pipeline::CreateRenderPipelineError::NoTargetSpecified);
3402        }
3403
3404        let is_auto_layout = desc.layout.is_none();
3405
3406        // Get the pipeline layout from the desc if it is provided.
3407        let pipeline_layout = match desc.layout {
3408            Some(pipeline_layout) => {
3409                pipeline_layout.same_device(self)?;
3410                Some(pipeline_layout)
3411            }
3412            None => None,
3413        };
3414
3415        let mut binding_layout_source = match pipeline_layout {
3416            Some(ref pipeline_layout) => {
3417                validation::BindingLayoutSource::Provided(pipeline_layout.get_binding_maps())
3418            }
3419            None => validation::BindingLayoutSource::new_derived(&self.limits),
3420        };
3421
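            // Only power-of-two sample counts up to 32 (1, 2, 4, 8, 16, or 32) are accepted
            // here; whether each attachment format actually supports the requested count was
            // already validated against the color and depth/stencil targets above.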
3422        let samples = {
3423            let sc = desc.multisample.count;
3424            if sc == 0 || sc > 32 || !sc.is_power_of_two() {
3425                return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
3426            }
3427            sc
3428        };
3429
3430        let vertex_entry_point_name;
3431        let vertex_stage = {
3432            let stage_desc = &desc.vertex.stage;
3433            let stage = wgt::ShaderStages::VERTEX;
3434
3435            let vertex_shader_module = &stage_desc.module;
3436            vertex_shader_module.same_device(self)?;
3437
3438            let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
3439
3440            vertex_entry_point_name = vertex_shader_module
3441                .finalize_entry_point_name(
3442                    stage,
3443                    stage_desc.entry_point.as_ref().map(|ep| ep.as_ref()),
3444                )
3445                .map_err(stage_err)?;
3446
3447            if let Some(ref interface) = vertex_shader_module.interface {
3448                io = interface
3449                    .check_stage(
3450                        &mut binding_layout_source,
3451                        &mut shader_binding_sizes,
3452                        &vertex_entry_point_name,
3453                        stage,
3454                        io,
3455                        desc.depth_stencil.as_ref().map(|d| d.depth_compare),
3456                    )
3457                    .map_err(stage_err)?;
3458                validated_stages |= stage;
3459            }
3460
3461            hal::ProgrammableStage {
3462                module: vertex_shader_module.raw(),
3463                entry_point: &vertex_entry_point_name,
3464                constants: &stage_desc.constants,
3465                zero_initialize_workgroup_memory: stage_desc.zero_initialize_workgroup_memory,
3466            }
3467        };
3468
3469        let fragment_entry_point_name;
3470        let fragment_stage = match desc.fragment {
3471            Some(ref fragment_state) => {
3472                let stage = wgt::ShaderStages::FRAGMENT;
3473
3474                let shader_module = &fragment_state.stage.module;
3475                shader_module.same_device(self)?;
3476
3477                let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
3478
3479                fragment_entry_point_name = shader_module
3480                    .finalize_entry_point_name(
3481                        stage,
3482                        fragment_state
3483                            .stage
3484                            .entry_point
3485                            .as_ref()
3486                            .map(|ep| ep.as_ref()),
3487                    )
3488                    .map_err(stage_err)?;
3489
3490                if validated_stages == wgt::ShaderStages::VERTEX {
3491                    if let Some(ref interface) = shader_module.interface {
3492                        io = interface
3493                            .check_stage(
3494                                &mut binding_layout_source,
3495                                &mut shader_binding_sizes,
3496                                &fragment_entry_point_name,
3497                                stage,
3498                                io,
3499                                desc.depth_stencil.as_ref().map(|d| d.depth_compare),
3500                            )
3501                            .map_err(stage_err)?;
3502                        validated_stages |= stage;
3503                    }
3504                }
3505
3506                if let Some(ref interface) = shader_module.interface {
3507                    shader_expects_dual_source_blending = interface
3508                        .fragment_uses_dual_source_blending(&fragment_entry_point_name)
3509                        .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
3510                            stage,
3511                            error,
3512                        })?;
3513                }
3514
3515                Some(hal::ProgrammableStage {
3516                    module: shader_module.raw(),
3517                    entry_point: &fragment_entry_point_name,
3518                    constants: &fragment_state.stage.constants,
3519                    zero_initialize_workgroup_memory: fragment_state
3520                        .stage
3521                        .zero_initialize_workgroup_memory,
3522                })
3523            }
3524            None => None,
3525        };
3526
3527        if !pipeline_expects_dual_source_blending && shader_expects_dual_source_blending {
3528            return Err(
3529                pipeline::CreateRenderPipelineError::ShaderExpectsPipelineToUseDualSourceBlending,
3530            );
3531        }
3532        if pipeline_expects_dual_source_blending && !shader_expects_dual_source_blending {
3533            return Err(
3534                pipeline::CreateRenderPipelineError::PipelineExpectsShaderToUseDualSourceBlending,
3535            );
3536        }
3537
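            // Cross-check the fragment outputs against the declared color target formats;
            // outputs without a matching target only produce a warning.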
3538        if validated_stages.contains(wgt::ShaderStages::FRAGMENT) {
3539            for (i, output) in io.iter() {
3540                match color_targets.get(*i as usize) {
3541                    Some(Some(state)) => {
3542                        validation::check_texture_format(state.format, &output.ty).map_err(
3543                            |pipeline| {
3544                                pipeline::CreateRenderPipelineError::ColorState(
3545                                    *i as u8,
3546                                    pipeline::ColorStateError::IncompatibleFormat {
3547                                        pipeline,
3548                                        shader: output.ty,
3549                                    },
3550                                )
3551                            },
3552                        )?;
3553                    }
3554                    _ => {
3555                        log::warn!(
3556                            "The fragment stage {:?} output at @location({}) is not consumed and will be ignored",
3557                            fragment_stage
3558                                .as_ref()
3559                                .map_or("", |stage| stage.entry_point),
3560                            i
3561                        );
3562                    }
3563                }
3564            }
3565        }
3566        let last_stage = match desc.fragment {
3567            Some(_) => wgt::ShaderStages::FRAGMENT,
3568            None => wgt::ShaderStages::VERTEX,
3569        };
3570        if is_auto_layout && !validated_stages.contains(last_stage) {
3571            return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
3572        }
3573
3574        let pipeline_layout = match binding_layout_source {
3575            validation::BindingLayoutSource::Provided(_) => {
3576                drop(binding_layout_source);
3577                pipeline_layout.unwrap()
3578            }
3579            validation::BindingLayoutSource::Derived(entries) => {
3580                self.derive_pipeline_layout(entries)?
3581            }
3582        };
3583
3584        // Multiview is only supported if the feature is enabled
3585        if desc.multiview.is_some() {
3586            self.require_features(wgt::Features::MULTIVIEW)?;
3587        }
3588
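            // Backends without `BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED` require every
            // shader-visible buffer binding size to be a multiple of 16 bytes.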
3589        if !self
3590            .downlevel
3591            .flags
3592            .contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED)
3593        {
3594            for (binding, size) in shader_binding_sizes.iter() {
3595                if size.get() % 16 != 0 {
3596                    return Err(pipeline::CreateRenderPipelineError::UnalignedShader {
3597                        binding: binding.binding,
3598                        group: binding.group,
3599                        size: size.get(),
3600                    });
3601                }
3602            }
3603        }
3604
3605        let late_sized_buffer_groups =
3606            Device::make_late_sized_buffer_groups(&shader_binding_sizes, &pipeline_layout);
3607
3608        let cache = match desc.cache {
3609            Some(cache) => {
3610                cache.same_device(self)?;
3611                Some(cache)
3612            }
3613            None => None,
3614        };
3615
3616        let pipeline_desc = hal::RenderPipelineDescriptor {
3617            label: desc.label.to_hal(self.instance_flags),
3618            layout: pipeline_layout.raw(),
3619            vertex_buffers: &vertex_buffers,
3620            vertex_stage,
3621            primitive: desc.primitive,
3622            depth_stencil: desc.depth_stencil.clone(),
3623            multisample: desc.multisample,
3624            fragment_stage,
3625            color_targets,
3626            multiview: desc.multiview,
3627            cache: cache.as_ref().map(|it| it.raw()),
3628        };
3629        let raw =
3630            unsafe { self.raw().create_render_pipeline(&pipeline_desc) }.map_err(
3631                |err| match err {
3632                    hal::PipelineError::Device(error) => {
3633                        pipeline::CreateRenderPipelineError::Device(self.handle_hal_error(error))
3634                    }
3635                    hal::PipelineError::Linkage(stage, msg) => {
3636                        pipeline::CreateRenderPipelineError::Internal { stage, error: msg }
3637                    }
3638                    hal::PipelineError::EntryPoint(stage) => {
3639                        pipeline::CreateRenderPipelineError::Internal {
3640                            stage: hal::auxil::map_naga_stage(stage),
3641                            error: ENTRYPOINT_FAILURE_ERROR.to_string(),
3642                        }
3643                    }
3644                    hal::PipelineError::PipelineConstants(stage, error) => {
3645                        pipeline::CreateRenderPipelineError::PipelineConstants { stage, error }
3646                    }
3647                },
3648            )?;
3649
3650        let pass_context = RenderPassContext {
3651            attachments: AttachmentData {
3652                colors: color_targets
3653                    .iter()
3654                    .map(|state| state.as_ref().map(|s| s.format))
3655                    .collect(),
3656                resolves: ArrayVec::new(),
3657                depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
3658            },
3659            sample_count: samples,
3660            multiview: desc.multiview,
3661        };
3662
3663        let mut flags = pipeline::PipelineFlags::empty();
3664        for state in color_targets.iter().filter_map(|s| s.as_ref()) {
3665            if let Some(ref bs) = state.blend {
3666                if bs.color.uses_constant() | bs.alpha.uses_constant() {
3667                    flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
3668                }
3669            }
3670        }
3671        if let Some(ds) = depth_stencil_state.as_ref() {
3672            if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
3673                flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
3674            }
3675            if !ds.is_depth_read_only() {
3676                flags |= pipeline::PipelineFlags::WRITES_DEPTH;
3677            }
3678            if !ds.is_stencil_read_only(desc.primitive.cull_mode) {
3679                flags |= pipeline::PipelineFlags::WRITES_STENCIL;
3680            }
3681        }
3682
3683        let shader_modules = {
3684            let mut shader_modules = ArrayVec::new();
3685            shader_modules.push(desc.vertex.stage.module);
3686            shader_modules.extend(desc.fragment.map(|f| f.stage.module));
3687            shader_modules
3688        };
3689
3690        let pipeline = pipeline::RenderPipeline {
3691            raw: ManuallyDrop::new(raw),
3692            layout: pipeline_layout,
3693            device: self.clone(),
3694            pass_context,
3695            _shader_modules: shader_modules,
3696            flags,
3697            strip_index_format: desc.primitive.strip_index_format,
3698            vertex_steps,
3699            late_sized_buffer_groups,
3700            label: desc.label.to_string(),
3701            tracking_data: TrackingData::new(self.tracker_indices.render_pipelines.clone()),
3702        };
3703
3704        let pipeline = Arc::new(pipeline);
3705
3706        if is_auto_layout {
3707            for bgl in pipeline.layout.bind_group_layouts.iter() {
3708                // `bind_group_layouts` might contain duplicate entries, so we need to ignore the result.
3709                let _ = bgl
3710                    .exclusive_pipeline
3711                    .set(binding_model::ExclusivePipeline::Render(Arc::downgrade(
3712                        &pipeline,
3713                    )));
3714            }
3715        }
3716
3717        Ok(pipeline)
3718    }
3719
3720    /// # Safety
3721    /// The `data` field on `desc` must have previously been returned from [`crate::global::Global::pipeline_cache_get_data`]
3722    pub unsafe fn create_pipeline_cache(
3723        self: &Arc<Self>,
3724        desc: &pipeline::PipelineCacheDescriptor,
3725    ) -> Result<Arc<pipeline::PipelineCache>, pipeline::CreatePipelineCacheError> {
3726        use crate::pipeline_cache;
3727
3728        self.check_is_valid()?;
3729
3730        self.require_features(wgt::Features::PIPELINE_CACHE)?;
3731        let data = if let Some((data, validation_key)) = desc
3732            .data
3733            .as_ref()
3734            .zip(self.raw().pipeline_cache_validation_key())
3735        {
3736            let data = pipeline_cache::validate_pipeline_cache(
3737                data,
3738                &self.adapter.raw.info,
3739                validation_key,
3740            );
3741            match data {
3742                Ok(data) => Some(data),
3743                Err(e) if e.was_avoidable() || !desc.fallback => return Err(e.into()),
3744                // If the error was unavoidable and we are asked to fall back, do so
3745                Err(_) => None,
3746            }
3747        } else {
3748            None
3749        };
3750        let cache_desc = hal::PipelineCacheDescriptor {
3751            data,
3752            label: desc.label.to_hal(self.instance_flags),
3753        };
3754        let raw = match unsafe { self.raw().create_pipeline_cache(&cache_desc) } {
3755            Ok(raw) => raw,
3756            Err(e) => match e {
3757                hal::PipelineCacheError::Device(e) => return Err(self.handle_hal_error(e).into()),
3758            },
3759        };
3760        let cache = pipeline::PipelineCache {
3761            device: self.clone(),
3762            label: desc.label.to_string(),
3763            // This would be `None` in the error case, which we don't implement yet
3764            raw: ManuallyDrop::new(raw),
3765        };
3766
3767        let cache = Arc::new(cache);
3768
3769        Ok(cache)
3770    }
3771
3772    fn get_texture_format_features(&self, format: TextureFormat) -> wgt::TextureFormatFeatures {
3773        // Variant of adapter.get_texture_format_features that takes device features into account
3774        use wgt::TextureFormatFeatureFlags as tfsc;
3775        let mut format_features = self.adapter.get_texture_format_features(format);
3776        if (format == TextureFormat::R32Float
3777            || format == TextureFormat::Rg32Float
3778            || format == TextureFormat::Rgba32Float)
3779            && !self.features.contains(wgt::Features::FLOAT32_FILTERABLE)
3780        {
3781            format_features.flags.set(tfsc::FILTERABLE, false);
3782        }
3783        format_features
3784    }
3785
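        /// Returns the texture format features usable on this device, after requiring any
        /// features the format itself needs.
        ///
        /// Adapter-reported features are used when
        /// [`wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] is enabled or the
        /// backend lacks baseline WebGPU format support; otherwise the guaranteed
        /// per-format features are returned.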
3786    fn describe_format_features(
3787        &self,
3788        format: TextureFormat,
3789    ) -> Result<wgt::TextureFormatFeatures, MissingFeatures> {
3790        self.require_features(format.required_features())?;
3791
3792        let using_device_features = self
3793            .features
3794            .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
3795        // If we're running downlevel, we need to manually ask the backend what
3796        // we can use, since the backend may not meet WebGPU's guaranteed format support.
3797        let downlevel = !self
3798            .downlevel
3799            .flags
3800            .contains(wgt::DownlevelFlags::WEBGPU_TEXTURE_FORMAT_SUPPORT);
3801
3802        if using_device_features || downlevel {
3803            Ok(self.get_texture_format_features(format))
3804        } else {
3805            Ok(format.guaranteed_format_features(self.features))
3806        }
3807    }
3808
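    /// Blocks until the device's fence has reached `submission_index`, then triages any
    /// newly completed submissions on the queue. Only compiled with the `replay` feature.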
3809    #[cfg(feature = "replay")]
3810    pub(crate) fn wait_for_submit(
3811        &self,
3812        submission_index: crate::SubmissionIndex,
3813    ) -> Result<(), DeviceError> {
3814        let fence = self.fence.read();
3815        let last_done_index = unsafe { self.raw().get_fence_value(fence.as_ref()) }
3816            .map_err(|e| self.handle_hal_error(e))?;
3817        if last_done_index < submission_index {
3818            unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) }
3819                .map_err(|e| self.handle_hal_error(e))?;
3820            drop(fence);
3821            if let Some(queue) = self.get_queue() {
3822                let closures = queue.lock_life().triage_submissions(submission_index);
3823                assert!(
3824                    closures.is_empty(),
3825                    "wait_for_submit is not expected to work with closures"
3826                );
3827            }
3828        }
3829        Ok(())
3830    }
3831
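        /// Creates a query set, requiring [`wgt::Features::TIMESTAMP_QUERY`] or
        /// [`wgt::Features::PIPELINE_STATISTICS_QUERY`] for the corresponding query types
        /// and rejecting empty sets or sets larger than [`wgt::QUERY_SET_MAX_QUERIES`].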
3832    pub(crate) fn create_query_set(
3833        self: &Arc<Self>,
3834        desc: &resource::QuerySetDescriptor,
3835    ) -> Result<Arc<QuerySet>, resource::CreateQuerySetError> {
3836        use resource::CreateQuerySetError as Error;
3837
3838        self.check_is_valid()?;
3839
3840        match desc.ty {
3841            wgt::QueryType::Occlusion => {}
3842            wgt::QueryType::Timestamp => {
3843                self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
3844            }
3845            wgt::QueryType::PipelineStatistics(..) => {
3846                self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
3847            }
3848        }
3849
3850        if desc.count == 0 {
3851            return Err(Error::ZeroCount);
3852        }
3853
3854        if desc.count > wgt::QUERY_SET_MAX_QUERIES {
3855            return Err(Error::TooManyQueries {
3856                count: desc.count,
3857                maximum: wgt::QUERY_SET_MAX_QUERIES,
3858            });
3859        }
3860
3861        let hal_desc = desc.map_label(|label| label.to_hal(self.instance_flags));
3862
3863        let raw = unsafe { self.raw().create_query_set(&hal_desc) }
3864            .map_err(|e| self.handle_hal_error(e))?;
3865
3866        let query_set = QuerySet {
3867            raw: ManuallyDrop::new(raw),
3868            device: self.clone(),
3869            label: desc.label.to_string(),
3870            tracking_data: TrackingData::new(self.tracker_indices.query_sets.clone()),
3871            desc: desc.map_label(|_| ()),
3872        };
3873
3874        let query_set = Arc::new(query_set);
3875
3876        Ok(query_set)
3877    }
3878
3879    fn lose(&self, message: &str) {
3880        // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device.
3881
3882        // Mark the device explicitly as invalid. This is checked in various
3883        // places to prevent new work from being submitted.
3884        self.valid.store(false, Ordering::Release);
3885
3886        // 1) Resolve the GPUDevice device.lost promise.
3887        if let Some(device_lost_closure) = self.device_lost_closure.lock().take() {
3888            device_lost_closure(DeviceLostReason::Unknown, message.to_string());
3889        }
3890
3891        // 2) Complete any outstanding mapAsync() steps.
3892        // 3) Complete any outstanding onSubmittedWorkDone() steps.
3893
3894        // These parts are passively accomplished by setting valid to false,
3895        // since that will prevent any new work from being added to the queues.
3896        // Future calls to poll_devices will continue to check the work queues
3897        // until they are cleared, and then drop the device.
3898
3899        // Eagerly release GPU resources.
3900        self.release_gpu_resources();
3901    }
3902
3903    pub(crate) fn release_gpu_resources(&self) {
3904        // This is called when the device is lost, which makes every associated
3905        // resource invalid and unusable. This is an opportunity to release all of
3906        // the underlying gpu resources, even though the objects remain visible to
3907        // the user agent. We purge this memory naturally once resources have been
3908        // moved into the appropriate buckets, so this function only needs to
3909        // initiate that movement, which it does by calling `destroy` on all the
3910        // resources we know about.
3911
3912        // During these iterations, we discard all errors. We don't care!
3913        let trackers = self.trackers.lock();
3914        for buffer in trackers.buffers.used_resources() {
3915            if let Some(buffer) = Weak::upgrade(buffer) {
3916                let _ = buffer.destroy();
3917            }
3918        }
3919        for texture in trackers.textures.used_resources() {
3920            if let Some(texture) = Weak::upgrade(texture) {
3921                let _ = texture.destroy();
3922            }
3923        }
3924    }
3925
3926    pub(crate) fn new_usage_scope(&self) -> UsageScope<'_> {
3927        UsageScope::new_pooled(&self.usage_scopes, &self.tracker_indices)
3928    }
3929
3930    pub fn get_hal_counters(&self) -> wgt::HalCounters {
3931        self.raw().get_internal_counters()
3932    }
3933
3934    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
3935        self.raw().generate_allocator_report()
3936    }
3937}
3938
3939crate::impl_resource_type!(Device);
3940crate::impl_labeled!(Device);
3941crate::impl_storage_item!(Device);