wgpu_core/device/queue.rs

use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
use core::{
    iter,
    mem::{self, ManuallyDrop},
    ptr::NonNull,
    sync::atomic::Ordering,
};

use smallvec::SmallVec;
use thiserror::Error;

use super::{life::LifetimeTracker, Device};
use crate::device::resource::CommandIndices;
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::scratch::ScratchBuffer;
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
        ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide,
        TexelCopyTextureInfo, TransferError,
    },
    conv,
    device::{DeviceError, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    id::{self, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::{rank, Mutex, MutexGuard, RwLockWriteGuard},
    resource::{
        Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
        DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled,
        ParentDevice, ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable,
    },
    resource_log,
    snatch::SnatchGuard,
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};

pub struct Queue {
    raw: Box<dyn hal::DynQueue>,
    pub(crate) pending_writes: Mutex<PendingWrites>,
    life_tracker: Mutex<LifetimeTracker>,
    // The device needs to be dropped last (`Device.zero_buffer` might be referenced by the encoder in pending writes).
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(
        device: Arc<Device>,
        raw: Box<dyn hal::DynQueue>,
    ) -> Result<Self, DeviceError> {
        let pending_encoder = device
            .command_allocator
            .acquire_encoder(device.raw(), raw.as_ref())
            .map_err(DeviceError::from_hal)?;

        let mut pending_writes = PendingWrites::new(pending_encoder);

        let zero_buffer = device.zero_buffer.as_ref();
        pending_writes.activate();
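        // Record the zero buffer's one-time initialization: transition it to
        // COPY_DST, clear it to zeros, then transition it to COPY_SRC so that
        // later zero-initialization copies can read from it.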
        unsafe {
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::empty(),
                        to: wgt::BufferUses::COPY_DST,
                    },
                }]);
            pending_writes
                .command_encoder
                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::COPY_DST,
                        to: wgt::BufferUses::COPY_SRC,
                    },
                }]);
        }

        Ok(Queue {
            raw,
            device,
            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
        })
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }

    #[track_caller]
    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
        self.life_tracker.lock()
    }

    pub(crate) fn maintain(
        &self,
        submission_index: u64,
        snatch_guard: &SnatchGuard,
    ) -> (
        SmallVec<[SubmittedWorkDoneClosure; 1]>,
        Vec<super::BufferMapPendingClosure>,
        bool,
    ) {
        let mut life_tracker = self.lock_life();
        let submission_closures = life_tracker.triage_submissions(submission_index);

        let mapping_closures = life_tracker.handle_mapping(snatch_guard);

        let queue_empty = life_tracker.queue_empty();

        (submission_closures, mapping_closures, queue_empty)
    }
}

crate::impl_resource_type!(Queue);
// TODO: https://github.com/gfx-rs/wgpu/issues/4014
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        let last_successful_submission_index = self
            .device
            .last_successful_submission_index
            .load(Ordering::Acquire);

        let fence = self.device.fence.read();

        // Try waiting on the last submission using the following sequence of timeouts
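        // (roughly 6.3 seconds in total if every attempt times out).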
        let timeouts_in_ms = [100, 200, 400, 800, 1600, 3200];

        for (i, timeout_ms) in timeouts_in_ms.into_iter().enumerate() {
            let is_last_iter = i == timeouts_in_ms.len() - 1;

            api_log!(
                "Waiting on last submission. try: {}/{}. timeout: {}ms",
                i + 1,
                timeouts_in_ms.len(),
                timeout_ms
            );

            let wait_res = unsafe {
                self.device.raw().wait(
                    fence.as_ref(),
                    last_successful_submission_index,
                    #[cfg(not(target_arch = "wasm32"))]
                    timeout_ms,
                    #[cfg(target_arch = "wasm32")]
                    0, // WebKit and Chromium don't support a non-0 timeout
                )
            };
            // Note: If we don't panic below we are in UB land (destroying resources while they are still in use by the GPU).
            match wait_res {
                Ok(true) => break,
                Ok(false) => {
                    // It's fine that we timed out on WebGL; GL objects can be deleted early as they
                    // will be kept around by the driver if GPU work hasn't finished.
                    // Moreover, the way we emulate read mappings on WebGL allows us to execute map_buffer earlier than on other
                    // backends since getBufferSubData is synchronous with respect to the other previously enqueued GL commands.
                    // Relying on this behavior breaks the clean abstraction wgpu-hal tries to maintain and
                    // we should find ways to improve this. See https://github.com/gfx-rs/wgpu/issues/6538.
                    #[cfg(target_arch = "wasm32")]
                    {
                        break;
                    }
                    #[cfg(not(target_arch = "wasm32"))]
                    {
                        if is_last_iter {
                            panic!(
                                "We timed out while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                }
                Err(e) => match e {
                    hal::DeviceError::OutOfMemory => {
                        if is_last_iter {
                            panic!(
                                "We ran into an OOM error while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                    hal::DeviceError::Lost => {
                        self.device.handle_hal_error(e); // will lose the device
                        break;
                    }
                    hal::DeviceError::ResourceCreationFailed => unreachable!(),
                    hal::DeviceError::Unexpected => {
                        panic!(
                            "We ran into an unexpected error while waiting on the last successful submission to complete!"
                        );
                    }
                },
            }
        }
        drop(fence);

        let snatch_guard = self.device.snatchable_lock.read();
        let (submission_closures, mapping_closures, queue_empty) =
            self.maintain(last_successful_submission_index, &snatch_guard);
        drop(snatch_guard);

        assert!(queue_empty);

        let closures = crate::device::UserClosures {
            mappings: mapping_closures,
            submissions: submission_closures,
            device_lost_invocations: SmallVec::new(),
        };

        closures.fire();
    }
}

#[cfg(send_sync)]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;

/// A texture or buffer to be freed soon.
///
/// This is just a tagged raw texture or buffer, generally about to be added to
/// some other more specific container like:
///
/// - `PendingWrites::temp_resources`: resources used by queue writes and
///   unmaps, waiting to be folded in with the next queue submission
///
/// - `ActiveSubmission::temp_resources`: temporary resources used by a queue
///   submission, to be freed when it completes
#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    ScratchBuffer(ScratchBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

/// A series of raw [`CommandBuffer`]s that have been submitted to a
/// queue, and the [`wgpu_hal::CommandEncoder`] that built them.
///
/// [`CommandBuffer`]: hal::Api::CommandBuffer
/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub(crate) struct EncoderInFlight {
    inner: crate::command::CommandEncoder,
    pub(crate) trackers: Tracker,
    pub(crate) temp_resources: Vec<TempResource>,
    /// We only need to keep these resources alive.
    _indirect_draw_validation_resources: crate::indirect_validation::DrawResources,

    /// These are the buffers that have been tracked by `PendingWrites`.
    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    /// These are the textures that have been tracked by `PendingWrites`.
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
}

/// A private command encoder for writes made directly on the device
/// or queue.
///
/// Operations like `buffer_unmap`, `queue_write_buffer`, and
/// `queue_write_texture` need to copy data to the GPU. At the hal
/// level, this must be done by encoding and submitting commands, but
/// these operations are not associated with any specific wgpu command
/// buffer.
///
/// Instead, `Queue::pending_writes` owns one of these values, which
/// has its own hal command encoder and resource lists. The commands
/// accumulated here are automatically submitted to the queue the next
/// time the user submits a wgpu command buffer, ahead of the user's
/// commands.
///
/// Important: when locking `pending_writes`, make sure the device's
/// trackers are not already locked, and hold the tracker lock for as
/// short a time as possible.
///
/// All uses of [`StagingBuffer`]s end up here.
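///
/// A rough usage sketch (hypothetical caller code, for illustration only):
///
/// ```ignore
/// let mut pending_writes = queue.pending_writes.lock();
/// let encoder = pending_writes.activate();          // begin recording if needed
/// // ... encode e.g. a staging-buffer -> buffer copy on `encoder` ...
/// pending_writes.consume(flushed_staging_buffer);   // keep the staging buffer alive
/// pending_writes.insert_buffer(&dst_buffer);        // remember the destination
/// // On the next `Queue::submit`, `pre_submit` turns the recorded work into an
/// // `EncoderInFlight` that is submitted ahead of the user's command buffers.
/// ```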
#[derive(Debug)]
pub(crate) struct PendingWrites {
    // The command encoder needs to be destroyed before any other resource in pending writes.
    pub command_encoder: Box<dyn hal::DynCommandEncoder>,

    /// True if `command_encoder` is in the "recording" state, as
    /// described in the docs for the [`wgpu_hal::CommandEncoder`]
    /// trait.
    ///
    /// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
    pub is_recording: bool,

    temp_resources: Vec<TempResource>,
    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
}

impl PendingWrites {
    pub fn new(command_encoder: Box<dyn hal::DynCommandEncoder>) -> Self {
        Self {
            command_encoder,
            is_recording: false,
            temp_resources: Vec::new(),
            dst_buffers: FastHashMap::default(),
            dst_textures: FastHashMap::default(),
        }
    }

    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers
            .insert(buffer.tracker_index(), buffer.clone());
    }

    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
        self.dst_textures
            .insert(texture.tracker_index(), texture.clone());
    }

    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index())
    }

    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
        self.dst_textures.contains_key(&texture.tracker_index())
    }

    pub fn consume_temp(&mut self, resource: TempResource) {
        self.temp_resources.push(resource);
    }

    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
        self.temp_resources
            .push(TempResource::StagingBuffer(buffer));
    }

    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Arc<Device>,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

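            // Swap in a fresh encoder; the encoder that recorded the pending
            // writes travels with the returned `EncoderInFlight` until the
            // submission it belongs to has completed.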
            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                inner: crate::command::CommandEncoder {
                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
                    list: vec![cmd_buf],
                    device: device.clone(),
                    is_open: false,
                    hal_label: None,
                },
                trackers: Tracker::new(),
                temp_resources: mem::take(&mut self.temp_resources),
                _indirect_draw_validation_resources: crate::indirect_validation::DrawResources::new(
                    device.clone(),
                ),
                pending_buffers,
                pending_textures,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            Ok(None)
        }
    }

    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(Some("(wgpu internal) PendingWrites"))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }
}

impl Drop for PendingWrites {
    fn drop(&mut self) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    Unmap(#[from] BufferAccessError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
    #[error(transparent)]
    ValidateAsActionsError(#[from] crate::ray_tracing::ValidateAsActionsError),
}

//TODO: move out common parts of write_xxx.

impl Queue {
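    /// Schedules a write of `data` into `buffer` at `buffer_offset`.
    ///
    /// The data is copied into an internal staging buffer right away; the
    /// GPU-side copy is recorded into `pending_writes` and executed ahead of
    /// the next queue submission.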
    pub fn write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        let buffer = buffer.get()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;
        let mut pending_writes = self.pending_writes.lock();

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let result = self.write_staging_buffer_impl(
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn create_staging_buffer(
        &self,
        buffer_size: wgt::BufferSize,
    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
        profiling::scope!("Queue::create_staging_buffer");
        resource_log!("Queue::create_staging_buffer");

        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
        let ptr = unsafe { staging_buffer.ptr() };

        Ok((staging_buffer, ptr))
    }

    pub fn write_staging_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        staging_buffer: StagingBuffer,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_staging_buffer");

        let buffer = buffer.get()?;

        let mut pending_writes = self.pending_writes.lock();

        // At this point, we have taken ownership of the staging_buffer from the
        // user. Platform validation requires that the staging buffer always
        // be freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = staging_buffer.flush();

        let result = self.write_staging_buffer_impl(
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn validate_write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::validate_write_buffer");

        let buffer = buffer.get()?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;

        Ok(())
    }

    fn validate_write_buffer_impl(
        &self,
        buffer: &Buffer,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), TransferError> {
        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
        if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedCopySize(buffer_size.get()));
        }
        if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
        }
        if buffer_offset + buffer_size.get() > buffer.size {
            return Err(TransferError::BufferOverrun {
                start_offset: buffer_offset,
                end_offset: buffer_offset + buffer_size.get(),
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }

        Ok(())
    }

    fn write_staging_buffer_impl(
        &self,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, wgt::BufferUses::COPY_DST)
        };

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = buffer.try_raw(&snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
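        // Transition the staging buffer from mapped-for-write to copy-source
        // and, if the tracker produced one, apply the destination buffer's
        // transition to COPY_DST in the same barrier batch.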
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::StateTransition {
                from: wgt::BufferUses::MAP_WRITE,
                to: wgt::BufferUses::COPY_SRC,
            },
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, &snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        // Ensure the overwritten bytes are marked as initialized so
        // they don't need to be nulled prior to mapping or binding.
        {
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

    pub fn write_texture(
        &self,
        destination: wgt::TexelCopyTextureInfo<Fallible<Texture>>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let dst = destination.texture.get()?;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        // Note: Doing the copy range validation early is important because it
        // ensures that the dimensions will not cause overflow in other parts
        // of the validation.
        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        if !dst_base.aspect.is_one() {
            return Err(TransferError::CopyAspectNotOne.into());
        }

        if !conv::is_valid_copy_dst_texture_format(dst.desc.format, destination.aspect) {
            return Err(TransferError::CopyToForbiddenTextureFormat {
                format: dst.desc.format,
                aspect: destination.aspect,
            }
            .into());
        }

        // Note: `_source_bytes_per_array_layer` is ignored because we copy the
        // data through a staging buffer, whose per-layer stride may differ.
        let (required_bytes_in_copy, _source_bytes_per_array_layer) = validate_linear_texture_data(
            data_layout,
            dst.desc.format,
            destination.aspect,
            data.len() as wgt::BufferAddress,
            CopySide::Source,
            size,
            false,
        )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first* as we don't keep track of partial texture layer inits.
        //
        // Strictly speaking we only need to clear the parts of each layer that
        // the copy leaves untouched, but tracking that would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // volume textures don't have a layer range as array volumes aren't supported
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &snatch_guard,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let dst_raw = dst.try_raw(&snatch_guard)?;

        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

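        // Rows in the staging buffer are padded to an alignment that
        // accommodates both the device's copy-pitch requirement and the
        // texel block size.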
        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
            profiling::scope!("copy aligned");
            // Fast path if the data is already optimally aligned.
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
            // Copy row by row into the optimal alignment.
            let block_rows_in_copy =
                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
            let stage_size =
                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
                    .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
            for layer in 0..size.depth_or_array_layers {
                let rows_offset = layer * rows_per_image;
                for row in rows_offset..rows_offset + height_in_blocks {
                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
                    let dst_offset = row * stage_bytes_per_row;
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            copy_bytes_per_row,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::TexelCopyBufferLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::StateTransition {
                    from: wgt::BufferUses::MAP_WRITE,
                    to: wgt::BufferUses::COPY_SRC,
                },
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, wgt::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::copy_external_image_to_texture");

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring copy_external_image_to_texture of size 0");
            return Ok(());
        }

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x + size.width > src_width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x + size.width,
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y + size.height > src_height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y + size.height,
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        // Note: Doing the copy range validation early is important because it
        // ensures that the dimensions will not cause overflow in other parts
        // of the validation.
        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first* as we don't keep track of partial texture layer inits.
        //
        // Strictly speaking we only need to clear the parts of each layer that
        // the copy leaves untouched, but tracking that would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // volume textures don't have a layer range as array volumes aren't supported
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, wgt::TextureUses::COPY_DST);

        // `copy_external_image_to_texture` is exclusive to the WebGL backend.
        // Bypass the `DynCommandEncoder` abstraction and go directly to the WebGL backend.
        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

        let submit_index;

        let res = 'error: {
            let snatch_guard = self.device.snatchable_lock.read();

            // Fence lock must be acquired after the snatch lock everywhere to avoid deadlocks.
            let mut fence = self.device.fence.write();

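            // Allocate this submission's index under the command-index write
            // lock, so each submit gets a distinct, increasing index.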
            let mut command_index_guard = self.device.command_indices.write();
            command_index_guard.active_submission_index += 1;
            submit_index = command_index_guard.active_submission_index;
            let mut active_executions = Vec::new();

            let mut used_surface_textures = track::TextureUsageScope::default();

            // Use a hashmap here to deduplicate the surface textures that are used in the command buffers.
            // This avoids vulkan deadlocking from the same surface texture being submitted multiple times.
            let mut submit_surface_textures_owned = FastHashMap::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    //TODO: if multiple command buffers are submitted, we can re-use the last
                    // native command buffer of the previous chain instead of always creating
                    // a temporary one, since the chains are not finished.

                    // finish all the command buffers first
                    for command_buffer in command_buffers {
                        profiling::scope!("process command buffer");

                        // `used_surface_textures` is reset after each use, so
                        // make sure to call `set_size` on it again here.
                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

                        // Note that we are required to invalidate all command buffers in both the success and failure paths.
                        // This is why we `continue` and don't early return via `?`.
                        #[allow(unused_mut)]
                        let mut cmd_buf_data = command_buffer.take_finished();

                        #[cfg(feature = "trace")]
                        if let Some(ref mut trace) = *self.device.trace.lock() {
                            if let Ok(ref mut cmd_buf_data) = cmd_buf_data {
                                trace.add(Action::Submit(
                                    submit_index,
                                    cmd_buf_data.commands.take().unwrap(),
                                ));
                            }
                        }

                        if first_error.is_some() {
                            continue;
                        }

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &snatch_guard,
                                    &mut submit_surface_textures_owned,
                                    &mut used_surface_textures,
                                    &mut command_index_guard,
                                );
                                if let Err(err) = res {
                                    first_error.get_or_insert(err);
                                    continue;
                                }
                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        // execute resource transitions
                        if let Err(e) = baked.encoder.open_pass(Some("(wgpu internal) Transit")) {
                            break 'error Err(e.into());
                        }

                        //Note: locking the trackers has to be done after the storages
                        let mut trackers = self.device.trackers.lock();
                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
                        {
                            break 'error Err(e.into());
                        }
                        if let Err(e) = baked.initialize_texture_memory(
                            &mut trackers,
                            &self.device,
                            &snatch_guard,
                        ) {
                            break 'error Err(e.into());
                        }

                        //Note: stateless trackers are not merged:
                        // device already knows these resources exist.
                        CommandBuffer::insert_barriers_from_device_tracker(
                            baked.encoder.raw.as_mut(),
                            &mut trackers,
                            &baked.trackers,
                            &snatch_guard,
                        );

                        if let Err(e) = baked.encoder.close_and_push_front() {
                            break 'error Err(e.into());
                        }

                        // Transition surface textures into `Present` state.
                        // Note: we could technically do it after all of the command buffers,
                        // but here we have a command encoder by hand, so it's easier to use it.
                        if !used_surface_textures.is_empty() {
                            if let Err(e) = baked.encoder.open_pass(Some("(wgpu internal) Present"))
                            {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            unsafe {
                                baked.encoder.raw.transition_textures(&texture_barriers);
                            };
                            if let Err(e) = baked.encoder.close() {
                                break 'error Err(e.into());
                            }
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        // done
                        active_executions.push(EncoderInFlight {
                            inner: baked.encoder,
                            trackers: baked.trackers,
                            temp_resources: baked.temp_resources,
                            _indirect_draw_validation_resources: baked
                                .indirect_draw_validation_resources,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

            let mut pending_writes = self.pending_writes.lock();

            {
                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
                for texture in pending_writes.dst_textures.values() {
                    match texture.try_inner(&snatch_guard) {
                        Ok(TextureInner::Native { .. }) => {}
                        Ok(TextureInner::Surface { .. }) => {
                            // Compare the Arcs by pointer as Textures don't implement Eq
                            submit_surface_textures_owned
                                .insert(Arc::as_ptr(texture), texture.clone());

                            unsafe {
                                used_surface_textures
                                    .merge_single(texture, None, wgt::TextureUses::PRESENT)
                                    .unwrap()
                            };
                        }
                        Err(e) => break 'error Err(e.into()),
                    }
                }

                if !used_surface_textures.is_empty() {
                    let mut trackers = self.device.trackers.lock();

                    let texture_barriers = trackers
                        .textures
                        .set_from_usage_scope_and_drain_transitions(
                            &used_surface_textures,
                            &snatch_guard,
                        )
                        .collect::<Vec<_>>();
                    unsafe {
                        pending_writes
                            .command_encoder
                            .transition_textures(&texture_barriers);
                    };
                }
            }

            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
                Ok(Some(pending_execution)) => {
                    active_executions.insert(0, pending_execution);
                }
                Ok(None) => {}
                Err(e) => break 'error Err(e.into()),
            }
            let hal_command_buffers = active_executions
                .iter()
                .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
                .collect::<Vec<_>>();

            {
                let mut submit_surface_textures =
                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
                        submit_surface_textures_owned.len(),
                    );

                for texture in submit_surface_textures_owned.values() {
                    let raw = match texture.inner.get(&snatch_guard) {
                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
                        _ => unreachable!(),
                    };
                    submit_surface_textures.push(raw);
                }

                if let Err(e) = unsafe {
                    self.raw().submit(
                        &hal_command_buffers,
                        &submit_surface_textures,
                        (fence.as_mut(), submit_index),
                    )
                }
                .map_err(|e| self.device.handle_hal_error(e))
                {
                    break 'error Err(e.into());
                }

                drop(command_index_guard);

                // Advance the successful submission index.
                self.device
                    .last_successful_submission_index
                    .fetch_max(submit_index, Ordering::SeqCst);
            }

            profiling::scope!("cleanup");

            // This registers the new submission with the lifetime tracker.
1305            self.lock_life()
1306                .track_submission(submit_index, active_executions);
1307            drop(pending_writes);
1308
1309            // This will schedule destruction of all resources that are no longer needed
1310            // by the user but used in the command stream, among other things.
1311            let fence_guard = RwLockWriteGuard::downgrade(fence);
1312            let (closures, result) =
1313                self.device
1314                    .maintain(fence_guard, wgt::PollType::Poll, snatch_guard);
1315            match result {
1316                Ok(status) => {
1317                    debug_assert!(matches!(
1318                        status,
1319                        wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
1320                    ));
1321                }
1322                Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
1323                Err(WaitIdleError::WrongSubmissionIndex(..)) => {
1324                    unreachable!("Cannot get WrongSubmissionIndex from Poll")
1325                }
1326                Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
1327            };
1328
1329            Ok(closures)
1330        };
1331
1332        let callbacks = match res {
1333            Ok(ok) => ok,
1334            Err(e) => return Err((submit_index, e)),
1335        };
1336
1337        // the closures should execute with nothing locked!
1338        callbacks.fire();
1339
1340        api_log!("Queue::submit returned submit index {submit_index}");
1341
1342        Ok(submit_index)
1343    }
1344
1345    pub fn get_timestamp_period(&self) -> f32 {
1346        unsafe { self.raw().get_timestamp_period() }
1347    }
1348
1349    /// `closure` is guaranteed to be called.
1350    pub fn on_submitted_work_done(
1351        &self,
1352        closure: SubmittedWorkDoneClosure,
1353    ) -> Option<SubmissionIndex> {
1354        api_log!("Queue::on_submitted_work_done");
1355        //TODO: flush pending writes
1356        self.lock_life().add_work_done_closure(closure)
1357    }
1358}
1359
1360impl Global {
    pub fn queue_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteBuffer {
                id: buffer_id,
                data: data_path,
                range: buffer_offset..buffer_offset + data.len() as u64,
                queued: true,
            });
        }

        let buffer = self.hub.buffers.get(buffer_id);
        queue.write_buffer(buffer, buffer_offset, data)
    }

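    /// Creates a mapped staging buffer of `buffer_size` bytes on the queue's
    /// device and returns its id together with a pointer to the mapped memory.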
    pub fn queue_create_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_size: wgt::BufferSize,
        id_in: Option<id::StagingBufferId>,
    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;

        let fid = self.hub.staging_buffers.prepare(id_in);
        let id = fid.assign(staging_buffer);

        Ok((id, ptr))
    }

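    /// Consumes a previously created staging buffer and schedules a copy of its
    /// contents into the destination buffer at `buffer_offset`.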
    pub fn queue_write_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        staging_buffer_id: id::StagingBufferId,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
    }

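    /// Checks whether a write of `buffer_size` bytes at `buffer_offset` would
    /// be valid for the given buffer, without performing the write.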
    pub fn queue_validate_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
    }

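    /// Schedules a write of `data`, laid out according to `data_layout`, into
    /// the region of the destination texture described by `size`. When the
    /// `trace` feature is enabled, the write is also recorded in the API trace.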
    pub fn queue_write_texture(
        &self,
        queue_id: QueueId,
        destination: &TexelCopyTextureInfo,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteTexture {
                to: *destination,
                data: data_path,
                layout: *data_layout,
                size: *size,
            });
        }

        let destination = wgt::TexelCopyTextureInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };
        queue.write_texture(destination, data, data_layout, size)
    }

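    /// WebGL only: copies an external web image source into the destination
    /// texture, carrying over the requested color space and premultiplied-alpha
    /// settings.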
    #[cfg(webgl)]
    pub fn queue_copy_external_image_to_texture(
        &self,
        queue_id: QueueId,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: crate::command::CopyExternalImageDestInfo,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let destination = wgt::CopyExternalImageDestInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
            color_space: destination.color_space,
            premultiplied_alpha: destination.premultiplied_alpha,
        };
        queue.copy_external_image_to_texture(source, destination, size)
    }

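    /// Resolves the command buffer ids and submits them to the queue. Returns
    /// the new submission index, or that index paired with the error that made
    /// the submission fail.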
    pub fn queue_submit(
        &self,
        queue_id: QueueId,
        command_buffer_ids: &[id::CommandBufferId],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        let queue = self.hub.queues.get(queue_id);
        let command_buffer_guard = self.hub.command_buffers.read();
        let command_buffers = command_buffer_ids
            .iter()
            .map(|id| command_buffer_guard.get(*id))
            .collect::<Vec<_>>();
        drop(command_buffer_guard);
        queue.submit(&command_buffers)
    }

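    /// Returns 1.0 when the device normalizes timestamps itself; otherwise
    /// forwards to the queue's HAL timestamp period.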
    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
        let queue = self.hub.queues.get(queue_id);

        if queue.device.timestamp_normalizer.get().unwrap().enabled() {
            return 1.0;
        }

        queue.get_timestamp_period()
    }

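    /// Registers `closure` to run once the currently submitted work is done and
    /// returns the submission index it is tied to; 0 means no wait is necessary.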
    pub fn queue_on_submitted_work_done(
        &self,
        queue_id: QueueId,
        closure: SubmittedWorkDoneClosure,
    ) -> SubmissionIndex {
        api_log!("Queue::on_submitted_work_done {queue_id:?}");

        //TODO: flush pending writes
        let queue = self.hub.queues.get(queue_id);
        let result = queue.on_submitted_work_done(closure);
        result.unwrap_or(0) // '0' means no wait is necessary
    }
}

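/// Pre-submission validation of a single command buffer: checks that it belongs
/// to `queue`'s device, that the buffers it uses are neither destroyed nor still
/// mapped, collects the surface textures it uses for presentation, and validates
/// any pending acceleration-structure actions.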
fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &SnatchGuard,
    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
    command_index_guard: &mut RwLockWriteGuard<CommandIndices>,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;

                match *buffer.map_state.lock() {
                    BufferMapState::Idle => (),
                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let should_extend = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        // Compare the Arcs by pointer as Textures don't implement Eq.
                        submit_surface_textures_owned.insert(Arc::as_ptr(texture), texture.clone());

                        true
                    }
                };
                if should_extend {
                    unsafe {
                        used_surface_textures
                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }

        if let Err(e) =
            cmd_buf_data.validate_acceleration_structure_actions(snatch_guard, command_index_guard)
        {
            return Err(e.into());
        }
    }
    Ok(())
}