wgpu_core/device/queue.rs

use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
use core::{
    iter,
    mem::{self, ManuallyDrop},
    ptr::NonNull,
    sync::atomic::Ordering,
};

use smallvec::SmallVec;
use thiserror::Error;

use super::{life::LifetimeTracker, Device};
use crate::device::resource::CommandIndices;
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::scratch::ScratchBuffer;
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
        ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide,
        TexelCopyTextureInfo, TransferError,
    },
    conv,
    device::{DeviceError, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    id::{self, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::{rank, Mutex, MutexGuard, RwLockWriteGuard},
    resource::{
        Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
        DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled,
        ParentDevice, ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable,
    },
    resource_log,
    snatch::SnatchGuard,
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};

pub struct Queue {
    raw: Box<dyn hal::DynQueue>,
    pub(crate) pending_writes: Mutex<PendingWrites>,
    life_tracker: Mutex<LifetimeTracker>,
    // The device needs to be dropped last (`Device.zero_buffer` might be referenced by the encoder in pending writes).
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(
        device: Arc<Device>,
        raw: Box<dyn hal::DynQueue>,
    ) -> Result<Self, DeviceError> {
        let pending_encoder = device
            .command_allocator
            .acquire_encoder(device.raw(), raw.as_ref())
            .map_err(DeviceError::from_hal);

        let pending_encoder = match pending_encoder {
            Ok(pending_encoder) => pending_encoder,
            Err(e) => {
                return Err(e);
            }
        };

        let mut pending_writes = PendingWrites::new(pending_encoder);

        let zero_buffer = device.zero_buffer.as_ref();
        pending_writes.activate();
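        // Record the zero buffer's one-time initialization: transition it to
        // COPY_DST, clear it, then transition it to COPY_SRC so that later
        // zero-initialization work can copy from it.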
        unsafe {
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::empty(),
                        to: wgt::BufferUses::COPY_DST,
                    },
                }]);
            pending_writes
                .command_encoder
                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::COPY_DST,
                        to: wgt::BufferUses::COPY_SRC,
                    },
                }]);
        }

        Ok(Queue {
            raw,
            device,
            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
        })
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }

    #[track_caller]
    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
        self.life_tracker.lock()
    }

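    /// Retire work up to `submission_index`: collect the "submitted work done"
    /// closures of completed submissions, resolve pending buffer mappings, and
    /// report whether the queue is now empty.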
    pub(crate) fn maintain(
        &self,
        submission_index: u64,
        snatch_guard: &SnatchGuard,
    ) -> (
        SmallVec<[SubmittedWorkDoneClosure; 1]>,
        Vec<super::BufferMapPendingClosure>,
        bool,
    ) {
        let mut life_tracker = self.lock_life();
        let submission_closures = life_tracker.triage_submissions(submission_index);

        let mapping_closures = life_tracker.handle_mapping(snatch_guard);

        let queue_empty = life_tracker.queue_empty();

        (submission_closures, mapping_closures, queue_empty)
    }
}

crate::impl_resource_type!(Queue);
// TODO: https://github.com/gfx-rs/wgpu/issues/4014
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        let last_successful_submission_index = self
            .device
            .last_successful_submission_index
            .load(Ordering::Acquire);

        let fence = self.device.fence.read();

        // Try waiting on the last submission using the following sequence of timeouts
        let timeouts_in_ms = [100, 200, 400, 800, 1600, 3200];
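        // Exponential backoff: up to six attempts totalling 6.3 seconds before
        // we give up and panic (see below).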

        for (i, timeout_ms) in timeouts_in_ms.into_iter().enumerate() {
            let is_last_iter = i == timeouts_in_ms.len() - 1;

            api_log!(
                "Waiting on last submission. try: {}/{}. timeout: {}ms",
                i + 1,
                timeouts_in_ms.len(),
                timeout_ms
            );

            let wait_res = unsafe {
                self.device.raw().wait(
                    fence.as_ref(),
                    last_successful_submission_index,
                    #[cfg(not(target_arch = "wasm32"))]
                    timeout_ms,
                    #[cfg(target_arch = "wasm32")]
                    0, // WebKit and Chromium don't support a non-0 timeout
                )
            };
            // Note: If we don't panic below we are in UB land (destroying resources while they are still in use by the GPU).
            match wait_res {
                Ok(true) => break,
                Ok(false) => {
                    // It's fine that we timed out on WebGL; GL objects can be deleted early as they
                    // will be kept around by the driver if GPU work hasn't finished.
                    // Moreover, the way we emulate read mappings on WebGL allows us to execute map_buffer earlier than on other
                    // backends since getBufferSubData is synchronous with respect to the other previously enqueued GL commands.
                    // Relying on this behavior breaks the clean abstraction wgpu-hal tries to maintain and
                    // we should find ways to improve this. See https://github.com/gfx-rs/wgpu/issues/6538.
                    #[cfg(target_arch = "wasm32")]
                    {
                        break;
                    }
                    #[cfg(not(target_arch = "wasm32"))]
                    {
                        if is_last_iter {
                            panic!(
                                "We timed out while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                }
                Err(e) => match e {
                    hal::DeviceError::OutOfMemory => {
                        if is_last_iter {
                            panic!(
                                "We ran into an OOM error while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                    hal::DeviceError::Lost => {
                        self.device.handle_hal_error(e); // will lose the device
                        break;
                    }
                    hal::DeviceError::ResourceCreationFailed => unreachable!(),
                    hal::DeviceError::Unexpected => {
                        panic!(
                            "We ran into an unexpected error while waiting on the last successful submission to complete!"
                        );
                    }
                },
            }
        }
        drop(fence);

        let snatch_guard = self.device.snatchable_lock.read();
        let (submission_closures, mapping_closures, queue_empty) =
            self.maintain(last_successful_submission_index, &snatch_guard);
        drop(snatch_guard);

        assert!(queue_empty);

        let closures = crate::device::UserClosures {
            mappings: mapping_closures,
            submissions: submission_closures,
            device_lost_invocations: SmallVec::new(),
        };

        closures.fire();
    }
}

#[cfg(send_sync)]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;

/// A texture or buffer to be freed soon.
///
/// This is just a tagged raw texture or buffer, generally about to be added to
/// some other more specific container like:
///
/// - `PendingWrites::temp_resources`: resources used by queue writes and
///   unmaps, waiting to be folded in with the next queue submission
///
/// - `ActiveSubmission::temp_resources`: temporary resources used by a queue
///   submission, to be freed when it completes
#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    ScratchBuffer(ScratchBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

/// A series of raw [`CommandBuffer`]s that have been submitted to a
/// queue, and the [`wgpu_hal::CommandEncoder`] that built them.
///
/// [`CommandBuffer`]: hal::Api::CommandBuffer
/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub(crate) struct EncoderInFlight {
    inner: crate::command::CommandEncoder,
    pub(crate) trackers: Tracker,
    pub(crate) temp_resources: Vec<TempResource>,
    /// We only need to keep these resources alive.
    _indirect_draw_validation_resources: crate::indirect_validation::DrawResources,

    /// These are the buffers that have been tracked by `PendingWrites`.
    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    /// These are the textures that have been tracked by `PendingWrites`.
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
}

/// A private command encoder for writes made directly on the device
/// or queue.
///
/// Operations like `buffer_unmap`, `queue_write_buffer`, and
/// `queue_write_texture` need to copy data to the GPU. At the hal
/// level, this must be done by encoding and submitting commands, but
/// these operations are not associated with any specific wgpu command
/// buffer.
///
/// Instead, `Queue::pending_writes` owns one of these values, which
/// has its own hal command encoder and resource lists. The commands
/// accumulated here are automatically submitted to the queue the next
/// time the user submits a wgpu command buffer, ahead of the user's
/// commands.
///
/// Important:
/// When locking `pending_writes`, be sure that the device's trackers are not
/// already locked, and hold any tracker lock for the shortest possible span.
///
/// All uses of [`StagingBuffer`]s end up here.
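///
/// A simplified, illustrative sketch of how a queue write flows through this
/// type (it mirrors [`Queue::write_buffer`]; not a public API):
///
/// ```ignore
/// // Make sure the private encoder is recording, then record the copy.
/// let mut pending_writes = queue.pending_writes.lock();
/// let encoder = pending_writes.activate();
/// unsafe {
///     encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
/// }
/// // Keep the staging buffer and destination alive until the next submission
/// // (which carries these commands) has completed.
/// pending_writes.consume(staging_buffer);
/// pending_writes.insert_buffer(&buffer);
/// ```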
#[derive(Debug)]
pub(crate) struct PendingWrites {
    // The command encoder needs to be destroyed before any other resource in pending writes.
    pub command_encoder: Box<dyn hal::DynCommandEncoder>,

    /// True if `command_encoder` is in the "recording" state, as
    /// described in the docs for the [`wgpu_hal::CommandEncoder`]
    /// trait.
    ///
    /// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
    pub is_recording: bool,

    temp_resources: Vec<TempResource>,
    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
}

impl PendingWrites {
    pub fn new(command_encoder: Box<dyn hal::DynCommandEncoder>) -> Self {
        Self {
            command_encoder,
            is_recording: false,
            temp_resources: Vec::new(),
            dst_buffers: FastHashMap::default(),
            dst_textures: FastHashMap::default(),
        }
    }

    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers
            .insert(buffer.tracker_index(), buffer.clone());
    }

    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
        self.dst_textures
            .insert(texture.tracker_index(), texture.clone());
    }

    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index())
    }

    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
        self.dst_textures.contains_key(&texture.tracker_index())
    }

    pub fn consume_temp(&mut self, resource: TempResource) {
        self.temp_resources.push(resource);
    }

    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
        self.temp_resources
            .push(TempResource::StagingBuffer(buffer));
    }

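    /// If anything was recorded, close the current encoding and package it,
    /// together with the tracked destination resources and temporary resources,
    /// into an [`EncoderInFlight`] that is submitted ahead of the user's command
    /// buffers. A fresh encoder is installed so later writes can keep accumulating.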
    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Arc<Device>,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                inner: crate::command::CommandEncoder {
                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
                    list: vec![cmd_buf],
                    device: device.clone(),
                    is_open: false,
                    hal_label: None,
                },
                trackers: Tracker::new(),
                temp_resources: mem::take(&mut self.temp_resources),
                _indirect_draw_validation_resources: crate::indirect_validation::DrawResources::new(
                    device.clone(),
                ),
                pending_buffers,
                pending_textures,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            Ok(None)
        }
    }

    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(Some("(wgpu internal) PendingWrites"))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }
}

impl Drop for PendingWrites {
    fn drop(&mut self) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    Unmap(#[from] BufferAccessError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
    #[error(transparent)]
    ValidateAsActionsError(#[from] crate::ray_tracing::ValidateAsActionsError),
}

//TODO: move out common parts of write_xxx.

impl Queue {
    pub fn write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        let buffer = buffer.get()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        let snatch_guard = self.device.snatchable_lock.read();

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;
        let mut pending_writes = self.pending_writes.lock();

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn create_staging_buffer(
        &self,
        buffer_size: wgt::BufferSize,
    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
        profiling::scope!("Queue::create_staging_buffer");
        resource_log!("Queue::create_staging_buffer");

        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
        let ptr = unsafe { staging_buffer.ptr() };

        Ok((staging_buffer, ptr))
    }

    pub fn write_staging_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        staging_buffer: StagingBuffer,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_staging_buffer");

        let buffer = buffer.get()?;

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        // At this point, we have taken ownership of the staging_buffer from the
        // user. Platform validation requires that the staging buffer always
        // be freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = staging_buffer.flush();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn validate_write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::validate_write_buffer");

        let buffer = buffer.get()?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;

        Ok(())
    }

    fn validate_write_buffer_impl(
        &self,
        buffer: &Buffer,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), TransferError> {
        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
        if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedCopySize(buffer_size.get()));
        }
        if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
        }
        if buffer_offset + buffer_size.get() > buffer.size {
            return Err(TransferError::BufferOverrun {
                start_offset: buffer_offset,
                end_offset: buffer_offset + buffer_size.get(),
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }

        Ok(())
    }

    fn write_staging_buffer_impl(
        &self,
        snatch_guard: &SnatchGuard,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, wgt::BufferUses::COPY_DST)
        };

        let dst_raw = buffer.try_raw(snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
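        // Two transitions precede the copy: the staging buffer goes from
        // MAP_WRITE to COPY_SRC, and the destination buffer (if its tracked
        // state changed) is transitioned to COPY_DST.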
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::StateTransition {
                from: wgt::BufferUses::MAP_WRITE,
                to: wgt::BufferUses::COPY_SRC,
            },
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        // Ensure the overwritten bytes are marked as initialized so
        // they don't need to be nulled prior to mapping or binding.
        {
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

    pub fn write_texture(
        &self,
        destination: wgt::TexelCopyTextureInfo<Fallible<Texture>>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let dst = destination.texture.get()?;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        // Note: Doing the copy range validation early is important because it ensures
        // that the dimensions are not going to cause overflow in other parts of the validation.
        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        if !dst_base.aspect.is_one() {
            return Err(TransferError::CopyAspectNotOne.into());
        }

        if !conv::is_valid_copy_dst_texture_format(dst.desc.format, destination.aspect) {
            return Err(TransferError::CopyToForbiddenTextureFormat {
                format: dst.desc.format,
                aspect: destination.aspect,
            }
            .into());
        }

        // Note: `_source_bytes_per_array_layer` is ignored since we
        // have a staging copy, and it can have a different value.
        let (required_bytes_in_copy, _source_bytes_per_array_layer) = validate_linear_texture_data(
            data_layout,
            dst.desc.format,
            destination.aspect,
            data.len() as wgt::BufferAddress,
            CopySide::Source,
            size,
            false,
        )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first* as we don't keep track of partial texture layer inits.
        //
        // Strictly speaking we only need to clear the areas of a layer that the
        // copy leaves untouched, but this would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // volume textures don't have a layer range as array volumes aren't supported
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &snatch_guard,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let dst_raw = dst.try_raw(&snatch_guard)?;

        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

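        // Each staged row must satisfy both the HAL's `buffer_copy_pitch`
        // requirement and the format's block size, so round `bytes_in_last_row`
        // up to an alignment that is a multiple of both. Illustrative numbers:
        // with a 4-byte block and a 256-byte pitch requirement, a 250-byte row
        // is staged as 256 bytes per row.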
        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
            profiling::scope!("copy aligned");
            // Fast path if the data is already optimally aligned.
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
            // Copy row by row into the optimal alignment.
            let block_rows_in_copy =
                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
            let stage_size =
                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
                    .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
            for layer in 0..size.depth_or_array_layers {
                let rows_offset = layer * rows_per_image;
                for row in rows_offset..rows_offset + height_in_blocks {
                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
                    let dst_offset = row * stage_bytes_per_row;
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            copy_bytes_per_row,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::TexelCopyBufferLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::StateTransition {
                    from: wgt::BufferUses::MAP_WRITE,
                    to: wgt::BufferUses::COPY_SRC,
                },
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, wgt::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::copy_external_image_to_texture");

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring copy_external_image_to_texture of size 0");
            return Ok(());
        }

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x + size.width > src_width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x + size.width,
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y + size.height > src_height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y + size.height,
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        // Note: Doing the copy range validation early is important because it ensures
        // that the dimensions are not going to cause overflow in other parts of the validation.
        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first* as we don't keep track of partial texture layer inits.
        //
        // Strictly speaking we only need to clear the areas of a layer that the
        // copy leaves untouched, but this would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // volume textures don't have a layer range as array volumes aren't supported
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, wgt::TextureUses::COPY_DST);

        // `copy_external_image_to_texture` is exclusive to the WebGL backend.
        // Skip the `DynCommandEncoder` abstraction and go directly to the WebGL backend.
        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

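        // Overall flow: take the snatch, fence, and command-index locks; validate
        // and finish each command buffer; fold in any accumulated `PendingWrites`
        // work; submit everything to the HAL queue; then register the submission
        // with the lifetime tracker and run a non-blocking poll to fire callbacks.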
        let submit_index;

        let res = 'error: {
            let snatch_guard = self.device.snatchable_lock.read();

            // Fence lock must be acquired after the snatch lock everywhere to avoid deadlocks.
            let mut fence = self.device.fence.write();

            let mut command_index_guard = self.device.command_indices.write();
            command_index_guard.active_submission_index += 1;
            submit_index = command_index_guard.active_submission_index;
            let mut active_executions = Vec::new();

            let mut used_surface_textures = track::TextureUsageScope::default();

            // Use a hashmap here to deduplicate the surface textures that are used in the command buffers.
            // This avoids Vulkan deadlocks caused by the same surface texture being submitted multiple times.
            let mut submit_surface_textures_owned = FastHashMap::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    //TODO: if multiple command buffers are submitted, we can re-use the last
                    // native command buffer of the previous chain instead of always creating
                    // a temporary one, since the chains are not finished.

                    // finish all the command buffers first
                    for command_buffer in command_buffers {
                        profiling::scope!("process command buffer");

                        // we reset the used surface textures every time we use
                        // it, so make sure to set_size on it.
                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

                        // Note that we are required to invalidate all command buffers in both the success and failure paths.
                        // This is why we `continue` and don't early return via `?`.
                        #[allow(unused_mut)]
                        let mut cmd_buf_data = command_buffer.take_finished();

                        #[cfg(feature = "trace")]
                        if let Some(ref mut trace) = *self.device.trace.lock() {
                            if let Ok(ref mut cmd_buf_data) = cmd_buf_data {
                                trace.add(Action::Submit(
                                    submit_index,
                                    cmd_buf_data.commands.take().unwrap(),
                                ));
                            }
                        }

                        if first_error.is_some() {
                            continue;
                        }

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &snatch_guard,
                                    &mut submit_surface_textures_owned,
                                    &mut used_surface_textures,
                                    &mut command_index_guard,
                                );
                                if let Err(err) = res {
                                    first_error.get_or_insert(err);
                                    continue;
                                }
                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        // execute resource transitions
                        if let Err(e) = baked.encoder.open_pass(Some("(wgpu internal) Transit")) {
                            break 'error Err(e.into());
                        }

                        //Note: locking the trackers has to be done after the storages
                        let mut trackers = self.device.trackers.lock();
                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
                        {
                            break 'error Err(e.into());
                        }
                        if let Err(e) = baked.initialize_texture_memory(
                            &mut trackers,
                            &self.device,
                            &snatch_guard,
                        ) {
                            break 'error Err(e.into());
                        }

                        //Note: stateless trackers are not merged:
                        // device already knows these resources exist.
                        CommandBuffer::insert_barriers_from_device_tracker(
                            baked.encoder.raw.as_mut(),
                            &mut trackers,
                            &baked.trackers,
                            &snatch_guard,
                        );

                        if let Err(e) = baked.encoder.close_and_push_front() {
                            break 'error Err(e.into());
                        }

                        // Transition surface textures into `Present` state.
                        // Note: we could technically do it after all of the command buffers,
                        // but here we have a command encoder at hand, so it's easier to use it.
                        if !used_surface_textures.is_empty() {
                            if let Err(e) = baked.encoder.open_pass(Some("(wgpu internal) Present"))
                            {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            unsafe {
                                baked.encoder.raw.transition_textures(&texture_barriers);
                            };
                            if let Err(e) = baked.encoder.close() {
                                break 'error Err(e.into());
                            }
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        // done
                        active_executions.push(EncoderInFlight {
                            inner: baked.encoder,
                            trackers: baked.trackers,
                            temp_resources: baked.temp_resources,
                            _indirect_draw_validation_resources: baked
                                .indirect_draw_validation_resources,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

            let mut pending_writes = self.pending_writes.lock();

            {
                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
                for texture in pending_writes.dst_textures.values() {
                    match texture.try_inner(&snatch_guard) {
                        Ok(TextureInner::Native { .. }) => {}
                        Ok(TextureInner::Surface { .. }) => {
                            // Compare the Arcs by pointer as Textures don't implement Eq
                            submit_surface_textures_owned
                                .insert(Arc::as_ptr(texture), texture.clone());

                            unsafe {
                                used_surface_textures
                                    .merge_single(texture, None, wgt::TextureUses::PRESENT)
                                    .unwrap()
                            };
                        }
                        Err(e) => break 'error Err(e.into()),
                    }
                }

                if !used_surface_textures.is_empty() {
                    let mut trackers = self.device.trackers.lock();

                    let texture_barriers = trackers
                        .textures
                        .set_from_usage_scope_and_drain_transitions(
                            &used_surface_textures,
                            &snatch_guard,
                        )
                        .collect::<Vec<_>>();
                    unsafe {
                        pending_writes
                            .command_encoder
                            .transition_textures(&texture_barriers);
                    };
                }
            }

            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
                Ok(Some(pending_execution)) => {
                    active_executions.insert(0, pending_execution);
                }
                Ok(None) => {}
                Err(e) => break 'error Err(e.into()),
            }
            let hal_command_buffers = active_executions
                .iter()
                .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
                .collect::<Vec<_>>();

            {
                let mut submit_surface_textures =
                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
                        submit_surface_textures_owned.len(),
                    );

                for texture in submit_surface_textures_owned.values() {
                    let raw = match texture.inner.get(&snatch_guard) {
                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
                        _ => unreachable!(),
                    };
                    submit_surface_textures.push(raw);
                }

                if let Err(e) = unsafe {
                    self.raw().submit(
                        &hal_command_buffers,
                        &submit_surface_textures,
                        (fence.as_mut(), submit_index),
                    )
                }
                .map_err(|e| self.device.handle_hal_error(e))
                {
                    break 'error Err(e.into());
                }

                drop(command_index_guard);

                // Advance the successful submission index.
                self.device
                    .last_successful_submission_index
                    .fetch_max(submit_index, Ordering::SeqCst);
            }

            profiling::scope!("cleanup");

            // This will register the new submission with the lifetime tracker.
1310            self.lock_life()
1311                .track_submission(submit_index, active_executions);
1312            drop(pending_writes);
1313
1314            // This will schedule destruction of all resources that are no longer needed
1315            // by the user but used in the command stream, among other things.
1316            let fence_guard = RwLockWriteGuard::downgrade(fence);
1317            let (closures, result) =
1318                self.device
1319                    .maintain(fence_guard, wgt::PollType::Poll, snatch_guard);
1320            match result {
1321                Ok(status) => {
1322                    debug_assert!(matches!(
1323                        status,
1324                        wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
1325                    ));
1326                }
1327                Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
1328                Err(WaitIdleError::WrongSubmissionIndex(..)) => {
1329                    unreachable!("Cannot get WrongSubmissionIndex from Poll")
1330                }
1331                Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
1332            };
1333
1334            Ok(closures)
1335        };
1336
1337        let callbacks = match res {
1338            Ok(ok) => ok,
1339            Err(e) => return Err((submit_index, e)),
1340        };
1341
1342        // the closures should execute with nothing locked!
1343        callbacks.fire();
1344
1345        api_log!("Queue::submit returned submit index {submit_index}");
1346
1347        Ok(submit_index)
1348    }
1349
1350    pub fn get_timestamp_period(&self) -> f32 {
1351        unsafe { self.raw().get_timestamp_period() }
1352    }
1353
1354    /// `closure` is guaranteed to be called.
1355    pub fn on_submitted_work_done(
1356        &self,
1357        closure: SubmittedWorkDoneClosure,
1358    ) -> Option<SubmissionIndex> {
1359        api_log!("Queue::on_submitted_work_done");
1360        //TODO: flush pending writes
1361        self.lock_life().add_work_done_closure(closure)
1362    }
1363}
1364
1365impl Global {
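    /// Schedules a write of `data` into the buffer at `buffer_offset`.
    ///
    /// The data is staged internally and copied into the buffer before the
    /// next submission on this queue.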
1366    pub fn queue_write_buffer(
1367        &self,
1368        queue_id: QueueId,
1369        buffer_id: id::BufferId,
1370        buffer_offset: wgt::BufferAddress,
1371        data: &[u8],
1372    ) -> Result<(), QueueWriteError> {
1373        let queue = self.hub.queues.get(queue_id);
1374
1375        #[cfg(feature = "trace")]
1376        if let Some(ref mut trace) = *queue.device.trace.lock() {
1377            let data_path = trace.make_binary("bin", data);
1378            trace.add(Action::WriteBuffer {
1379                id: buffer_id,
1380                data: data_path,
1381                range: buffer_offset..buffer_offset + data.len() as u64,
1382                queued: true,
1383            });
1384        }
1385
1386        let buffer = self.hub.buffers.get(buffer_id);
1387        queue.write_buffer(buffer, buffer_offset, data)
1388    }
1389
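    /// Creates a mapped staging buffer of `buffer_size` bytes, returning its id
    /// together with a pointer to its mapped memory.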
1390    pub fn queue_create_staging_buffer(
1391        &self,
1392        queue_id: QueueId,
1393        buffer_size: wgt::BufferSize,
1394        id_in: Option<id::StagingBufferId>,
1395    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
1396        let queue = self.hub.queues.get(queue_id);
1397        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;
1398
1399        let fid = self.hub.staging_buffers.prepare(id_in);
1400        let id = fid.assign(staging_buffer);
1401
1402        Ok((id, ptr))
1403    }
1404
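    /// Consumes a previously created staging buffer and schedules its contents
    /// to be copied into the destination buffer at `buffer_offset`.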
1405    pub fn queue_write_staging_buffer(
1406        &self,
1407        queue_id: QueueId,
1408        buffer_id: id::BufferId,
1409        buffer_offset: wgt::BufferAddress,
1410        staging_buffer_id: id::StagingBufferId,
1411    ) -> Result<(), QueueWriteError> {
1412        let queue = self.hub.queues.get(queue_id);
1413        let buffer = self.hub.buffers.get(buffer_id);
1414        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
1415        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
1416    }
1417
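    /// Checks whether a write of `buffer_size` bytes at `buffer_offset` to the
    /// given buffer would be valid, without writing any data.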
1418    pub fn queue_validate_write_buffer(
1419        &self,
1420        queue_id: QueueId,
1421        buffer_id: id::BufferId,
1422        buffer_offset: u64,
1423        buffer_size: wgt::BufferSize,
1424    ) -> Result<(), QueueWriteError> {
1425        let queue = self.hub.queues.get(queue_id);
1426        let buffer = self.hub.buffers.get(buffer_id);
1427        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
1428    }
1429
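    /// Schedules a write of `data`, laid out according to `data_layout`, into
    /// the region of the destination texture described by `destination` and `size`.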
1430    pub fn queue_write_texture(
1431        &self,
1432        queue_id: QueueId,
1433        destination: &TexelCopyTextureInfo,
1434        data: &[u8],
1435        data_layout: &wgt::TexelCopyBufferLayout,
1436        size: &wgt::Extent3d,
1437    ) -> Result<(), QueueWriteError> {
1438        let queue = self.hub.queues.get(queue_id);
1439
1440        #[cfg(feature = "trace")]
1441        if let Some(ref mut trace) = *queue.device.trace.lock() {
1442            let data_path = trace.make_binary("bin", data);
1443            trace.add(Action::WriteTexture {
1444                to: *destination,
1445                data: data_path,
1446                layout: *data_layout,
1447                size: *size,
1448            });
1449        }
1450
1451        let destination = wgt::TexelCopyTextureInfo {
1452            texture: self.hub.textures.get(destination.texture),
1453            mip_level: destination.mip_level,
1454            origin: destination.origin,
1455            aspect: destination.aspect,
1456        };
1457        queue.write_texture(destination, data, data_layout, size)
1458    }
1459
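    /// WebGL only: copies `size` texels from an external image `source` into
    /// the destination texture.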
1460    #[cfg(webgl)]
1461    pub fn queue_copy_external_image_to_texture(
1462        &self,
1463        queue_id: QueueId,
1464        source: &wgt::CopyExternalImageSourceInfo,
1465        destination: crate::command::CopyExternalImageDestInfo,
1466        size: wgt::Extent3d,
1467    ) -> Result<(), QueueWriteError> {
1468        let queue = self.hub.queues.get(queue_id);
1469        let destination = wgt::CopyExternalImageDestInfo {
1470            texture: self.hub.textures.get(destination.texture),
1471            mip_level: destination.mip_level,
1472            origin: destination.origin,
1473            aspect: destination.aspect,
1474            color_space: destination.color_space,
1475            premultiplied_alpha: destination.premultiplied_alpha,
1476        };
1477        queue.copy_external_image_to_texture(source, destination, size)
1478    }
1479
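    /// Submits the given command buffers to the queue, returning the new
    /// submission index on success (and alongside the error on failure).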
1480    pub fn queue_submit(
1481        &self,
1482        queue_id: QueueId,
1483        command_buffer_ids: &[id::CommandBufferId],
1484    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
1485        let queue = self.hub.queues.get(queue_id);
1486        let command_buffer_guard = self.hub.command_buffers.read();
1487        let command_buffers = command_buffer_ids
1488            .iter()
1489            .map(|id| command_buffer_guard.get(*id))
1490            .collect::<Vec<_>>();
1491        drop(command_buffer_guard);
1492        queue.submit(&command_buffers)
1493    }
1494
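    /// Returns the multiplier that converts this queue's timestamp query
    /// values into nanoseconds.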
1495    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
1496        let queue = self.hub.queues.get(queue_id);
1497
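        // With timestamp normalization enabled, query results are already
        // rescaled to nanoseconds, so no further scaling is needed.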
1498        if queue.device.timestamp_normalizer.get().unwrap().enabled() {
1499            return 1.0;
1500        }
1501
1502        queue.get_timestamp_period()
1503    }
1504
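    /// Registers `closure` to be invoked once the queue's currently submitted
    /// work has completed. Returns the submission index being waited on;
    /// `0` means no wait is necessary.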
1505    pub fn queue_on_submitted_work_done(
1506        &self,
1507        queue_id: QueueId,
1508        closure: SubmittedWorkDoneClosure,
1509    ) -> SubmissionIndex {
1510        api_log!("Queue::on_submitted_work_done {queue_id:?}");
1511
1512        // TODO: flush pending writes
1513        let queue = self.hub.queues.get(queue_id);
1514        let result = queue.on_submitted_work_done(closure);
1515        result.unwrap_or(0) // '0' means no wait is necessary
1516    }
1517}
1518
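/// Validates that `command_buffer` may be submitted to `queue`.
///
/// Checks that the command buffer belongs to the queue's device, that none of
/// the buffers it uses are destroyed or still mapped, records the surface
/// textures it uses so they can be transitioned for presentation, and
/// validates any recorded acceleration-structure actions.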
1519fn validate_command_buffer(
1520    command_buffer: &CommandBuffer,
1521    queue: &Queue,
1522    cmd_buf_data: &crate::command::CommandBufferMutable,
1523    snatch_guard: &SnatchGuard,
1524    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
1525    used_surface_textures: &mut track::TextureUsageScope,
1526    command_index_guard: &mut RwLockWriteGuard<CommandIndices>,
1527) -> Result<(), QueueSubmitError> {
1528    command_buffer.same_device_as(queue)?;
1529
1530    {
1531        profiling::scope!("check resource state");
1532
1533        {
1534            profiling::scope!("buffers");
1535            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
1536                buffer.check_destroyed(snatch_guard)?;
1537
1538                match *buffer.map_state.lock() {
1539                    BufferMapState::Idle => (),
1540                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
1541                }
1542            }
1543        }
1544        {
1545            profiling::scope!("textures");
1546            for texture in cmd_buf_data.trackers.textures.used_resources() {
1547                let should_extend = match texture.try_inner(snatch_guard)? {
1548                    TextureInner::Native { .. } => false,
1549                    TextureInner::Surface { .. } => {
1550                        // Compare the Arcs by pointer as Textures don't implement Eq.
1551                        submit_surface_textures_owned.insert(Arc::as_ptr(texture), texture.clone());
1552
1553                        true
1554                    }
1555                };
1556                if should_extend {
1557                    unsafe {
1558                        used_surface_textures
1559                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
1560                            .unwrap();
1561                    };
1562                }
1563            }
1564        }
1565
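        // Ensure the acceleration-structure build and usage actions recorded
        // in this command buffer are still valid at submission time.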
1566        if let Err(e) =
1567            cmd_buf_data.validate_acceleration_structure_actions(snatch_guard, command_index_guard)
1568        {
1569            return Err(e.into());
1570        }
1571    }
1572    Ok(())
1573}