use alloc::{boxed::Box, string::ToString, sync::Arc, vec, vec::Vec};
use core::{
    iter,
    mem::{self, ManuallyDrop},
    num::NonZeroU64,
    ptr::NonNull,
    sync::atomic::Ordering,
};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::{
    error::{ErrorType, WebGpuError},
    AccelerationStructureFlags,
};

use super::{life::LifetimeTracker, Device};
#[cfg(feature = "trace")]
use crate::device::trace::{Action, IntoTrace};
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_buffer_copy,
        validate_texture_copy_dst_format, validate_texture_copy_range, ClearError,
        CommandAllocator, CommandBuffer, CommandEncoder, CommandEncoderError, CopySide,
        TransferError,
    },
    device::{DeviceError, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    hal_label,
    id::{self, BlasId, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::{rank, Mutex, MutexGuard, RwLock, RwLockWriteGuard},
    ray_tracing::{BlasCompactReadyPendingClosure, CompactBlasError},
    resource::{
        Blas, BlasCompactState, Buffer, BufferAccessError, BufferMapState, DestroyedBuffer,
        DestroyedResourceError, DestroyedTexture, Fallible, FlushedStagingBuffer,
        InvalidResourceError, Labeled, ParentDevice, ResourceErrorIdent, StagingBuffer, Texture,
        TextureInner, Trackable, TrackingData,
    },
    resource_log,
    scratch::ScratchBuffer,
    snatch::{SnatchGuard, Snatchable},
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};
use crate::{device::resource::CommandIndices, resource::RawResourceAccess};
pub struct Queue {
    raw: Box<dyn hal::DynQueue>,
    pub(crate) pending_writes: Mutex<PendingWrites>,
    life_tracker: Mutex<LifetimeTracker>,
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(
        device: Arc<Device>,
        raw: Box<dyn hal::DynQueue>,
        instance_flags: wgt::InstanceFlags,
    ) -> Result<Self, DeviceError> {
        let pending_encoder = device
            .command_allocator
            .acquire_encoder(device.raw(), raw.as_ref())
            .map_err(DeviceError::from_hal)?;

        let mut pending_writes = PendingWrites::new(pending_encoder, instance_flags);

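        // Prime the zero buffer: transition it to COPY_DST, clear it to
        // zeroes, then leave it in COPY_SRC so later commands can copy from it.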
        let zero_buffer = device.zero_buffer.as_ref();
        pending_writes.activate();
        unsafe {
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::empty(),
                        to: wgt::BufferUses::COPY_DST,
                    },
                }]);
            pending_writes
                .command_encoder
                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::COPY_DST,
                        to: wgt::BufferUses::COPY_SRC,
                    },
                }]);
        }

        Ok(Queue {
            raw,
            device,
            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
        })
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }

    #[track_caller]
    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
        self.life_tracker.lock()
    }

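    /// Check off work completed up to `submission_index` and collect the
    /// callbacks that are now ready to fire: submitted-work-done closures,
    /// buffer-map closures, and BLAS-compaction-ready closures. The final
    /// `bool` reports whether the queue is now empty.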
    pub(crate) fn maintain(
        &self,
        submission_index: u64,
        snatch_guard: &SnatchGuard,
    ) -> (
        SmallVec<[SubmittedWorkDoneClosure; 1]>,
        Vec<super::BufferMapPendingClosure>,
        Vec<BlasCompactReadyPendingClosure>,
        bool,
    ) {
        let mut life_tracker = self.lock_life();
        let submission_closures = life_tracker.triage_submissions(submission_index);

        let mapping_closures = life_tracker.handle_mapping(snatch_guard);
        let blas_closures = life_tracker.handle_compact_read_back();

        let queue_empty = life_tracker.queue_empty();

        (
            submission_closures,
            mapping_closures,
            blas_closures,
            queue_empty,
        )
    }
}

crate::impl_resource_type!(Queue);
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

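// Dropping the queue blocks until the last successful submission finishes on
// the GPU, retrying the wait with escalating timeouts, and then fires any
// callbacks that became ready.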
impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        let last_successful_submission_index = self
            .device
            .last_successful_submission_index
            .load(Ordering::Acquire);

        let fence = self.device.fence.read();

        let timeouts_in_ms = [100, 200, 400, 800, 1600, 3200];

        for (i, timeout_ms) in timeouts_in_ms.into_iter().enumerate() {
            let is_last_iter = i == timeouts_in_ms.len() - 1;

            api_log!(
                "Waiting on last submission. try: {}/{}. timeout: {}ms",
                i + 1,
                timeouts_in_ms.len(),
                timeout_ms
            );

            let wait_res = unsafe {
                self.device.raw().wait(
                    fence.as_ref(),
                    last_successful_submission_index,
                    #[cfg(not(target_arch = "wasm32"))]
                    Some(core::time::Duration::from_millis(timeout_ms)),
                    #[cfg(target_arch = "wasm32")]
                    Some(core::time::Duration::ZERO),
                )
            };
            match wait_res {
                Ok(true) => break,
                Ok(false) => {
                    #[cfg(target_arch = "wasm32")]
                    {
                        break;
                    }
                    #[cfg(not(target_arch = "wasm32"))]
                    {
                        if is_last_iter {
                            panic!(
                                "We timed out while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                }
                Err(e) => match e {
                    hal::DeviceError::OutOfMemory => {
                        if is_last_iter {
                            panic!(
                                "We ran into an OOM error while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                    hal::DeviceError::Lost => {
                        self.device.handle_hal_error(e);
                        break;
                    }
                    hal::DeviceError::Unexpected => {
                        panic!(
                            "We ran into an unexpected error while waiting on the last successful submission to complete!"
                        );
                    }
                },
            }
        }
        drop(fence);

        let snatch_guard = self.device.snatchable_lock.read();
        let (submission_closures, mapping_closures, blas_compact_ready_closures, queue_empty) =
            self.maintain(last_successful_submission_index, &snatch_guard);
        drop(snatch_guard);

        assert!(queue_empty);

        let closures = crate::device::UserClosures {
            mappings: mapping_closures,
            blas_compact_ready: blas_compact_ready_closures,
            submissions: submission_closures,
            device_lost_invocations: SmallVec::new(),
        };

        closures.fire();
    }
}

#[cfg(send_sync)]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;

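/// A temporary resource that must be kept alive until the submission that
/// uses it has finished executing on the GPU.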
#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    ScratchBuffer(ScratchBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

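/// A command encoder that has been submitted and may still be executing on
/// the GPU, together with everything that must be kept alive until it
/// completes.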
pub(crate) struct EncoderInFlight {
    inner: crate::command::InnerCommandEncoder,
    pub(crate) trackers: Tracker,
    pub(crate) temp_resources: Vec<TempResource>,
    _indirect_draw_validation_resources: crate::indirect_validation::DrawResources,

    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    pub(crate) pending_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
}

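/// The encoder and state that accumulate the implicit writes issued through
/// `Queue::write_buffer` and `Queue::write_texture` between submissions.
/// Its commands are submitted in front of the user's command buffers on the
/// next `Queue::submit`, so queued writes are visible to that submission.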
#[derive(Debug)]
pub(crate) struct PendingWrites {
    pub command_encoder: Box<dyn hal::DynCommandEncoder>,

    pub is_recording: bool,

    temp_resources: Vec<TempResource>,
    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    copied_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
    instance_flags: wgt::InstanceFlags,
}

impl PendingWrites {
    pub fn new(
        command_encoder: Box<dyn hal::DynCommandEncoder>,
        instance_flags: wgt::InstanceFlags,
    ) -> Self {
        Self {
            command_encoder,
            is_recording: false,
            temp_resources: Vec::new(),
            dst_buffers: FastHashMap::default(),
            dst_textures: FastHashMap::default(),
            copied_blas_s: FastHashMap::default(),
            instance_flags,
        }
    }

    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers
            .insert(buffer.tracker_index(), buffer.clone());
    }

    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
        self.dst_textures
            .insert(texture.tracker_index(), texture.clone());
    }

    pub fn insert_blas(&mut self, blas: &Arc<Blas>) {
        self.copied_blas_s
            .insert(blas.tracker_index(), blas.clone());
    }

    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index())
    }

    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
        self.dst_textures.contains_key(&texture.tracker_index())
    }

    pub fn consume_temp(&mut self, resource: TempResource) {
        self.temp_resources.push(resource);
    }

    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
        self.temp_resources
            .push(TempResource::StagingBuffer(buffer));
    }

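    /// If anything was recorded, finish the encoder, package its command
    /// buffer as an `EncoderInFlight`, and install a fresh encoder for the
    /// next batch of writes. Returns `Ok(None)` when nothing was recorded.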
    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Arc<Device>,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);
            let pending_blas_s = mem::take(&mut self.copied_blas_s);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                inner: crate::command::InnerCommandEncoder {
                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
                    list: vec![cmd_buf],
                    device: device.clone(),
                    is_open: false,
                    api: crate::command::EncodingApi::InternalUse,
                    label: "(wgpu internal) PendingWrites command encoder".into(),
                },
                trackers: Tracker::new(device.ordered_buffer_usages, device.ordered_texture_usages),
                temp_resources: mem::take(&mut self.temp_resources),
                _indirect_draw_validation_resources: crate::indirect_validation::DrawResources::new(
                    device.clone(),
                ),
                pending_buffers,
                pending_textures,
                pending_blas_s,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            self.copied_blas_s.clear();
            Ok(None)
        }
    }

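    /// Begin recording if the encoder is idle, and return it ready for use.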
    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(hal_label(
                        Some("(wgpu internal) PendingWrites"),
                        self.instance_flags,
                    ))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }
}

impl Drop for PendingWrites {
    fn drop(&mut self) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

impl WebGpuError for QueueWriteError {
    fn webgpu_error_type(&self) -> ErrorType {
        match self {
            Self::Queue(e) => e.webgpu_error_type(),
            Self::Transfer(e) => e.webgpu_error_type(),
            Self::MemoryInitFailure(e) => e.webgpu_error_type(),
            Self::DestroyedResource(e) => e.webgpu_error_type(),
            Self::InvalidResource(e) => e.webgpu_error_type(),
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    Unmap(#[from] BufferAccessError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
    #[error(transparent)]
    ValidateAsActionsError(#[from] crate::ray_tracing::ValidateAsActionsError),
}

impl WebGpuError for QueueSubmitError {
    fn webgpu_error_type(&self) -> ErrorType {
        match self {
            Self::Queue(e) => e.webgpu_error_type(),
            Self::Unmap(e) => e.webgpu_error_type(),
            Self::CommandEncoder(e) => e.webgpu_error_type(),
            Self::ValidateAsActionsError(e) => e.webgpu_error_type(),
            Self::InvalidResource(e) => e.webgpu_error_type(),
            Self::DestroyedResource(_) | Self::BufferStillMapped(_) => ErrorType::Validation,
        }
    }
}

impl Queue {
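    /// Schedule `data` to be written into `buffer` at `buffer_offset`.
    ///
    /// The bytes are copied into a staging buffer immediately; the GPU-side
    /// copy into `buffer` is recorded on the pending-writes encoder and runs
    /// ahead of the next submission. Zero-sized writes are a no-op.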
    pub fn write_buffer(
        &self,
        buffer: Arc<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        self.device.check_is_valid()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        drop(snatch_guard);

        pending_writes.consume(staging_buffer);

        drop(pending_writes);

        result
    }

572
573 pub fn create_staging_buffer(
574 &self,
575 buffer_size: wgt::BufferSize,
576 ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
577 profiling::scope!("Queue::create_staging_buffer");
578 resource_log!("Queue::create_staging_buffer");
579
580 self.device.check_is_valid()?;
581
582 let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
583 let ptr = unsafe { staging_buffer.ptr() };
584
585 Ok((staging_buffer, ptr))
586 }
587
588 pub fn write_staging_buffer(
589 &self,
590 buffer: Fallible<Buffer>,
591 buffer_offset: wgt::BufferAddress,
592 staging_buffer: StagingBuffer,
593 ) -> Result<(), QueueWriteError> {
594 profiling::scope!("Queue::write_staging_buffer");
595
596 self.device.check_is_valid()?;
597
598 let buffer = buffer.get()?;
599
600 let staging_buffer = staging_buffer.flush();
605
606 let snatch_guard = self.device.snatchable_lock.read();
607 let mut pending_writes = self.pending_writes.lock();
608
609 let result = self.write_staging_buffer_impl(
610 &snatch_guard,
611 &mut pending_writes,
612 &staging_buffer,
613 buffer,
614 buffer_offset,
615 );
616
617 drop(snatch_guard);
618
619 pending_writes.consume(staging_buffer);
620
621 drop(pending_writes);
622
623 result
624 }
625
626 pub fn validate_write_buffer(
627 &self,
628 buffer: Fallible<Buffer>,
629 buffer_offset: u64,
630 buffer_size: wgt::BufferSize,
631 ) -> Result<(), QueueWriteError> {
632 profiling::scope!("Queue::validate_write_buffer");
633
634 self.device.check_is_valid()?;
635
636 let buffer = buffer.get()?;
637
638 self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;
639
640 Ok(())
641 }
642
643 fn validate_write_buffer_impl(
644 &self,
645 buffer: &Buffer,
646 buffer_offset: u64,
647 buffer_size: wgt::BufferSize,
648 ) -> Result<(), TransferError> {
649 if !matches!(&*buffer.map_state.lock(), BufferMapState::Idle) {
650 return Err(TransferError::BufferNotAvailable);
651 }
652 buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
653 if !buffer_size.get().is_multiple_of(wgt::COPY_BUFFER_ALIGNMENT) {
654 return Err(TransferError::UnalignedCopySize(buffer_size.get()));
655 }
656 if !buffer_offset.is_multiple_of(wgt::COPY_BUFFER_ALIGNMENT) {
657 return Err(TransferError::UnalignedBufferOffset(buffer_offset));
658 }
659
660 if buffer_offset > buffer.size {
661 return Err(TransferError::BufferStartOffsetOverrun {
662 start_offset: buffer_offset,
663 buffer_size: buffer.size,
664 side: CopySide::Destination,
665 });
666 }
667 if buffer_size.get() > buffer.size - buffer_offset {
668 return Err(TransferError::BufferEndOffsetOverrun {
669 start_offset: buffer_offset,
670 size: buffer_size.get(),
671 buffer_size: buffer.size,
672 side: CopySide::Destination,
673 });
674 }
675
676 Ok(())
677 }
678
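    /// Record the actual staging-to-destination copy on the pending-writes
    /// encoder: transition the staging buffer to `COPY_SRC` and the
    /// destination to `COPY_DST`, issue the copy, and mark the written range
    /// as initialized.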
    fn write_staging_buffer_impl(
        &self,
        snatch_guard: &SnatchGuard,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        self.device.check_is_valid()?;

        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, wgt::BufferUses::COPY_DST)
        };

        let dst_raw = buffer.try_raw(snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::StateTransition {
                from: wgt::BufferUses::MAP_WRITE,
                to: wgt::BufferUses::COPY_SRC,
            },
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        {
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

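    /// Schedule `data` to be written into `destination` according to
    /// `data_layout`. The payload is re-laid out into a staging buffer whose
    /// row pitch satisfies the device's `buffer_copy_pitch` alignment, and
    /// the buffer-to-texture copy is recorded on the pending-writes encoder.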
    pub fn write_texture(
        &self,
        destination: wgt::TexelCopyTextureInfo<Arc<Texture>>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        self.device.check_is_valid()?;

        let dst = destination.texture;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        validate_texture_copy_dst_format(dst.desc.format, destination.aspect)?;

        validate_texture_buffer_copy(
            &destination,
            dst_base.aspect,
            &dst.desc,
            data_layout,
            false,
        )?;

        let (required_bytes_in_copy, _source_bytes_per_array_layer, _) =
            validate_linear_texture_data(
                data_layout,
                dst.desc.format,
                destination.aspect,
                data.len() as wgt::BufferAddress,
                CopySide::Source,
                size,
            )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let dst_raw = dst.try_raw(&snatch_guard)?;

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

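        // Texture init tracking: if this write only partially covers
        // previously uninitialized layers of the destination mip, zero out
        // the uncovered portions first; otherwise just mark the range as
        // initialized.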
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &snatch_guard,
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);

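        // If the caller's row pitch already matches the aligned staging
        // pitch, the whole payload can be copied in one write; otherwise each
        // row of blocks is copied separately into its aligned slot.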
        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
            profiling::scope!("copy aligned");
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
            let block_rows_in_copy =
                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
            let stage_size =
                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
                    .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            for layer in 0..size.depth_or_array_layers {
                let rows_offset = layer * rows_per_image;
                for row in rows_offset..rows_offset + height_in_blocks {
                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
                    let dst_offset = row * stage_bytes_per_row;
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            bytes_in_last_row as usize,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::TexelCopyBufferLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::StateTransition {
                    from: wgt::BufferUses::MAP_WRITE,
                    to: wgt::BufferUses::COPY_SRC,
                },
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, wgt::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        use crate::conv;

        profiling::scope!("Queue::copy_external_image_to_texture");

        self.device.check_is_valid()?;

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x + size.width > src_width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x + size.width,
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y + size.height > src_height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y + size.height,
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring copy_external_image_to_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, wgt::TextureUses::COPY_DST);

        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

    #[cfg(feature = "trace")]
    fn trace_submission(
        &self,
        submit_index: SubmissionIndex,
        commands: Vec<crate::command::Command<crate::command::PointerReferences>>,
    ) {
        if let Some(ref mut trace) = *self.device.trace.lock() {
            trace.add(Action::Submit(submit_index, commands));
        }
    }

    #[cfg(feature = "trace")]
    fn trace_failed_submission(
        &self,
        submit_index: SubmissionIndex,
        commands: Option<Vec<crate::command::Command<crate::command::PointerReferences>>>,
        error: alloc::string::String,
    ) {
        if let Some(ref mut trace) = *self.device.trace.lock() {
            trace.add(Action::FailedCommands {
                commands,
                failed_at_submit: Some(submit_index),
                error,
            });
        }
    }

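    /// Submit `command_buffers` for execution on the GPU.
    ///
    /// Each command buffer is validated, prefixed with the barriers it needs,
    /// and submitted behind the buffered pending writes; the device fence is
    /// signaled with the newly allocated submission index, which is returned
    /// on success.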
    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

        let submit_index;

        let res = 'error: {
            let snatch_guard = self.device.snatchable_lock.read();

            let mut fence = self.device.fence.write();

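            // Hold the fence and the command indices for the rest of the
            // submission, so that the new submission index is allocated,
            // submitted, and signaled without racing another submit.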
            let mut command_index_guard = self.device.command_indices.write();
            command_index_guard.active_submission_index += 1;
            submit_index = command_index_guard.active_submission_index;

            if let Err(e) = self.device.check_is_valid() {
                break 'error Err(e.into());
            }

            let mut active_executions = Vec::new();

            let mut used_surface_textures = track::TextureUsageScope::default();

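            // Surface textures touched by this submission must stay alive
            // until the GPU is done; collect one owning reference per
            // texture, keyed by pointer.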
            let mut submit_surface_textures_owned = FastHashMap::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    for command_buffer in command_buffers {
                        profiling::scope!("process command buffer");

                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

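                        // Take the finished recording out of the command
                        // buffer; a command buffer can only be submitted once.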
                        #[allow(unused_mut)]
                        let mut cmd_buf_data = command_buffer.take_finished();

                        if first_error.is_some() {
                            continue;
                        }

                        #[cfg(feature = "trace")]
                        let trace_commands = cmd_buf_data
                            .as_mut()
                            .ok()
                            .and_then(|data| mem::take(&mut data.trace_commands));

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &snatch_guard,
                                    &mut submit_surface_textures_owned,
                                    &mut used_surface_textures,
                                    &mut command_index_guard,
                                );
                                if let Err(err) = res {
                                    #[cfg(feature = "trace")]
                                    self.trace_failed_submission(
                                        submit_index,
                                        trace_commands,
                                        err.to_string(),
                                    );
                                    first_error.get_or_insert(err);
                                    continue;
                                }

                                #[cfg(feature = "trace")]
                                if let Some(commands) = trace_commands {
                                    self.trace_submission(submit_index, commands);
                                }

                                cmd_buf_data.set_acceleration_structure_dependencies(&snatch_guard);
                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                #[cfg(feature = "trace")]
                                self.trace_failed_submission(
                                    submit_index,
                                    trace_commands,
                                    err.to_string(),
                                );
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        if let Err(e) = baked.encoder.open_pass(hal_label(
                            Some("(wgpu internal) Transit"),
                            self.device.instance_flags,
                        )) {
                            break 'error Err(e.into());
                        }

                        let mut trackers = self.device.trackers.lock();
                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
                        {
                            break 'error Err(e.into());
                        }
                        if let Err(e) = baked.initialize_texture_memory(
                            &mut trackers,
                            &self.device,
                            &snatch_guard,
                        ) {
                            break 'error Err(e.into());
                        }

                        CommandEncoder::insert_barriers_from_device_tracker(
                            baked.encoder.raw.as_mut(),
                            &mut trackers,
                            &baked.trackers,
                            &snatch_guard,
                        );

                        if let Err(e) = baked.encoder.close_and_push_front() {
                            break 'error Err(e.into());
                        }

                        if !used_surface_textures.is_empty() {
                            if let Err(e) = baked.encoder.open_pass(hal_label(
                                Some("(wgpu internal) Present"),
                                self.device.instance_flags,
                            )) {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            unsafe {
                                baked.encoder.raw.transition_textures(&texture_barriers);
                            };
                            if let Err(e) = baked.encoder.close() {
                                break 'error Err(e.into());
                            }
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        active_executions.push(EncoderInFlight {
                            inner: baked.encoder,
                            trackers: baked.trackers,
                            temp_resources: baked.temp_resources,
                            _indirect_draw_validation_resources: baked
                                .indirect_draw_validation_resources,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                            pending_blas_s: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

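            // The buffered `write_buffer`/`write_texture` work is submitted
            // in front of the user's command buffers, so surface textures it
            // touched need present transitions here as well.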
            let mut pending_writes = self.pending_writes.lock();

            {
                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
                for texture in pending_writes.dst_textures.values() {
                    match texture.try_inner(&snatch_guard) {
                        Ok(TextureInner::Native { .. }) => {}
                        Ok(TextureInner::Surface { .. }) => {
                            submit_surface_textures_owned
                                .insert(Arc::as_ptr(texture), texture.clone());

                            unsafe {
                                used_surface_textures
                                    .merge_single(texture, None, wgt::TextureUses::PRESENT)
                                    .unwrap()
                            };
                        }
                        Err(DestroyedResourceError(_)) => {}
                    }
                }

                if !used_surface_textures.is_empty() {
                    let mut trackers = self.device.trackers.lock();

                    let texture_barriers = trackers
                        .textures
                        .set_from_usage_scope_and_drain_transitions(
                            &used_surface_textures,
                            &snatch_guard,
                        )
                        .collect::<Vec<_>>();
                    unsafe {
                        pending_writes
                            .command_encoder
                            .transition_textures(&texture_barriers);
                    };
                }
            }

            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
                Ok(Some(pending_execution)) => {
                    active_executions.insert(0, pending_execution);
                }
                Ok(None) => {}
                Err(e) => break 'error Err(e.into()),
            }
            let hal_command_buffers = active_executions
                .iter()
                .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
                .collect::<Vec<_>>();

            {
                let mut submit_surface_textures =
                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
                        submit_surface_textures_owned.len(),
                    );

                for texture in submit_surface_textures_owned.values() {
                    let raw = match texture.inner.get(&snatch_guard) {
                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
                        _ => unreachable!(),
                    };
                    submit_surface_textures.push(raw);
                }

                if let Err(e) = unsafe {
                    self.raw().submit(
                        &hal_command_buffers,
                        &submit_surface_textures,
                        (fence.as_mut(), submit_index),
                    )
                }
                .map_err(|e| self.device.handle_hal_error(e))
                {
                    break 'error Err(e.into());
                }

                drop(command_index_guard);

                self.device
                    .last_successful_submission_index
                    .fetch_max(submit_index, Ordering::SeqCst);
            }

            profiling::scope!("cleanup");

            self.lock_life()
                .track_submission(submit_index, active_executions);
            drop(pending_writes);

            let fence_guard = RwLockWriteGuard::downgrade(fence);
            let (closures, result) =
                self.device
                    .maintain(fence_guard, wgt::PollType::Poll, snatch_guard);
            match result {
                Ok(status) => {
                    debug_assert!(matches!(
                        status,
                        wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
                    ));
                }
                Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
                Err(WaitIdleError::WrongSubmissionIndex(..)) => {
                    unreachable!("Cannot get WrongSubmissionIndex from Poll")
                }
                Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
            };

            Ok(closures)
        };

        let callbacks = match res {
            Ok(ok) => ok,
            Err(e) => return Err((submit_index, e)),
        };

        callbacks.fire();

        self.device.lose_if_oom();

        api_log!("Queue::submit returned submit index {submit_index}");

        Ok(submit_index)
    }

    pub fn get_timestamp_period(&self) -> f32 {
        unsafe { self.raw().get_timestamp_period() }
    }

    pub fn on_submitted_work_done(
        &self,
        closure: SubmittedWorkDoneClosure,
    ) -> Option<SubmissionIndex> {
        api_log!("Queue::on_submitted_work_done");
        self.lock_life().add_work_done_closure(closure)
    }

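    /// Create a compacted copy of `blas` via the pending-writes encoder.
    ///
    /// The source BLAS must have completed its compacted-size read-back
    /// (`BlasCompactState::Ready`). The returned BLAS is a new resource with
    /// `ALLOW_COMPACTION` cleared, so it cannot be compacted again.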
    pub fn compact_blas(&self, blas: &Arc<Blas>) -> Result<Arc<Blas>, CompactBlasError> {
        profiling::scope!("Queue::compact_blas");
        api_log!("Queue::compact_blas");

        let new_label = blas.label.clone() + " (compacted)";

        self.device.check_is_valid()?;
        self.same_device_as(blas.as_ref())?;

        let device = blas.device.clone();

        let snatch_guard = device.snatchable_lock.read();

        let BlasCompactState::Ready { size } = *blas.compacted_state.lock() else {
            return Err(CompactBlasError::BlasNotReady);
        };

        let mut size_info = blas.size_info;
        size_info.acceleration_structure_size = size;

        let mut pending_writes = self.pending_writes.lock();
        let cmd_buf_raw = pending_writes.activate();

        let raw = unsafe {
            device
                .raw()
                .create_acceleration_structure(&hal::AccelerationStructureDescriptor {
                    label: hal_label(Some(&new_label), device.instance_flags),
                    size: size_info.acceleration_structure_size,
                    format: hal::AccelerationStructureFormat::BottomLevel,
                    allow_compaction: false,
                })
        }
        .map_err(DeviceError::from_hal)?;

        let src_raw = blas.try_raw(&snatch_guard)?;

        unsafe {
            cmd_buf_raw.copy_acceleration_structure_to_acceleration_structure(
                src_raw,
                raw.as_ref(),
                wgt::AccelerationStructureCopy::Compact,
            )
        };

        let handle = unsafe {
            device
                .raw()
                .get_acceleration_structure_device_address(raw.as_ref())
        };

        drop(snatch_guard);

        let mut command_indices_lock = device.command_indices.write();
        command_indices_lock.next_acceleration_structure_build_command_index += 1;
        let built_index =
            NonZeroU64::new(command_indices_lock.next_acceleration_structure_build_command_index)
                .unwrap();

        let new_blas = Arc::new(Blas {
            raw: Snatchable::new(raw),
            device: device.clone(),
            size_info,
            sizes: blas.sizes.clone(),
            flags: blas.flags & !AccelerationStructureFlags::ALLOW_COMPACTION,
            update_mode: blas.update_mode,
            built_index: RwLock::new(rank::BLAS_BUILT_INDEX, Some(built_index)),
            handle,
            label: new_label,
            tracking_data: TrackingData::new(blas.device.tracker_indices.blas_s.clone()),
            compaction_buffer: None,
            compacted_state: Mutex::new(rank::BLAS_COMPACTION_STATE, BlasCompactState::Compacted),
        });

        pending_writes.insert_blas(blas);
        pending_writes.insert_blas(&new_blas);

        Ok(new_blas)
    }
}

impl Global {
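    /// Id-based entry point for `Queue::write_buffer`.
    ///
    /// A minimal sketch of a call, assuming `global`, `queue_id`, and
    /// `buffer_id` (a `COPY_DST` buffer of at least 256 bytes) were obtained
    /// through the usual instance/adapter/device setup; the identifiers here
    /// are hypothetical:
    ///
    /// ```ignore
    /// // Write 256 zero bytes at offset 0 of the buffer (hypothetical ids).
    /// global.queue_write_buffer(queue_id, buffer_id, 0, &[0u8; 256])?;
    /// ```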
    pub fn queue_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id).get()?;

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            use crate::device::trace::DataKind;
            let size = data.len() as u64;
            let data = trace.make_binary(DataKind::Bin, data);
            trace.add(Action::WriteBuffer {
                id: buffer.to_trace(),
                data,
                offset: buffer_offset,
                size,
                queued: true,
            });
        }

        queue.write_buffer(buffer, buffer_offset, data)
    }

    pub fn queue_create_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_size: wgt::BufferSize,
        id_in: Option<id::StagingBufferId>,
    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;

        let fid = self.hub.staging_buffers.prepare(id_in);
        let id = fid.assign(staging_buffer);

        Ok((id, ptr))
    }

    pub fn queue_write_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        staging_buffer_id: id::StagingBufferId,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
    }

    pub fn queue_validate_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
    }

    pub fn queue_write_texture(
        &self,
        queue_id: QueueId,
        destination: &wgt::TexelCopyTextureInfo<id::TextureId>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let texture = self.hub.textures.get(destination.texture).get()?;
        let destination = wgt::TexelCopyTextureInfo {
            texture,
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            use crate::device::trace::DataKind;
            let data = trace.make_binary(DataKind::Bin, data);
            trace.add(Action::WriteTexture {
                to: destination.to_trace(),
                data,
                layout: *data_layout,
                size: *size,
            });
        }

        queue.write_texture(destination, data, data_layout, size)
    }

    #[cfg(webgl)]
    pub fn queue_copy_external_image_to_texture(
        &self,
        queue_id: QueueId,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: crate::command::CopyExternalImageDestInfo,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let destination = wgt::CopyExternalImageDestInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
            color_space: destination.color_space,
            premultiplied_alpha: destination.premultiplied_alpha,
        };
        queue.copy_external_image_to_texture(source, destination, size)
    }

    pub fn queue_submit(
        &self,
        queue_id: QueueId,
        command_buffer_ids: &[id::CommandBufferId],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        let queue = self.hub.queues.get(queue_id);
        let command_buffer_guard = self.hub.command_buffers.read();
        let command_buffers = command_buffer_ids
            .iter()
            .map(|id| command_buffer_guard.get(*id))
            .collect::<Vec<_>>();
        drop(command_buffer_guard);
        queue.submit(&command_buffers)
    }

    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
        let queue = self.hub.queues.get(queue_id);

        if queue.device.timestamp_normalizer.get().unwrap().enabled() {
            return 1.0;
        }

        queue.get_timestamp_period()
    }

    pub fn queue_on_submitted_work_done(
        &self,
        queue_id: QueueId,
        closure: SubmittedWorkDoneClosure,
    ) -> SubmissionIndex {
        api_log!("Queue::on_submitted_work_done {queue_id:?}");

        let queue = self.hub.queues.get(queue_id);
        let result = queue.on_submitted_work_done(closure);
        result.unwrap_or(0)
    }

    pub fn queue_compact_blas(
        &self,
        queue_id: QueueId,
        blas_id: BlasId,
        id_in: Option<BlasId>,
    ) -> (BlasId, Option<u64>, Option<CompactBlasError>) {
        api_log!("Queue::compact_blas {queue_id:?}, {blas_id:?}");

        let fid = self.hub.blas_s.prepare(id_in);

        let queue = self.hub.queues.get(queue_id);
        let blas = self.hub.blas_s.get(blas_id);
        let device = &queue.device;

        let error = 'error: {
            match device.require_features(wgpu_types::Features::EXPERIMENTAL_RAY_QUERY) {
                Ok(_) => {}
                Err(err) => break 'error err.into(),
            }

            let blas = match blas.get() {
                Ok(blas) => blas,
                Err(err) => break 'error err.into(),
            };

            let new_blas = match queue.compact_blas(&blas) {
                Ok(blas) => blas,
                Err(err) => break 'error err,
            };

            let old_blas_size = blas.size_info.acceleration_structure_size;
            let new_blas_size = new_blas.size_info.acceleration_structure_size;
            let handle = new_blas.handle;

            let id = fid.assign(Fallible::Valid(new_blas));

            api_log!("CommandEncoder::compact_blas {blas_id:?} (size: {old_blas_size}) -> {id:?} (size: {new_blas_size})");

            return (id, Some(handle), None);
        };

        let id = fid.assign(Fallible::Invalid(Arc::new(error.to_string())));

        (id, None, Some(error))
    }
}

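/// Pre-submission validation for a single command buffer: it must belong to
/// this queue's device, every buffer it uses must be alive and unmapped,
/// surface textures it uses are collected for presentation, bind groups must
/// still be valid, and its acceleration-structure actions must validate.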
fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &SnatchGuard,
    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
    command_index_guard: &mut RwLockWriteGuard<CommandIndices>,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;

                match *buffer.map_state.lock() {
                    BufferMapState::Idle => (),
                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let should_extend = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        submit_surface_textures_owned.insert(Arc::as_ptr(texture), texture.clone());

                        true
                    }
                };
                if should_extend {
                    unsafe {
                        used_surface_textures
                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }
        {
            profiling::scope!("bind groups");
            for bind_group in &cmd_buf_data.trackers.bind_groups {
                bind_group.try_raw(snatch_guard)?;
            }
        }

        if let Err(e) =
            cmd_buf_data.validate_acceleration_structure_actions(snatch_guard, command_index_guard)
        {
            return Err(e.into());
        }
    }
    Ok(())
}