wgpu/api/device.rs
use alloc::{boxed::Box, string::String, sync::Arc};
use core::{error, fmt, future::Future};

use parking_lot::Mutex;

use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::*;

/// Open connection to a graphics and/or compute device.
///
/// Responsible for the creation of most rendering and compute resources.
/// These are then used in commands, which are submitted to a [`Queue`].
///
/// A device may be requested from an adapter with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
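///
/// A minimal sketch of obtaining a `Device` and [`Queue`] (assuming an `adapter`
/// already requested from an [`Instance`] and an async context; error handling elided):
///
/// ```ignore
/// // Request a device with default features and limits.
/// let (device, queue) = adapter
///     .request_device(&wgpu::DeviceDescriptor::default())
///     .await
///     .expect("failed to request device");
/// ```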
#[derive(Debug, Clone)]
pub struct Device {
    pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);

/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);

impl Device {
    #[cfg(custom)]
    /// Creates a `Device` from a custom implementation.
    pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
        Self {
            inner: dispatch::DispatchDevice::custom(device),
        }
    }

    /// Constructs a stub device for testing using [`Backend::Noop`].
    ///
    /// This is a convenience function which avoids the configuration, `async`, and fallibility
    /// aspects of constructing a device through `Instance`.
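    ///
    /// A minimal usage sketch (requires the `noop` feature; assumes a default
    /// descriptor is acceptable for the test):
    ///
    /// ```ignore
    /// // Create a throwaway device/queue pair backed by the no-op backend.
    /// let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    /// ```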
    #[cfg(feature = "noop")]
    pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
        use core::future::Future as _;
        use core::pin::pin;
        use core::task;
        let ctx = &mut task::Context::from_waker(waker::noop_waker_ref());

        let instance = Instance::new(&InstanceDescriptor {
            backends: Backends::NOOP,
            backend_options: BackendOptions {
                noop: NoopBackendOptions { enable: true },
                ..Default::default()
            },
            ..Default::default()
        });

        // Both of these futures are trivial and should complete instantaneously,
        // so we do not need an executor and can just poll them once.
        let task::Poll::Ready(Ok(adapter)) =
            pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
        else {
            unreachable!()
        };
        let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
        else {
            unreachable!()
        };
        device_and_queue
    }

    /// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
    ///
    /// On success, returns a [`crate::PollStatus`] indicating whether the queue is empty
    /// or whether there are queue submissions still in flight. (Note that, unless access
    /// to the [`Queue`] is coordinated somehow, this information could be out of date by
    /// the time the caller receives it. `Queue`s can be shared between threads, so
    /// other threads could submit new work at any time.)
    ///
    /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
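    ///
    /// A minimal sketch of blocking until all submitted work completes (assuming a
    /// `device` on a native backend; error handling elided):
    ///
    /// ```ignore
    /// // Block until the GPU has finished all outstanding work.
    /// device.poll(wgpu::PollType::Wait).expect("poll failed");
    /// ```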
    pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
        self.inner.poll(poll_type)
    }

    /// The features which can be used on this device.
    ///
    /// No additional features can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn features(&self) -> Features {
        self.inner.features()
    }

    /// The limits which can be used on this device.
    ///
    /// No better limits can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn limits(&self) -> Limits {
        self.inner.limits()
    }

    /// Creates a shader module.
    ///
    /// <div class="warning">
    // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
    // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
    ///
    /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
    /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
    /// However, on some build profiles and platforms, the default stack size for a thread may be
    /// exceeded before this limit is reached during parsing. Callers should ensure that there is
    /// enough stack space for this, particularly if calls to this method are exposed to user
    /// input.
    ///
    /// </div>
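    ///
    /// A minimal sketch of compiling a WGSL source string (the shader body here is a
    /// placeholder; any valid WGSL works):
    ///
    /// ```ignore
    /// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///     label: Some("example shader"),
    ///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    /// });
    /// ```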
    #[must_use]
    pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
        let module = self
            .inner
            .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
        ShaderModule { inner: module }
    }

    /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
    ///
    /// # Safety
    ///
    /// See [`create_shader_module_trusted`][csmt].
    ///
    /// [csmt]: Self::create_shader_module_trusted
    #[deprecated(
        since = "24.0.0",
        note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
    )]
    #[must_use]
    pub unsafe fn create_shader_module_unchecked(
        &self,
        desc: ShaderModuleDescriptor<'_>,
    ) -> ShaderModule {
        unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
    }

    /// Creates a shader module with flags to dictate runtime checks.
    ///
    /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
    ///
    /// # Safety
    ///
    /// In contrast with [`create_shader_module`][csm], this function creates a shader
    /// module with user-customizable runtime checks, which allows shaders to perform
    /// operations that can lead to undefined behavior, such as indexing out of bounds.
    /// It is therefore the caller's responsibility to pass a shader which does not
    /// perform any such operations.
    ///
    /// See the documentation for [`ShaderRuntimeChecks`][src] for more information about specific checks.
    ///
    /// [csm]: Self::create_shader_module
    /// [src]: crate::ShaderRuntimeChecks
    #[must_use]
    pub unsafe fn create_shader_module_trusted(
        &self,
        desc: ShaderModuleDescriptor<'_>,
        runtime_checks: crate::ShaderRuntimeChecks,
    ) -> ShaderModule {
        let module = self.inner.create_shader_module(desc, runtime_checks);
        ShaderModule { inner: module }
    }

    /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
    ///
    /// # Safety
    ///
    /// This function passes data to the backend as-is and can potentially result in a
    /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
    #[must_use]
    pub unsafe fn create_shader_module_passthrough(
        &self,
        desc: ShaderModuleDescriptorPassthrough<'_>,
    ) -> ShaderModule {
        let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
        ShaderModule { inner: module }
    }

    /// Creates an empty [`CommandEncoder`].
    #[must_use]
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
        let encoder = self.inner.create_command_encoder(desc);
        CommandEncoder { inner: encoder }
    }

    /// Creates an empty [`RenderBundleEncoder`].
    #[must_use]
    pub fn create_render_bundle_encoder<'a>(
        &self,
        desc: &RenderBundleEncoderDescriptor<'_>,
    ) -> RenderBundleEncoder<'a> {
        let encoder = self.inner.create_render_bundle_encoder(desc);
        RenderBundleEncoder {
            inner: encoder,
            _p: core::marker::PhantomData,
        }
    }

    /// Creates a new [`BindGroup`].
    #[must_use]
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
        let group = self.inner.create_bind_group(desc);
        BindGroup { inner: group }
    }

    /// Creates a [`BindGroupLayout`].
    #[must_use]
    pub fn create_bind_group_layout(
        &self,
        desc: &BindGroupLayoutDescriptor<'_>,
    ) -> BindGroupLayout {
        let layout = self.inner.create_bind_group_layout(desc);
        BindGroupLayout { inner: layout }
    }

    /// Creates a [`PipelineLayout`].
    #[must_use]
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
        let layout = self.inner.create_pipeline_layout(desc);
        PipelineLayout { inner: layout }
    }

    /// Creates a [`RenderPipeline`].
    #[must_use]
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_render_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a [`ComputePipeline`].
    #[must_use]
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
        let pipeline = self.inner.create_compute_pipeline(desc);
        ComputePipeline { inner: pipeline }
    }

    /// Creates a [`Buffer`].
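    ///
    /// A minimal sketch of creating a small, mappable-for-write staging buffer (the
    /// label and size are arbitrary):
    ///
    /// ```ignore
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("staging buffer"),
    ///     size: 1024,
    ///     usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC,
    ///     mapped_at_creation: false,
    /// });
    /// ```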
    #[must_use]
    pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
        let mut map_context = MapContext::new(desc.size);
        if desc.mapped_at_creation {
            map_context.initial_range = 0..desc.size;
        }

        let buffer = self.inner.create_buffer(desc);

        Buffer {
            inner: buffer,
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Texture`].
    ///
    /// `desc` specifies the general format of the texture.
    #[must_use]
    pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
        let texture = self.inner.create_texture(desc);

        Texture {
            inner: texture,
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Texture`] from a wgpu-hal Texture.
    ///
    /// # Safety
    ///
    /// - `hal_texture` must be created from this device internal handle
    /// - `hal_texture` must be created respecting `desc`
    /// - `hal_texture` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_texture_from_hal<A: wgc::hal_api::HalApi>(
        &self,
        hal_texture: A::Texture,
        desc: &TextureDescriptor<'_>,
    ) -> Texture {
        let texture = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_texture_from_hal::<A>(hal_texture, core_device, desc)
        };
        Texture {
            inner: texture.into(),
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Buffer`] from a wgpu-hal Buffer.
    ///
    /// # Safety
    ///
    /// - `hal_buffer` must be created from this device internal handle
    /// - `hal_buffer` must be created respecting `desc`
    /// - `hal_buffer` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_buffer_from_hal<A: wgc::hal_api::HalApi>(
        &self,
        hal_buffer: A::Buffer,
        desc: &BufferDescriptor<'_>,
    ) -> Buffer {
        let mut map_context = MapContext::new(desc.size);
        if desc.mapped_at_creation {
            map_context.initial_range = 0..desc.size;
        }

        let buffer = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
        };

        Buffer {
            inner: buffer.into(),
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Sampler`].
    ///
    /// `desc` specifies the behavior of the sampler.
    #[must_use]
    pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
        let sampler = self.inner.create_sampler(desc);
        Sampler { inner: sampler }
    }

    /// Creates a new [`QuerySet`].
    #[must_use]
    pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
        let query_set = self.inner.create_query_set(desc);
        QuerySet { inner: query_set }
    }

    /// Set a callback for errors that are not handled in error scopes.
    pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
        self.inner.on_uncaptured_error(handler)
    }

    /// Push an error scope.
    pub fn push_error_scope(&self, filter: ErrorFilter) {
        self.inner.push_error_scope(filter)
    }

    /// Pop an error scope.
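    ///
    /// A minimal sketch of catching validation errors around a block of work (assuming
    /// an async context to await the returned future):
    ///
    /// ```ignore
    /// device.push_error_scope(wgpu::ErrorFilter::Validation);
    /// // ... create resources, record and submit commands ...
    /// if let Some(error) = device.pop_error_scope().await {
    ///     eprintln!("validation error: {error}");
    /// }
    /// ```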
    pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
        self.inner.pop_error_scope()
    }

    /// Starts a capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
    /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should not be any other captures currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - Debuggers need to capture both the recording of the commands and the
    ///   submission of the commands to the GPU. Try to wrap all of your
    ///   GPU work in a capture.
    /// - If you encounter issues, try waiting for the GPU to finish all work
    ///   before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
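    ///
    /// A minimal sketch of wrapping a frame's work in a capture (assuming a debugger
    /// such as RenderDoc is attached and `encoder` records this frame's commands):
    ///
    /// ```ignore
    /// unsafe { device.start_graphics_debugger_capture() };
    /// queue.submit([encoder.finish()]);
    /// device.poll(wgpu::PollType::Wait).expect("poll failed");
    /// unsafe { device.stop_graphics_debugger_capture() };
    /// ```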
    #[doc(alias = "start_renderdoc_capture")]
    #[doc(alias = "start_xcode_capture")]
    pub unsafe fn start_graphics_debugger_capture(&self) {
        unsafe { self.inner.start_graphics_debugger_capture() }
    }

    /// Stops the current capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
    /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should be a capture currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - If you encounter issues, try submitting all work to the GPU and waiting
    ///   for that work to finish before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "stop_renderdoc_capture")]
    #[doc(alias = "stop_xcode_capture")]
    pub unsafe fn stop_graphics_debugger_capture(&self) {
        unsafe { self.inner.stop_graphics_debugger_capture() }
    }

    /// Query internal counters from the native backend for debugging purposes.
    ///
    /// Some backends may not set all counters, or may not set any counter at all.
    /// The `counters` cargo feature must be enabled for any counter to be set.
    ///
    /// If a counter is not set, it contains its default value (zero).
    #[must_use]
    pub fn get_internal_counters(&self) -> wgt::InternalCounters {
        self.inner.get_internal_counters()
    }

    /// Generate a GPU memory allocation report if the underlying backend supports it.
    ///
    /// Backends that do not support producing these reports return `None`. A backend may
    /// support it and still return `None` if it is not performing sub-allocation,
    /// for example as a workaround for driver issues.
    #[must_use]
    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
        self.inner.generate_allocator_report()
    }

    /// Apply a callback to this `Device`'s underlying backend device.
    ///
    /// If this `Device` is implemented by the backend API given by `A` (Vulkan,
    /// Dx12, etc.), then apply `hal_device_callback` to `Some(&device)`, where
    /// `device` is the underlying backend device type, [`A::Device`].
    ///
    /// If this `Device` uses a different backend, apply `hal_device_callback`
    /// to `None`.
    ///
    /// The device is locked for reading while `hal_device_callback` runs. If
    /// the callback attempts to perform any `wgpu` operations that require
    /// write access to the device (destroying a buffer, say), deadlock will
    /// occur. The locks are automatically released when the callback returns.
    ///
    /// # Safety
    ///
    /// - The raw handle passed to the callback must not be manually destroyed.
    ///
    /// [`A::Device`]: hal::Api::Device
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Device>) -> R, R>(
        &self,
        hal_device_callback: F,
    ) -> R {
        if let Some(core_device) = self.inner.as_core_opt() {
            unsafe {
                core_device
                    .context
                    .device_as_hal::<A, F, R>(core_device, hal_device_callback)
            }
        } else {
            hal_device_callback(None)
        }
    }

    /// Destroy this device.
    pub fn destroy(&self) {
        self.inner.destroy()
    }

    /// Set a DeviceLostCallback on this device.
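    ///
    /// A minimal sketch of logging when the device is lost (the message format is
    /// illustrative only):
    ///
    /// ```ignore
    /// device.set_device_lost_callback(|reason, message| {
    ///     eprintln!("device lost ({reason:?}): {message}");
    /// });
    /// ```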
    pub fn set_device_lost_callback(
        &self,
        callback: impl Fn(DeviceLostReason, String) + Send + 'static,
    ) {
        self.inner.set_device_lost_callback(Box::new(callback))
    }

    /// Create a [`PipelineCache`] with initial data
    ///
    /// This can be passed to [`Device::create_compute_pipeline`]
    /// and [`Device::create_render_pipeline`] to either accelerate these
    /// or add the cache results from those.
    ///
    /// # Safety
    ///
    /// If the `data` field of `desc` is set, it must have previously been returned from a call
    /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
    /// from an adapter with the same [`util::pipeline_cache_key`].
    /// This *is* compatible across wgpu versions, as any data format change will
    /// be accounted for.
    ///
    /// It is *not* supported to bring caches from previous direct uses of backend APIs
    /// into this method.
    ///
    /// # Errors
    ///
    /// Returns an error value if:
    /// * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
    /// * this device is invalid; or
    /// * the device is out of memory
    ///
    /// This method also returns an error value if:
    /// * The `fallback` field on `desc` is false; and
    /// * the `data` provided would not be used[^data_not_used]
    ///
    /// If an error value is used in subsequent calls, default caching will be used.
    ///
    /// [^saving]: We do recognise that saving this data to disk means this condition
    /// is impossible to fully prove. Consider the risks for your own application in this case.
    ///
    /// [^data_not_used]: This data may not be used if: the data was produced by a prior
    /// version of wgpu; or was created for an incompatible adapter; or there was a GPU driver
    /// update. In some cases, the data might not be used and a real value is still returned;
    /// this is left to the discretion of GPU drivers.
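    ///
    /// A minimal sketch of restoring a cache from previously saved bytes (assuming
    /// `cache_bytes: Option<&[u8]>` was loaded from disk and was originally produced by
    /// [`PipelineCache::get_data`] on a matching adapter):
    ///
    /// ```ignore
    /// let cache = unsafe {
    ///     device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
    ///         label: Some("pipeline cache"),
    ///         data: cache_bytes,
    ///         fallback: true,
    ///     })
    /// };
    /// ```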
    #[must_use]
    pub unsafe fn create_pipeline_cache(
        &self,
        desc: &PipelineCacheDescriptor<'_>,
    ) -> PipelineCache {
        let cache = unsafe { self.inner.create_pipeline_cache(desc) };
        PipelineCache { inner: cache }
    }
}

/// [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`] must be enabled on the device in order to call these functions.
impl Device {
    /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`] enabled.
    /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must be satisfied:
    /// - For every geometry descriptor (referred to here as `geo_desc`) of `sizes.descriptors`, the following must be satisfied:
    ///   - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
    ///     may be queried with [`Features::allowed_vertex_formats_for_blas`]).
    ///   - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
    ///
    /// [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`]: wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
    /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
    #[must_use]
    pub fn create_blas(
        &self,
        desc: &CreateBlasDescriptor<'_>,
        sizes: BlasGeometrySizeDescriptors,
    ) -> Blas {
        let (handle, blas) = self.inner.create_blas(desc, sizes);

        Blas {
            inner: blas,
            handle,
        }
    }

    /// Create a top level acceleration structure, used for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`] enabled.
    ///
    /// [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`]: wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
    #[must_use]
    pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
        let tlas = self.inner.create_tlas(desc);

        Tlas {
            shared: Arc::new(TlasShared {
                inner: tlas,
                max_instances: desc.max_instances,
            }),
        }
    }
}

/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {
    pub(crate) inner: RequestDeviceErrorKind,
}
#[derive(Clone, Debug)]
pub(crate) enum RequestDeviceErrorKind {
    /// Error from [`wgpu_core`].
    // must match dependency cfg
    #[cfg(wgpu_core)]
    Core(wgc::instance::RequestDeviceError),

    /// Error from web API that was called by `wgpu` to request a device.
    ///
    /// (This is currently never used by the webgl backend, but it could be.)
    #[cfg(webgpu)]
    WebGpu(String),
}

static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);

impl fmt::Display for RequestDeviceError {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.fmt(_f),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(error) => {
                write!(_f, "{error}")
            }
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

impl error::Error for RequestDeviceError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.source(),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(_) => None,
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

#[cfg(wgpu_core)]
impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
    fn from(error: wgc::instance::RequestDeviceError) -> Self {
        Self {
            inner: RequestDeviceErrorKind::Core(error),
        }
    }
}

/// Type for the callback of an uncaptured error handler.
pub trait UncapturedErrorHandler: Fn(Error) + Send + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + 'static {}

/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
    /// Catch only internal errors.
    Internal,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);

/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(send_sync)]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(not(send_sync))]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + 'static>;

/// Errors resulting from usage of GPU APIs.
///
/// By default, errors translate into panics. Depending on the backend and circumstances,
/// errors may occur synchronously or asynchronously. When errors need to be handled, use
/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
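///
/// A minimal sketch of installing a global handler for uncaptured errors (the log
/// message is illustrative only):
///
/// ```ignore
/// device.on_uncaptured_error(Box::new(|error| {
///     eprintln!("uncaptured wgpu error: {error}");
/// }));
/// ```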
#[derive(Debug)]
pub enum Error {
    /// Out of memory.
    OutOfMemory {
        /// Lower level source of the error.
        source: ErrorSource,
    },
    /// Validation error, signifying a bug in code or data provided to `wgpu`.
    Validation {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the validation error.
        description: String,
    },
    /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
    ///
    /// These could be due to internal implementation or system limits being reached.
    Internal {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the internal GPU error.
        description: String,
    },
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Error: Send, Sync);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
            Error::Internal { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
            Error::Internal { description, .. } => f.write_str(description),
        }
    }
}

// Copied from [`futures::task::noop_waker`].
// Needed until MSRV is 1.85 with `task::Waker::noop()` available
#[cfg(feature = "noop")]
mod waker {
    use core::ptr::null;
    use core::task::{RawWaker, RawWakerVTable, Waker};

    unsafe fn noop_clone(_data: *const ()) -> RawWaker {
        noop_raw_waker()
    }

    unsafe fn noop(_data: *const ()) {}

    const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);

    const fn noop_raw_waker() -> RawWaker {
        RawWaker::new(null(), &NOOP_WAKER_VTABLE)
    }

    /// Get a static reference to a [`Waker`] which
    /// does nothing when `wake()` is called on it.
    #[inline]
    pub fn noop_waker_ref() -> &'static Waker {
        struct SyncRawWaker(RawWaker);
        unsafe impl Sync for SyncRawWaker {}

        static NOOP_WAKER_INSTANCE: SyncRawWaker = SyncRawWaker(noop_raw_waker());

        // SAFETY: `Waker` is #[repr(transparent)] over its `RawWaker`.
        unsafe { &*(&NOOP_WAKER_INSTANCE.0 as *const RawWaker as *const Waker) }
    }
}
779}