Skip to main content

dawn_wgpu/
backend.rs

1use crate::dispatch::*;
2use crate::error::DawnError;
3use crate::future::*;
4use crate::mapping::*;
5use crate::types::*;
6use dawn_rs::*;
7use std::pin::Pin;
8use std::sync::Arc;
9use wgpu::custom::*;
10
#[cfg(feature = "wire")]
unsafe extern "C" {
    // Provided by the dawn_rs wire support library (linked in by the "wire"
    // feature); installs Dawn's native proc table for wire-backed builds.
    fn dawn_rs_wire_set_native_procs();
}
15
/// Install Dawn's native proc table exactly once per process (wire builds).
#[cfg(feature = "wire")]
fn ensure_native_procs() {
    use std::sync::Once;

    static NATIVE_PROCS: Once = Once::new();
    NATIVE_PROCS.call_once(|| {
        // SAFETY: plain FFI call into the dawn_rs wire setup routine;
        // `call_once` guarantees it runs at most once, serialized.
        unsafe { dawn_rs_wire_set_native_procs() };
    });
}
23
// No-op stand-in so callers (e.g. `InstanceInterface::new`) can call
// `ensure_native_procs()` unconditionally when the "wire" feature is off.
#[cfg(not(feature = "wire"))]
fn ensure_native_procs() {}
26
// wgpu "custom backend" glue: exposes a Dawn `Instance` through wgpu's
// `InstanceInterface` so the wgpu front end can drive Dawn directly.
impl InstanceInterface for DawnInstance {
    /// Create the backing Dawn instance.
    ///
    /// The wgpu-side descriptor is ignored; Dawn is always configured with
    /// the `TimedWaitAny` instance feature so `wait_any` can be used on
    /// Dawn futures (see `request_adapter` below).
    fn new(_desc: &wgpu::InstanceDescriptor) -> Self {
        // No-op unless the "wire" feature needs Dawn's native proc table.
        ensure_native_procs();
        let mut desc = InstanceDescriptor::new();
        desc.required_features = Some(vec![InstanceFeatureName::TimedWaitAny]);
        let instance = Instance::new(Some(&desc));
        Self::from_instance(instance)
    }

    /// Translate a raw window/layer handle into a Dawn surface.
    ///
    /// Supported targets: CAMetalLayer and AppKit views on macOS, Win32
    /// HWNDs on Windows, and Wayland/Xlib/XCB handles on other unixes.
    /// Unsupported targets panic rather than returning `Err`.
    unsafe fn create_surface(
        &self,
        target: wgpu::SurfaceTargetUnsafe,
    ) -> Result<DispatchSurface, wgpu::CreateSurfaceError> {
        match target {
            #[cfg(target_os = "macos")]
            wgpu::SurfaceTargetUnsafe::CoreAnimationLayer(layer) => {
                // Caller-provided CAMetalLayer: no layer handle is stored
                // (`metal_layer: None`) — contrast with the AppKit path
                // below, which creates and retains its own layer.
                let mut desc = SurfaceDescriptor::new();
                let source = SurfaceSourceMetalLayer { layer: Some(layer) };
                desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                let surface = self.inner.clone().create_surface(&desc);
                let dawn_surface = DawnSurface {
                    inner: surface,
                    metal_layer: None,
                };
                Ok(dispatch_surface(dawn_surface))
            }
            #[cfg(target_os = "macos")]
            wgpu::SurfaceTargetUnsafe::RawHandle {
                raw_window_handle, ..
            } => {
                use wgpu::rwh::RawWindowHandle;
                match raw_window_handle {
                    RawWindowHandle::AppKit(handle) => {
                        // An NSView needs a CAMetalLayer attached via
                        // raw-window-metal before Dawn can render to it. The
                        // raw layer pointer is kept in a `MetalLayerHandle`
                        // on the surface so it outlives the surface —
                        // presumably released in that handle's Drop impl
                        // (defined elsewhere; confirm).
                        let layer =
                            unsafe { raw_window_metal::Layer::from_ns_view(handle.ns_view) };
                        let layer_ptr = layer.into_raw();
                        let mut desc = SurfaceDescriptor::new();
                        let source = SurfaceSourceMetalLayer {
                            layer: Some(layer_ptr.as_ptr().cast()),
                        };
                        desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                        let surface = self.inner.clone().create_surface(&desc);
                        let handle = MetalLayerHandle {
                            ptr: layer_ptr.as_ptr().cast(),
                        };
                        let dawn_surface = DawnSurface {
                            inner: surface,
                            metal_layer: Some(Arc::new(handle)),
                        };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    _ => panic!("wgpu-compat: unsupported raw window handle on macOS"),
                }
            }
            #[cfg(target_os = "windows")]
            wgpu::SurfaceTargetUnsafe::RawHandle {
                raw_window_handle, ..
            } => {
                use wgpu::rwh::RawWindowHandle;
                match raw_window_handle {
                    RawWindowHandle::Win32(handle) => {
                        let mut desc = SurfaceDescriptor::new();
                        let source = SurfaceSourceWindowsHWND {
                            hinstance: handle.hinstance.map(|h| h.get() as _),
                            hwnd: Some(handle.hwnd.get() as _),
                        };
                        desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                        let surface = self.inner.clone().create_surface(&desc);
                        // NOTE(review): no `metal_layer` field here — assumes
                        // `DawnSurface` only carries that field on macOS;
                        // confirm against the struct definition.
                        let dawn_surface = DawnSurface {
                            inner: surface,
                        };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    _ => panic!("wgpu-compat: unsupported raw window handle on Windows"),
                }
            }
            #[cfg(all(unix, not(target_vendor = "apple")))]
            wgpu::SurfaceTargetUnsafe::RawHandle {
                raw_display_handle,
                raw_window_handle,
            } => {
                use wgpu::rwh::{RawDisplayHandle, RawWindowHandle};
                // Display and window handles must come from the same
                // windowing system; mixed pairs hit the panic arm.
                match (raw_display_handle, raw_window_handle) {
                    (RawDisplayHandle::Wayland(display), RawWindowHandle::Wayland(window)) => {
                        let mut desc = SurfaceDescriptor::new();
                        let source = SurfaceSourceWaylandSurface {
                            display: Some(display.display.as_ptr().cast()),
                            surface: Some(window.surface.as_ptr().cast()),
                        };
                        desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                        let surface = self.inner.clone().create_surface(&desc);
                        let dawn_surface = DawnSurface {
                            inner: surface,
                        };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    (RawDisplayHandle::Xlib(display), RawWindowHandle::Xlib(window)) => {
                        let mut desc = SurfaceDescriptor::new();
                        // `display.display` is Option; a missing Xlib display
                        // is treated as a caller bug (unwrap panics).
                        let source = SurfaceSourceXlibWindow {
                            display: Some(display.display.unwrap().as_ptr().cast()),
                            window: Some(window.window as u64),
                        };
                        desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                        let surface = self.inner.clone().create_surface(&desc);
                        let dawn_surface = DawnSurface {
                            inner: surface,
                        };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    (RawDisplayHandle::Xcb(display), RawWindowHandle::Xcb(window)) => {
                        let mut desc = SurfaceDescriptor::new();
                        // Same unwrap-on-missing-connection policy as Xlib.
                        let source = SurfaceSourceXCBWindow {
                            connection: Some(display.connection.unwrap().as_ptr().cast()),
                            window: Some(window.window.get()),
                        };
                        desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                        let surface = self.inner.clone().create_surface(&desc);
                        let dawn_surface = DawnSurface {
                            inner: surface,
                        };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    _ => panic!("wgpu-compat: unsupported raw window handle on unix"),
                }
            }
            _ => panic!("wgpu-compat: unsupported surface target"),
        }
    }

    /// Request an adapter from Dawn, resolving a wgpu future when Dawn's
    /// callback fires.
    fn request_adapter(
        &self,
        options: &wgpu::RequestAdapterOptions<'_, '_>,
    ) -> Pin<Box<dyn wgpu::custom::RequestAdapterFuture>> {
        let (future, shared) = CallbackFuture::new();
        let mut dawn_options = RequestAdapterOptions::new();
        dawn_options.power_preference = Some(map_power_preference(options.power_preference));
        dawn_options.force_fallback_adapter = Some(options.force_fallback_adapter);
        if let Some(surface) = options.compatible_surface {
            dawn_options.compatible_surface = Some(expect_surface_from_api(surface).inner.clone());
        }
        // Shared-texture-memory interop pins the backend type per platform
        // when that feature is enabled.
        #[cfg(feature = "shared_texture_memory")]
        {
            #[cfg(target_os = "windows")]
            {
                #[cfg(target_vendor = "win7")]
                {
                    dawn_options.backend_type = Some(dawn_rs::BackendType::D3D11)
                }
                #[cfg(not(target_vendor = "win7"))]
                {
                    dawn_options.backend_type = Some(dawn_rs::BackendType::D3D12)
                }
            }

            #[cfg(target_os = "macos")]
            {
                dawn_options.backend_type = Some(dawn_rs::BackendType::Metal);
            }
        }
        let future_handle = self.inner.clone().request_adapter(
            Some(&dawn_options),
            move |status, adapter, _message| {
                if status == RequestAdapterStatus::Success {
                    let adapter = adapter.expect("wgpu-compat: missing adapter");
                    complete_shared(&shared, Ok(dispatch_adapter(adapter)));
                } else {
                    // wgpu's error type wants per-backend detail Dawn does
                    // not report here, so every field is left empty.
                    complete_shared(
                        &shared,
                        Err(wgpu::RequestAdapterError::NotFound {
                            active_backends: wgpu::Backends::empty(),
                            requested_backends: wgpu::Backends::empty(),
                            supported_backends: wgpu::Backends::empty(),
                            no_fallback_backends: wgpu::Backends::empty(),
                            no_adapter_backends: wgpu::Backends::empty(),
                            incompatible_surface_backends: wgpu::Backends::empty(),
                        }),
                    );
                }
            },
        );
        // Non-blocking poke (timeout 0) — presumably so an already-available
        // result resolves the future before its first poll; confirm against
        // dawn_rs `wait_any` semantics.
        let _ = self.inner.clone().wait_any(
            Some(&mut [FutureWaitInfo {
                future: Some(future_handle),
                completed: None,
            }]),
            0,
        );
        Box::pin(future)
    }

    /// Pump Dawn's event loop once; unconditionally reports `true`.
    fn poll_all_devices(&self, _force_wait: bool) -> bool {
        self.inner.clone().process_events();
        true
    }

    /// Report supported WGSL language extensions. Only the readonly/readwrite
    /// storage-texture feature is currently translated to wgpu's bitflags;
    /// every other Dawn feature is dropped.
    fn wgsl_language_features(&self) -> wgpu::WgslLanguageFeatures {
        let mut features = SupportedWGSLLanguageFeatures::new();
        self.inner.clone().get_wgsl_language_features(&mut features);
        let mut out = wgpu::WgslLanguageFeatures::empty();
        if let Some(list) = features.features.as_ref() {
            for feature in list {
                if *feature == WGSLLanguageFeatureName::ReadonlyAndReadwriteStorageTextures {
                    out |= wgpu::WgslLanguageFeatures::ReadOnlyAndReadWriteStorageTextures;
                }
            }
        }
        out
    }

    /// Adapter enumeration is not implemented; the future resolves
    /// immediately with an empty list.
    fn enumerate_adapters(
        &self,
        _backends: wgpu::Backends,
    ) -> Pin<Box<dyn wgpu::custom::EnumerateAdapterFuture>> {
        let (future, shared) = CallbackFuture::new();
        complete_shared(&shared, Vec::new());
        Box::pin(future)
    }
}
245
impl AdapterInterface for DawnAdapter {
    /// Request a logical device (and its default queue) from Dawn.
    ///
    /// Maps wgpu features/limits onto the Dawn descriptor, installs
    /// panic-on-error uncaptured-error and device-lost callbacks, and
    /// resolves the returned future from Dawn's request callback. A failed
    /// request panics rather than resolving the future with an error.
    fn request_device(
        &self,
        desc: &wgpu::DeviceDescriptor<'_>,
    ) -> Pin<Box<dyn wgpu::custom::RequestDeviceFuture>> {
        let (future, shared) = CallbackFuture::new();
        let mut dawn_desc = DeviceDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        if !desc.required_features.is_empty() {
            dawn_desc.required_features = Some(map_features_to_dawn(desc.required_features));
        }
        // Shared-texture interop needs platform-specific Dawn features on
        // top of whatever the caller requested.
        #[cfg(feature = "shared_texture_memory")]
        {
            let mut features = vec![];
            #[cfg(target_os = "windows")]
            {
                features.push(FeatureName::SharedTextureMemoryDXGISharedHandle);
                features.push(FeatureName::SharedFenceDXGISharedHandle);
            }
            #[cfg(target_os = "macos")]
            {
                features.push(FeatureName::SharedTextureMemoryIOSurface);
                features.push(FeatureName::SharedFenceMTLSharedEvent);
            }

            #[cfg(target_os = "linux")]
            {
                features.push(FeatureName::SharedTextureMemoryDmaBuf);
            }
            if let Some(f) = dawn_desc.required_features.as_mut() {
                f.extend(features);
            } else {
                dawn_desc.required_features = Some(features);
            }
        }
        // Only forward limits the caller actually customized.
        if desc.required_limits != wgpu::Limits::default() {
            dawn_desc.required_limits = Some(map_limits_to_dawn(&desc.required_limits));
        }
        // Uncaptured errors and device loss are turned into hard panics.
        // (`callback` is interior-mutable — presumably a Cell/RefCell on the
        // dawn_rs side, hence `.replace` on a non-mut binding.)
        let error_info = dawn_rs::UncapturedErrorCallbackInfo::new();
        error_info
            .callback
            .replace(Some(Box::new(|_devices, ty, message| {
                panic!("Uncaptured error {:?}: {}", ty, message);
            })));
        dawn_desc.uncaptured_error_callback_info = Some(error_info);
        let lost_info = dawn_rs::DeviceLostCallbackInfo::new();
        lost_info
            .callback
            .replace(Some(Box::new(|_, reason, message| {
                panic!("Device lost: {reason:?}: {message}");
            })));
        dawn_desc.device_lost_callback_info = Some(lost_info);
        let _future_handle =
            self.inner.clone()
                .request_device(Some(&dawn_desc), move |status, device, message| {
                    if status == RequestDeviceStatus::Success {
                        let device = device.expect("wgpu-compat: missing device");
                        let queue = device.get_queue();
                        complete_shared(
                            &shared,
                            Ok((dispatch_device(device), dispatch_queue(queue))),
                        );
                    } else {
                        panic!("wgpu-compat: request_device failed {}", message);
                    }
                });
        Box::pin(future)
    }

    /// A surface is "supported" iff it was created by this Dawn backend.
    fn is_surface_supported(&self, surface: &DispatchSurface) -> bool {
        surface.as_custom::<DawnSurface>().is_some()
    }

    /// Translate Dawn's supported feature set into wgpu's.
    fn features(&self) -> wgpu::Features {
        let mut features = SupportedFeatures::new();
        self.inner.clone().get_features(&mut features);
        map_features_to_wgpu(&features)
    }

    /// Translate Dawn's limits into wgpu's; the status result is ignored.
    fn limits(&self) -> wgpu::Limits {
        let mut limits = Limits::new();
        let _ = self.inner.clone().get_limits(&mut limits);
        map_limits_to_wgpu(&limits)
    }

    /// Default (fully capable) downlevel caps are reported.
    fn downlevel_capabilities(&self) -> wgpu::DownlevelCapabilities {
        wgpu::DownlevelCapabilities::default()
    }

    /// Translate Dawn's `AdapterInfo` into wgpu's.
    ///
    /// Per WebGPU's `GPUAdapterInfo`, `description` is the human-readable
    /// adapter string, so it becomes wgpu's `name`.
    /// NOTE(review): `driver`/`driver_info` reuse `architecture`/`device` as
    /// the closest available strings — confirm this mapping is intended.
    fn get_info(&self) -> wgpu::AdapterInfo {
        let mut info = AdapterInfo::new();
        let _ = self.inner.clone().get_info(&mut info);
        wgpu::AdapterInfo {
            name: info.description.clone().unwrap_or_default(),
            vendor: info.vendor_id.unwrap_or(0),
            device: info.device_id.unwrap_or(0),
            device_type: match info.adapter_type.unwrap_or(AdapterType::Unknown) {
                AdapterType::DiscreteGpu => wgpu::DeviceType::DiscreteGpu,
                AdapterType::IntegratedGpu => wgpu::DeviceType::IntegratedGpu,
                AdapterType::Cpu => wgpu::DeviceType::Cpu,
                AdapterType::Unknown => wgpu::DeviceType::Other,
            },
            backend: map_backend_type_to_wgpu(info.backend_type.unwrap_or(BackendType::Undefined)),
            driver: info.architecture.clone().unwrap_or_default(),
            driver_info: info.device.clone().unwrap_or_default(),
            device_pci_bus_id: String::new(),
            subgroup_min_size: wgpu::MINIMUM_SUBGROUP_MIN_SIZE,
            subgroup_max_size: wgpu::MAXIMUM_SUBGROUP_MAX_SIZE,
            transient_saves_memory: false,
        }
    }

    /// Per-format feature queries are not implemented; reports no extra
    /// usages or flags for every format.
    fn get_texture_format_features(
        &self,
        _format: wgpu::TextureFormat,
    ) -> wgpu::TextureFormatFeatures {
        wgpu::TextureFormatFeatures {
            allowed_usages: wgpu::TextureUsages::empty(),
            flags: wgpu::TextureFormatFeatureFlags::empty(),
        }
    }

    /// Presentation timestamps are not implemented.
    fn get_presentation_timestamp(&self) -> wgpu::PresentationTimestamp {
        wgpu::PresentationTimestamp::INVALID_TIMESTAMP
    }
}
372
impl DeviceInterface for DawnDevice {
    /// Features are delegated to the adapter that owns this device.
    fn features(&self) -> wgpu::Features {
        let adapter = self.inner.get_adapter();
        DawnAdapter {
            inner: adapter,
        }
        .features()
    }

    /// Limits likewise come from the owning adapter.
    fn limits(&self) -> wgpu::Limits {
        let adapter = self.inner.get_adapter();
        DawnAdapter {
            inner: adapter,
        }
        .limits()
    }

    /// Compile a shader module; `_shader_bound_checks` is unused.
    fn create_shader_module(
        &self,
        desc: wgpu::ShaderModuleDescriptor<'_>,
        _shader_bound_checks: wgpu::ShaderRuntimeChecks,
    ) -> DispatchShaderModule {
        let dawn_desc = map_shader_module_descriptor(desc);
        let module = self.inner.create_shader_module(&dawn_desc);
        dispatch_shader_module(module)
    }

    /// Passthrough (pre-compiled) shader modules are not supported.
    unsafe fn create_shader_module_passthrough(
        &self,
        _desc: &wgpu::ShaderModuleDescriptorPassthrough<'_>,
    ) -> DispatchShaderModule {
        panic!("wgpu-compat: create_shader_module_passthrough not supported");
    }

    fn create_bind_group_layout(
        &self,
        desc: &wgpu::BindGroupLayoutDescriptor<'_>,
    ) -> DispatchBindGroupLayout {
        let dawn_desc = map_bind_group_layout_descriptor(desc);
        let layout = self.inner.create_bind_group_layout(&dawn_desc);
        dispatch_bind_group_layout(layout)
    }

    fn create_bind_group(&self, desc: &wgpu::BindGroupDescriptor<'_>) -> DispatchBindGroup {
        let dawn_desc = map_bind_group_descriptor(desc);
        let group = self.inner.create_bind_group(&dawn_desc);
        dispatch_bind_group(group)
    }

    fn create_pipeline_layout(
        &self,
        desc: &wgpu::PipelineLayoutDescriptor<'_>,
    ) -> DispatchPipelineLayout {
        let dawn_desc = map_pipeline_layout_descriptor(desc);
        let layout = self.inner.create_pipeline_layout(&dawn_desc);
        dispatch_pipeline_layout(layout)
    }

    fn create_render_pipeline(
        &self,
        desc: &wgpu::RenderPipelineDescriptor<'_>,
    ) -> DispatchRenderPipeline {
        let dawn_desc = map_render_pipeline_descriptor(desc);
        let pipeline = self.inner.create_render_pipeline(&dawn_desc);
        dispatch_render_pipeline(pipeline)
    }

    /// Mesh shading pipelines are not supported by this backend.
    fn create_mesh_pipeline(
        &self,
        _desc: &wgpu::MeshPipelineDescriptor<'_>,
    ) -> DispatchRenderPipeline {
        panic!("wgpu-compat: mesh pipelines not supported");
    }

    fn create_compute_pipeline(
        &self,
        desc: &wgpu::ComputePipelineDescriptor<'_>,
    ) -> DispatchComputePipeline {
        let dawn_desc = map_compute_pipeline_descriptor(desc);
        let pipeline = self.inner.create_compute_pipeline(&dawn_desc);
        dispatch_compute_pipeline(pipeline)
    }

    /// Pipeline caches are stubbed; the descriptor is ignored.
    unsafe fn create_pipeline_cache(
        &self,
        _desc: &wgpu::PipelineCacheDescriptor<'_>,
    ) -> DispatchPipelineCache {
        dispatch_pipeline_cache()
    }

    /// Create a buffer; a null result from Dawn is treated as a bug.
    fn create_buffer(&self, desc: &wgpu::BufferDescriptor<'_>) -> DispatchBuffer {
        let dawn_desc = map_buffer_descriptor(desc);
        let buffer = self
            .inner
            .create_buffer(&dawn_desc)
            .expect("wgpu-compat: create_buffer returned null");
        dispatch_buffer(buffer)
    }

    fn create_texture(&self, desc: &wgpu::TextureDescriptor<'_>) -> DispatchTexture {
        let dawn_desc = map_texture_descriptor(desc);
        let texture = self.inner.create_texture(&dawn_desc);
        dispatch_texture(texture)
    }

    /// NOTE(review): only the label is translated and `_planes` is dropped —
    /// confirm whether plane views need to be forwarded to Dawn here.
    fn create_external_texture(
        &self,
        desc: &wgpu::ExternalTextureDescriptor<'_>,
        _planes: &[&wgpu::TextureView],
    ) -> DispatchExternalTexture {
        let mut dawn_desc = ExternalTextureDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        let texture = self.inner.create_external_texture(&dawn_desc);
        dispatch_external_texture(texture)
    }

    /// Acceleration structures are stubbed; no size estimate is returned.
    fn create_blas(
        &self,
        _desc: &wgpu::CreateBlasDescriptor<'_>,
        _sizes: wgpu::BlasGeometrySizeDescriptors,
    ) -> (Option<u64>, DispatchBlas) {
        (None, dispatch_blas())
    }

    /// Acceleration structures are stubbed.
    fn create_tlas(&self, _desc: &wgpu::CreateTlasDescriptor<'_>) -> DispatchTlas {
        dispatch_tlas()
    }

    fn create_sampler(&self, desc: &wgpu::SamplerDescriptor<'_>) -> DispatchSampler {
        let dawn_desc = map_sampler_descriptor(desc);
        let sampler = self.inner.create_sampler(Some(&dawn_desc));
        dispatch_sampler(sampler)
    }

    /// Create a query set; only occlusion and timestamp queries translate,
    /// anything else panics.
    fn create_query_set(&self, desc: &wgpu::QuerySetDescriptor<'_>) -> DispatchQuerySet {
        let ty = match desc.ty {
            wgpu::QueryType::Occlusion => QueryType::Occlusion,
            wgpu::QueryType::Timestamp => QueryType::Timestamp,
            _ => panic!("wgpu-compat: query type not supported"),
        };
        let mut dawn_desc = QuerySetDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        dawn_desc.r#type = Some(ty);
        dawn_desc.count = Some(desc.count);
        let set = self.inner.create_query_set(&dawn_desc);
        dispatch_query_set(set)
    }

    fn create_command_encoder(
        &self,
        desc: &wgpu::CommandEncoderDescriptor<'_>,
    ) -> DispatchCommandEncoder {
        let dawn_desc = map_command_encoder_descriptor(desc);
        let encoder = self.inner.create_command_encoder(Some(&dawn_desc));
        dispatch_command_encoder(encoder)
    }

    fn create_render_bundle_encoder(
        &self,
        desc: &wgpu::RenderBundleEncoderDescriptor<'_>,
    ) -> DispatchRenderBundleEncoder {
        let dawn_desc = map_render_bundle_encoder_descriptor(desc);
        let encoder = self.inner.create_render_bundle_encoder(&dawn_desc);
        dispatch_render_bundle_encoder(encoder)
    }

    // Intentional no-op: device loss is already handled by the panic
    // callback installed during `request_device`.
    fn set_device_lost_callback(&self, _device_lost_callback: wgpu::custom::BoxDeviceLostCallback) {
    }

    // Intentional no-op: uncaptured errors already panic via the callback
    // installed during `request_device`.
    fn on_uncaptured_error(&self, _handler: Arc<dyn wgpu::UncapturedErrorHandler>) {}

    /// Push an error scope; scope indices are unused, so 0 is returned.
    fn push_error_scope(&self, filter: wgpu::ErrorFilter) -> u32 {
        let filter = match filter {
            wgpu::ErrorFilter::Validation => ErrorFilter::Validation,
            wgpu::ErrorFilter::OutOfMemory => ErrorFilter::OutOfMemory,
            wgpu::ErrorFilter::Internal => ErrorFilter::Internal,
        };
        self.inner.push_error_scope(filter);
        0
    }

    /// Pop the innermost error scope, resolving the future with `None` on a
    /// clean scope, the mapped error on a captured one, or a synthetic
    /// internal error if the pop itself fails.
    fn pop_error_scope(&self, _index: u32) -> Pin<Box<dyn wgpu::custom::PopErrorScopeFuture>> {
        let (future, shared) = CallbackFuture::new();
        let _ = self.inner.pop_error_scope(move |status, ty, message| {
            if status == PopErrorScopeStatus::Success {
                if ty == ErrorType::NoError {
                    complete_shared(&shared, None);
                } else {
                    complete_shared(&shared, Some(map_uncaptured_error(ty, message)));
                }
            } else {
                complete_shared(
                    &shared,
                    Some(wgpu::Error::Internal {
                        source: Box::new(DawnError("pop_error_scope failed".to_string())),
                        description: "pop_error_scope failed".to_string(),
                    }),
                );
            }
        });
        Box::pin(future)
    }

    // Debugger capture is unsupported; the `&self.inner` touch only silences
    // an unused-self lint-style concern.
    unsafe fn start_graphics_debugger_capture(&self) {
        let _ = &self.inner;
    }

    unsafe fn stop_graphics_debugger_capture(&self) {
        let _ = &self.inner;
    }

    /// NOTE(review): ticks Dawn once and unconditionally reports
    /// `QueueEmpty` without checking actual queue state — confirm callers
    /// tolerate this for `PollType::Wait`-style requests.
    fn poll(&self, _poll_type: wgt::PollType<u64>) -> Result<wgpu::PollStatus, wgpu::PollError> {
        self.inner.tick();
        Ok(wgpu::PollStatus::QueueEmpty)
    }

    /// Internal counters are not tracked.
    fn get_internal_counters(&self) -> wgpu::InternalCounters {
        wgpu::InternalCounters::default()
    }

    /// Allocator reporting is not implemented.
    fn generate_allocator_report(&self) -> Option<wgpu::AllocatorReport> {
        None
    }

    /// Eagerly destroy the underlying Dawn device.
    fn destroy(&self) {
        self.inner.destroy();
    }
}
601
impl QueueInterface for DawnQueue {
    /// Copy `data` into `buffer` at `offset` via Dawn's queue upload path.
    fn write_buffer(&self, buffer: &DispatchBuffer, offset: wgpu::BufferAddress, data: &[u8]) {
        let buffer = expect_buffer(buffer);
        // dawn_rs takes a `c_void` slice; `c_void` is one byte, so the byte
        // length carries over unchanged.
        let data_ptr = data.as_ptr().cast::<std::ffi::c_void>();
        let data_slice = unsafe { std::slice::from_raw_parts(data_ptr, data.len()) };
        self.inner.write_buffer(buffer, offset, data_slice);
    }

    /// Staging writes go through a plain zero-filled heap buffer.
    fn create_staging_buffer(&self, size: wgpu::BufferSize) -> Option<DispatchQueueWriteBuffer> {
        Some(dispatch_queue_write_buffer(vec![0; size.get() as usize]))
    }

    /// Validation is left entirely to Dawn; always accepts.
    fn validate_write_buffer(
        &self,
        _buffer: &DispatchBuffer,
        _offset: wgpu::BufferAddress,
        _size: wgpu::BufferSize,
    ) -> Option<()> {
        Some(())
    }

    /// Flush a staging buffer (from `create_staging_buffer`) into `buffer`
    /// at `offset`.
    fn write_staging_buffer(
        &self,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        staging_buffer: &DispatchQueueWriteBuffer,
    ) {
        let buffer = expect_buffer(buffer);
        let staging = staging_buffer
            .as_custom::<DawnQueueWriteBuffer>()
            .expect("wgpu-compat: queue write buffer not dawn");
        // Same &[u8] -> &[c_void] reinterpretation as in `write_buffer`.
        let data_ptr = staging.inner.as_ptr().cast::<std::ffi::c_void>();
        let data_slice = unsafe { std::slice::from_raw_parts(data_ptr, staging.inner.len()) };
        self.inner.write_buffer(buffer, offset, data_slice);
    }

    /// Upload texel data. `rows_per_image` is defaulted to the copy height
    /// for multi-row/multi-layer copies when the caller omits it —
    /// presumably to satisfy Dawn's layout validation; confirm.
    fn write_texture(
        &self,
        texture: wgpu::TexelCopyTextureInfo<'_>,
        data: &[u8],
        mut data_layout: wgpu::TexelCopyBufferLayout,
        size: wgpu::Extent3d,
    ) {
        if data_layout.rows_per_image.is_none()
            && (size.height > 1 || size.depth_or_array_layers > 1)
        {
            data_layout.rows_per_image = Some(size.height.max(1));
        }
        let destination = map_texel_copy_texture_info(texture);
        let data_layout = map_texel_copy_buffer_layout(data_layout);
        let write_size = map_extent_3d(size);
        let data_ptr = data.as_ptr().cast::<std::ffi::c_void>();
        let data_slice = unsafe { std::slice::from_raw_parts(data_ptr, data.len()) };
        self.inner
            .write_texture(&destination, data_slice, &data_layout, &write_size);
    }

    // Web-only interface entry; never compiled for this native backend.
    #[cfg(web)]
    #[allow(unexpected_cfgs)]
    fn copy_external_image_to_texture(
        &self,
        _source: &wgpu::CopyExternalImageSourceInfo,
        _dest: wgpu::CopyExternalImageDestInfo<&wgpu::Texture>,
        _size: wgpu::Extent3d,
    ) {
        unimplemented!();
    }

    /// Submit command buffers. Submission indices are not tracked, so the
    /// reported index is always 0.
    fn submit(&self, command_buffers: &mut dyn Iterator<Item = DispatchCommandBuffer>) -> u64 {
        let buffers = command_buffers
            .map(|buffer| expect_command_buffer(&buffer))
            .collect::<Vec<_>>();
        self.inner.submit(&buffers);
        0
    }

    // Assumes Dawn timestamps are already in nanoseconds (period 1.0) —
    // TODO confirm per backend.
    fn get_timestamp_period(&self) -> f32 {
        1.0
    }

    /// Invoke `callback` once when Dawn reports submitted work done,
    /// regardless of the completion status.
    fn on_submitted_work_done(&self, callback: wgpu::custom::BoxSubmittedWorkDoneCallback) {
        // Option + take() lets the once-only callback be driven from Dawn's
        // re-invocable closure type exactly once.
        let mut callback = Some(callback);
        let _ = self.inner.on_submitted_work_done(move |status, _message| {
            let _ = status;
            if let Some(cb) = callback.take() {
                cb();
            }
        });
    }

    /// BLAS compaction is unsupported; returns a stub handle and no size.
    fn compact_blas(&self, _blas: &DispatchBlas) -> (Option<u64>, DispatchBlas) {
        (None, dispatch_blas())
    }
}
696
697impl ShaderModuleInterface for DawnShaderModule {
698    fn get_compilation_info(&self) -> Pin<Box<dyn wgpu::custom::ShaderCompilationInfoFuture>> {
699        let (future, shared) = CallbackFuture::new();
700        let _ = self.inner.get_compilation_info(move |status, info| {
701            if status == CompilationInfoRequestStatus::Success {
702                complete_shared(&shared, map_compilation_info(info));
703            } else {
704                complete_shared(&shared, wgpu::CompilationInfo { messages: vec![] });
705            }
706        });
707        Box::pin(future)
708    }
709}
710
// Marker impls: these wrappers need only the traits' default behavior.
impl BindGroupLayoutInterface for DawnBindGroupLayout {}
impl BindGroupInterface for DawnBindGroup {}
impl TextureViewInterface for DawnTextureView {}
impl SamplerInterface for DawnSampler {}
715
impl BufferInterface for DawnBuffer {
    /// Begin an async map of `range`, translating wgpu's map mode and
    /// reporting success/failure through `callback`.
    fn map_async(
        &self,
        mode: wgpu::MapMode,
        range: std::ops::Range<wgpu::BufferAddress>,
        callback: wgpu::custom::BufferMapCallback,
    ) {
        let mode = match mode {
            wgpu::MapMode::Read => MapMode::READ,
            wgpu::MapMode::Write => MapMode::WRITE,
        };
        // Option + take() lets the once-only callback be driven from Dawn's
        // re-invocable closure type exactly once.
        let mut callback = Some(callback);
        let _ = self.inner.map_async(
            mode,
            range.start as usize,
            (range.end - range.start) as usize,
            move |status, message| {
                let result = match status {
                    MapAsyncStatus::Success => Ok(()),
                    _ => {
                        // wgpu's BufferAsyncError carries no detail, so the
                        // Dawn-side message is deliberately dropped.
                        let _ = message;
                        Err(wgpu::BufferAsyncError)
                    }
                };
                if let Some(cb) = callback.take() {
                    cb(result);
                }
            },
        );
    }

    /// Expose the mapped bytes of `sub_range` as a dispatchable range.
    /// Meaningful only after a successful `map_async` covering the range.
    fn get_mapped_range(
        &self,
        sub_range: std::ops::Range<wgpu::BufferAddress>,
    ) -> DispatchBufferMappedRange {
        let ptr = self.inner.get_mapped_range(
            sub_range.start as usize,
            (sub_range.end - sub_range.start) as usize,
        );
        dispatch_buffer_mapped_range(ptr.cast(), (sub_range.end - sub_range.start) as usize)
    }

    fn unmap(&self) {
        self.inner.unmap();
    }

    /// Eagerly release the underlying Dawn buffer.
    fn destroy(&self) {
        self.inner.destroy();
    }
}
766
767impl TextureInterface for DawnTexture {
768    fn create_view(&self, desc: &wgpu::TextureViewDescriptor<'_>) -> DispatchTextureView {
769        let desc = map_texture_view_descriptor(desc);
770        let view = self.inner.create_view(Some(&desc));
771        dispatch_texture_view(view)
772    }
773
774    fn destroy(&self) {
775        self.inner.destroy();
776    }
777}
778
impl ExternalTextureInterface for DawnExternalTexture {
    // Eagerly release the underlying Dawn external texture.
    fn destroy(&self) {
        self.inner.destroy();
    }
}
784
impl BlasInterface for DawnBlas {
    // Acceleration-structure compaction is not implemented for this
    // backend; any attempt to prepare it is rejected outright.
    fn prepare_compact_async(&self, _callback: wgpu::custom::BlasCompactCallback) {
        panic!("wgpu-compat: blas not supported");
    }

    // Never ready, consistent with compaction being unsupported above.
    fn ready_for_compaction(&self) -> bool {
        false
    }
}
794
// These handle types need no backend-specific behavior beyond what their
// dispatch traits provide by default.
impl TlasInterface for DawnTlas {}
impl QuerySetInterface for DawnQuerySet {}
impl PipelineLayoutInterface for DawnPipelineLayout {}
798
799impl RenderPipelineInterface for DawnRenderPipeline {
800    fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout {
801        let layout = self.inner.get_bind_group_layout(index);
802        dispatch_bind_group_layout(layout)
803    }
804}
805
806impl ComputePipelineInterface for DawnComputePipeline {
807    fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout {
808        let layout = self.inner.get_bind_group_layout(index);
809        dispatch_bind_group_layout(layout)
810    }
811}
812
impl PipelineCacheInterface for DawnPipelineCache {
    // This backend does not expose serialized pipeline-cache contents, so
    // there is never any data to hand back.
    fn get_data(&self) -> Option<Vec<u8>> {
        None
    }
}
818
// Command recording: each method translates wgpu arguments into Dawn types
// and forwards to the Dawn command encoder.
impl CommandEncoderInterface for DawnCommandEncoder {
    // Record a buffer-to-buffer copy; `None` for `copy_size` copies to the
    // end of the buffer via Dawn's WHOLE_SIZE sentinel.
    fn copy_buffer_to_buffer(
        &self,
        source: &DispatchBuffer,
        source_offset: wgpu::BufferAddress,
        destination: &DispatchBuffer,
        destination_offset: wgpu::BufferAddress,
        copy_size: Option<wgpu::BufferAddress>,
    ) {
        let source = expect_buffer(source);
        let destination = expect_buffer(destination);
        self.inner.clone().copy_buffer_to_buffer(
            source,
            source_offset,
            destination,
            destination_offset,
            copy_size.unwrap_or(WHOLE_SIZE),
        );
    }

    fn copy_buffer_to_texture(
        &self,
        source: wgpu::TexelCopyBufferInfo<'_>,
        destination: wgpu::TexelCopyTextureInfo<'_>,
        copy_size: wgpu::Extent3d,
    ) {
        let source = map_texel_copy_buffer_info(source);
        let dest = map_texel_copy_texture_info(destination);
        let size = map_extent_3d(copy_size);
        self.inner.clone()
            .copy_buffer_to_texture(&source, &dest, &size);
    }

    fn copy_texture_to_buffer(
        &self,
        source: wgpu::TexelCopyTextureInfo<'_>,
        destination: wgpu::TexelCopyBufferInfo<'_>,
        copy_size: wgpu::Extent3d,
    ) {
        let source = map_texel_copy_texture_info(source);
        let dest = map_texel_copy_buffer_info(destination);
        let size = map_extent_3d(copy_size);
        self.inner.clone()
            .copy_texture_to_buffer(&source, &dest, &size);
    }

    fn copy_texture_to_texture(
        &self,
        source: wgpu::TexelCopyTextureInfo<'_>,
        destination: wgpu::TexelCopyTextureInfo<'_>,
        copy_size: wgpu::Extent3d,
    ) {
        let source = map_texel_copy_texture_info(source);
        let dest = map_texel_copy_texture_info(destination);
        let size = map_extent_3d(copy_size);
        self.inner.clone()
            .copy_texture_to_texture(&source, &dest, &size);
    }

    fn begin_compute_pass(&self, desc: &wgpu::ComputePassDescriptor<'_>) -> DispatchComputePass {
        let dawn_desc = map_compute_pass_descriptor(desc);
        let pass = self.inner.clone().begin_compute_pass(Some(&dawn_desc));
        dispatch_compute_pass(pass)
    }

    fn begin_render_pass(&self, desc: &wgpu::RenderPassDescriptor<'_>) -> DispatchRenderPass {
        let dawn_desc = map_render_pass_descriptor(desc);
        let pass = self.inner.clone().begin_render_pass(&dawn_desc);
        dispatch_render_pass(pass)
    }

    // Close recording and produce the command buffer for submission.
    fn finish(&mut self) -> DispatchCommandBuffer {
        let buffer = self.inner.clone().finish(None);
        dispatch_command_buffer(buffer)
    }

    // NOTE(review): intentionally a no-op — the clear-texture request is
    // silently dropped by this backend. Confirm callers tolerate this.
    fn clear_texture(
        &self,
        texture: &DispatchTexture,
        subresource_range: &wgpu::ImageSubresourceRange,
    ) {
        let _ = texture;
        let _ = subresource_range;
    }

    // `None` size clears through the end of the buffer (WHOLE_SIZE).
    fn clear_buffer(
        &self,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferAddress>,
    ) {
        let buffer = expect_buffer(buffer);
        self.inner.clone()
            .clear_buffer(buffer, offset, size.unwrap_or(WHOLE_SIZE));
    }

    fn insert_debug_marker(&self, label: &str) {
        self.inner.clone().insert_debug_marker(label.to_string());
    }

    fn push_debug_group(&self, label: &str) {
        self.inner.clone().push_debug_group(label.to_string());
    }

    fn pop_debug_group(&self) {
        self.inner.clone().pop_debug_group();
    }

    fn write_timestamp(&self, query_set: &DispatchQuerySet, query_index: u32) {
        let set = expect_query_set(query_set);
        self.inner.clone().write_timestamp(set, query_index);
    }

    // Copy `query_count` results starting at `first_query` into `destination`
    // at `destination_offset`.
    fn resolve_query_set(
        &self,
        query_set: &DispatchQuerySet,
        first_query: u32,
        query_count: u32,
        destination: &DispatchBuffer,
        destination_offset: wgpu::BufferAddress,
    ) {
        let set = expect_query_set(query_set);
        let buffer = expect_buffer(destination);
        self.inner.clone().resolve_query_set(
            set,
            first_query,
            query_count,
            buffer,
            destination_offset,
        );
    }

    // Ray-tracing acceleration structures are unsupported by this backend.
    fn mark_acceleration_structures_built<'a>(
        &self,
        _blas: &mut dyn Iterator<Item = &'a wgpu::Blas>,
        _tlas: &mut dyn Iterator<Item = &'a wgpu::Tlas>,
    ) {
        panic!("wgpu-compat: blas/tlas not supported");
    }

    // Ray-tracing acceleration structures are unsupported by this backend.
    fn build_acceleration_structures<'a>(
        &self,
        _blas: &mut dyn Iterator<Item = &'a wgpu::BlasBuildEntry<'a>>,
        _tlas: &mut dyn Iterator<Item = &'a wgpu::Tlas>,
    ) {
        panic!("wgpu-compat: blas/tlas not supported");
    }

    // NOTE(review): intentionally a no-op — explicit resource transitions
    // are not forwarded to Dawn here.
    fn transition_resources<'a>(
        &mut self,
        _buffer_transitions: &mut dyn Iterator<Item = wgpu::BufferTransition<&'a DispatchBuffer>>,
        _texture_transitions: &mut dyn Iterator<
            Item = wgpu::TextureTransition<&'a DispatchTexture>,
        >,
    ) {
    }
}
976
// Compute-pass recording: thin translation layer onto the Dawn compute pass.
impl ComputePassInterface for DawnComputePass {
    fn set_pipeline(&mut self, pipeline: &DispatchComputePipeline) {
        let pipeline = expect_compute_pipeline(pipeline);
        self.inner.clone().set_pipeline(pipeline);
    }

    fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: Option<&DispatchBindGroup>,
        offsets: &[wgpu::DynamicOffset],
    ) {
        let group = bind_group.map(expect_bind_group);
        self.inner.clone().set_bind_group(index, group, offsets);
    }

    // Upload immediate (push-constant-style) data to the pass.
    fn set_immediates(&mut self, offset: u32, data: &[u8]) {
        let data = bytes_to_u32(data);
        let data_ptr = data.as_ptr().cast::<std::ffi::c_void>();
        // Length is in bytes, not u32 elements.
        let data_len = data.len() * std::mem::size_of::<u32>();
        // SAFETY: the raw slice borrows `data`'s u32 storage, which stays
        // alive across the call below. Assumes `bytes_to_u32` yields a
        // buffer covering exactly `data_len` bytes — TODO confirm.
        let data_slice = unsafe { std::slice::from_raw_parts(data_ptr, data_len) };
        self.inner.clone().set_immediates(offset, data_slice);
    }

    fn insert_debug_marker(&mut self, label: &str) {
        self.inner.clone().insert_debug_marker(label.to_string());
    }

    fn push_debug_group(&mut self, group_label: &str) {
        self.inner.clone().push_debug_group(group_label.to_string());
    }

    fn pop_debug_group(&mut self) {
        self.inner.clone().pop_debug_group();
    }

    fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32) {
        let set = expect_query_set(query_set);
        self.inner.clone().write_timestamp(set, query_index);
    }

    // Pipeline-statistics queries are unsupported by this backend.
    fn begin_pipeline_statistics_query(
        &mut self,
        _query_set: &DispatchQuerySet,
        _query_index: u32,
    ) {
        panic!("wgpu-compat: pipeline statistics not supported");
    }

    fn end_pipeline_statistics_query(&mut self) {
        panic!("wgpu-compat: pipeline statistics not supported");
    }

    fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
        self.inner.clone().dispatch_workgroups(x, y, z);
    }

    fn dispatch_workgroups_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone()
            .dispatch_workgroups_indirect(buffer, indirect_offset);
    }

    // End the pass exactly once; the `ended` flag also guards the Drop impl
    // so Dawn's `end` is never called twice.
    fn end(&mut self) {
        if !self.ended {
            self.inner.clone().end();
            self.ended = true;
        }
    }
}
1051
1052impl Drop for DawnComputePass {
1053    fn drop(&mut self) {
1054        if !self.ended {
1055            self.inner.clone().end();
1056            self.ended = true;
1057        }
1058    }
1059}
1060
// Render-pass recording: translates wgpu draw-state and draw calls onto the
// Dawn render pass. Unsupported features panic with a "wgpu-compat" message.
impl RenderPassInterface for DawnRenderPass {
    fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline) {
        let pipeline = expect_render_pipeline(pipeline);
        self.inner.clone().set_pipeline(pipeline);
    }

    fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: Option<&DispatchBindGroup>,
        offsets: &[wgpu::DynamicOffset],
    ) {
        let group = bind_group.map(expect_bind_group);
        self.inner.clone().set_bind_group(index, group, offsets);
    }

    // `None` size binds through the end of the buffer (WHOLE_SIZE).
    fn set_index_buffer(
        &mut self,
        buffer: &DispatchBuffer,
        index_format: wgpu::IndexFormat,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner.clone()
            .set_index_buffer(buffer, map_index_format(index_format), offset, size);
    }

    // `None` size binds through the end of the buffer (WHOLE_SIZE).
    fn set_vertex_buffer(
        &mut self,
        slot: u32,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner.clone()
            .set_vertex_buffer(slot, Some(buffer), offset, size);
    }

    // Upload immediate (push-constant-style) data to the pass.
    fn set_immediates(&mut self, offset: u32, data: &[u8]) {
        let data = bytes_to_u32(data);
        let data_ptr = data.as_ptr().cast::<std::ffi::c_void>();
        // Length is in bytes, not u32 elements.
        let data_len = data.len() * std::mem::size_of::<u32>();
        // SAFETY: the raw slice borrows `data`'s u32 storage, which stays
        // alive across the call below. Assumes `bytes_to_u32` yields a
        // buffer covering exactly `data_len` bytes — TODO confirm.
        let data_slice = unsafe { std::slice::from_raw_parts(data_ptr, data_len) };
        self.inner.clone().set_immediates(offset, data_slice);
    }

    fn set_blend_constant(&mut self, color: wgpu::Color) {
        let color = map_color(color);
        self.inner.clone().set_blend_constant(&color);
    }

    fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
        self.inner.clone().set_scissor_rect(x, y, width, height);
    }

    fn set_viewport(
        &mut self,
        x: f32,
        y: f32,
        width: f32,
        height: f32,
        min_depth: f32,
        max_depth: f32,
    ) {
        self.inner.clone()
            .set_viewport(x, y, width, height, min_depth, max_depth);
    }

    fn set_stencil_reference(&mut self, reference: u32) {
        self.inner.clone().set_stencil_reference(reference);
    }

    // wgpu expresses draws as ranges; Dawn takes (count, first) pairs.
    fn draw(&mut self, vertices: std::ops::Range<u32>, instances: std::ops::Range<u32>) {
        self.inner.clone().draw(
            vertices.end - vertices.start,
            instances.end - instances.start,
            vertices.start,
            instances.start,
        );
    }

    // Range-to-(count, first) translation as in `draw`, plus the base vertex.
    fn draw_indexed(
        &mut self,
        indices: std::ops::Range<u32>,
        base_vertex: i32,
        instances: std::ops::Range<u32>,
    ) {
        self.inner.clone().draw_indexed(
            indices.end - indices.start,
            instances.end - instances.start,
            indices.start,
            base_vertex,
            instances.start,
        );
    }

    // Mesh shading is unsupported by this backend.
    fn draw_mesh_tasks(&mut self, _group_count_x: u32, _group_count_y: u32, _group_count_z: u32) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    fn draw_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone().draw_indirect(buffer, indirect_offset);
    }

    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone()
            .draw_indexed_indirect(buffer, indirect_offset);
    }

    // Mesh shading is unsupported by this backend.
    fn draw_mesh_tasks_indirect(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
    ) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    // The trailing (None, 0) means: no count buffer, fixed `count` draws.
    fn multi_draw_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
        count: u32,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone()
            .multi_draw_indirect(buffer, indirect_offset, count, None, 0);
    }

    // The trailing (None, 0) means: no count buffer, fixed `count` draws.
    fn multi_draw_indexed_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
        count: u32,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone()
            .multi_draw_indexed_indirect(buffer, indirect_offset, count, None, 0);
    }

    // Count-buffer variants are unsupported by this backend.
    fn multi_draw_indirect_count(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count_buffer: &DispatchBuffer,
        _count_buffer_offset: wgpu::BufferAddress,
        _max_count: u32,
    ) {
        panic!("wgpu-compat: multi_draw_indirect_count not supported");
    }

    // Mesh shading is unsupported by this backend.
    fn multi_draw_mesh_tasks_indirect(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count: u32,
    ) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    // Count-buffer variants are unsupported by this backend.
    fn multi_draw_indexed_indirect_count(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count_buffer: &DispatchBuffer,
        _count_buffer_offset: wgpu::BufferAddress,
        _max_count: u32,
    ) {
        panic!("wgpu-compat: multi_draw_indexed_indirect_count not supported");
    }

    // Mesh shading is unsupported by this backend.
    fn multi_draw_mesh_tasks_indirect_count(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count_buffer: &DispatchBuffer,
        _count_buffer_offset: wgpu::BufferAddress,
        _max_count: u32,
    ) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    fn insert_debug_marker(&mut self, label: &str) {
        self.inner.clone().insert_debug_marker(label.to_string());
    }

    fn push_debug_group(&mut self, group_label: &str) {
        self.inner.clone().push_debug_group(group_label.to_string());
    }

    fn pop_debug_group(&mut self) {
        self.inner.clone().pop_debug_group();
    }

    fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32) {
        let set = expect_query_set(query_set);
        self.inner.clone().write_timestamp(set, query_index);
    }

    fn begin_occlusion_query(&mut self, query_index: u32) {
        self.inner.clone().begin_occlusion_query(query_index);
    }

    fn end_occlusion_query(&mut self) {
        self.inner.clone().end_occlusion_query();
    }

    // Pipeline-statistics queries are unsupported by this backend.
    fn begin_pipeline_statistics_query(
        &mut self,
        _query_set: &DispatchQuerySet,
        _query_index: u32,
    ) {
        panic!("wgpu-compat: pipeline statistics not supported");
    }

    fn end_pipeline_statistics_query(&mut self) {
        panic!("wgpu-compat: pipeline statistics not supported");
    }

    // Collect the unwrapped Dawn bundle handles before the FFI call, which
    // wants a contiguous slice.
    fn execute_bundles(&mut self, render_bundles: &mut dyn Iterator<Item = &DispatchRenderBundle>) {
        let bundles = render_bundles.map(expect_render_bundle).collect::<Vec<_>>();
        self.inner.clone().execute_bundles(&bundles);
    }

    // End the pass exactly once; the `ended` flag also guards the Drop impl
    // so Dawn's `end` is never called twice.
    fn end(&mut self) {
        if !self.ended {
            self.inner.clone().end();
            self.ended = true;
        }
    }
}
1305
1306impl Drop for DawnRenderPass {
1307    fn drop(&mut self) {
1308        if !self.ended {
1309            self.inner.clone().end();
1310            self.ended = true;
1311        }
1312    }
1313}
1314
// Render-bundle recording: same state/draw translation as the render pass,
// recorded into a reusable bundle instead of a live pass.
impl RenderBundleEncoderInterface for DawnRenderBundleEncoder {
    fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline) {
        let pipeline = expect_render_pipeline(pipeline);
        self.inner.clone().set_pipeline(pipeline);
    }

    fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: Option<&DispatchBindGroup>,
        offsets: &[wgpu::DynamicOffset],
    ) {
        let group = bind_group.map(expect_bind_group);
        self.inner.clone().set_bind_group(index, group, offsets);
    }

    // `None` size binds through the end of the buffer (WHOLE_SIZE).
    fn set_index_buffer(
        &mut self,
        buffer: &DispatchBuffer,
        index_format: wgpu::IndexFormat,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner.clone()
            .set_index_buffer(buffer, map_index_format(index_format), offset, size);
    }

    // `None` size binds through the end of the buffer (WHOLE_SIZE).
    fn set_vertex_buffer(
        &mut self,
        slot: u32,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner.clone()
            .set_vertex_buffer(slot, Some(buffer), offset, size);
    }

    // Upload immediate (push-constant-style) data to the bundle.
    fn set_immediates(&mut self, offset: u32, data: &[u8]) {
        let data = bytes_to_u32(data);
        let data_ptr = data.as_ptr().cast::<std::ffi::c_void>();
        // Length is in bytes, not u32 elements.
        let data_len = data.len() * std::mem::size_of::<u32>();
        // SAFETY: the raw slice borrows `data`'s u32 storage, which stays
        // alive across the call below. Assumes `bytes_to_u32` yields a
        // buffer covering exactly `data_len` bytes — TODO confirm.
        let data_slice = unsafe { std::slice::from_raw_parts(data_ptr, data_len) };
        self.inner.clone().set_immediates(offset, data_slice);
    }

    // wgpu expresses draws as ranges; Dawn takes (count, first) pairs.
    fn draw(&mut self, vertices: std::ops::Range<u32>, instances: std::ops::Range<u32>) {
        self.inner.clone().draw(
            vertices.end - vertices.start,
            instances.end - instances.start,
            vertices.start,
            instances.start,
        );
    }

    // Range-to-(count, first) translation as in `draw`, plus the base vertex.
    fn draw_indexed(
        &mut self,
        indices: std::ops::Range<u32>,
        base_vertex: i32,
        instances: std::ops::Range<u32>,
    ) {
        self.inner.clone().draw_indexed(
            indices.end - indices.start,
            instances.end - instances.start,
            indices.start,
            base_vertex,
            instances.start,
        );
    }

    fn draw_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone().draw_indirect(buffer, indirect_offset);
    }

    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone()
            .draw_indexed_indirect(buffer, indirect_offset);
    }

    // Consume the encoder, carrying only the label over from the wgpu
    // descriptor, and produce the finished bundle.
    fn finish(self, desc: &wgpu::RenderBundleDescriptor<'_>) -> DispatchRenderBundle {
        let mut dawn_desc = RenderBundleDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        let bundle = self.inner.clone().finish(Some(&dawn_desc));
        dispatch_render_bundle(bundle)
    }
}
1415
// Command buffers and render bundles are opaque handles; the default trait
// implementations suffice.
impl CommandBufferInterface for DawnCommandBuffer {}
impl RenderBundleInterface for DawnRenderBundle {}
1418
impl SurfaceInterface for DawnSurface {
    // Query the surface's capabilities for `adapter` and translate them into
    // wgpu's capability struct.
    fn get_capabilities(&self, adapter: &DispatchAdapter) -> wgpu::SurfaceCapabilities {
        let adapter = expect_adapter(adapter);
        let mut caps = SurfaceCapabilities::new();
        // The call's result status is ignored; on failure `caps` keeps its
        // freshly-constructed defaults.
        let _ = self.inner.clone().get_capabilities(adapter, &mut caps);
        map_surface_capabilities(caps)
    }

    // (Re)configure the surface, attaching the Dawn device to the translated
    // configuration before applying it.
    fn configure(&self, device: &DispatchDevice, config: &wgpu::SurfaceConfiguration) {
        let mut config = map_surface_configuration(config);
        config.device = Some(expect_device(device));
        self.inner.clone().configure(&config);
    }

    // Acquire the next presentable texture and translate Dawn's
    // get-current-texture status into wgpu's `SurfaceStatus`.
    fn get_current_texture(
        &self,
    ) -> (
        Option<DispatchTexture>,
        wgpu::SurfaceStatus,
        DispatchSurfaceOutputDetail,
    ) {
        let mut surface_texture = SurfaceTexture::new();
        self.inner.clone().get_current_texture(&mut surface_texture);
        // A missing status is treated as an error; Dawn's `Error` has no
        // direct wgpu counterpart, so it maps to `Unknown`.
        let status = match surface_texture
            .status
            .unwrap_or(SurfaceGetCurrentTextureStatus::Error)
        {
            SurfaceGetCurrentTextureStatus::SuccessOptimal => wgpu::SurfaceStatus::Good,
            SurfaceGetCurrentTextureStatus::SuccessSuboptimal => wgpu::SurfaceStatus::Suboptimal,
            SurfaceGetCurrentTextureStatus::Timeout => wgpu::SurfaceStatus::Timeout,
            SurfaceGetCurrentTextureStatus::Outdated => wgpu::SurfaceStatus::Outdated,
            SurfaceGetCurrentTextureStatus::Lost => wgpu::SurfaceStatus::Lost,
            SurfaceGetCurrentTextureStatus::Error => wgpu::SurfaceStatus::Unknown,
        };
        // The texture may be absent on failure; the output detail keeps a
        // surface handle so `present` can be called later.
        (
            surface_texture.texture.map(dispatch_texture),
            status,
            dispatch_surface_output_detail(self.inner.clone()),
        )
    }
}
1460
impl SurfaceOutputDetailInterface for DawnSurfaceOutputDetail {
    // Present the most recently acquired surface texture; the status
    // returned by Dawn is intentionally ignored.
    fn present(&self) {
        let _ = self.surface.clone().present();
    }

    fn texture_discard(&self) {
        // Dawn does not expose an explicit surface texture discard API.
    }
}
1470
impl QueueWriteBufferInterface for DawnQueueWriteBuffer {
    // Read-only view of the staging bytes.
    fn slice(&self) -> &[u8] {
        &self.inner
    }

    // Mutable view so callers can fill the staging bytes before submission.
    fn slice_mut(&mut self) -> &mut [u8] {
        &mut self.inner
    }
}
1480
impl BufferMappedRangeInterface for DawnBufferMappedRange {
    // Borrow the mapped bytes; returns an empty slice when the mapping is
    // null or zero-sized so callers never dereference a bad pointer.
    fn slice(&self) -> &[u8] {
        if self.data.is_null() || self.size == 0 {
            return &[];
        }
        // SAFETY: `data`/`size` are assumed to describe a live mapping that
        // remains valid until the buffer is unmapped — TODO confirm that
        // invariant is upheld by the owning buffer wrapper.
        unsafe { std::slice::from_raw_parts(self.data, self.size) }
    }

    fn slice_mut(&mut self) -> &mut [u8] {
        if self.data.is_null() || self.size == 0 {
            return &mut [];
        }
        // SAFETY: same mapping-validity assumption as `slice`; `&mut self`
        // gives exclusive access through this wrapper.
        unsafe { std::slice::from_raw_parts_mut(self.data, self.size) }
    }

    // Web-only accessor required by the trait; never reachable in this
    // native backend (note the non-standard `cfg(web)` predicate).
    #[cfg(web)]
    #[allow(unexpected_cfgs)]
    fn as_uint8array(&self) -> &js_sys::Uint8Array {
        unimplemented!();
    }
}