1use crate::dispatch::*;
2use crate::error::DawnError;
3use crate::future::*;
4use crate::mapping::*;
5use crate::types::*;
6use dawn_rs::*;
7use std::pin::Pin;
8use std::sync::{Arc, Mutex};
9use wgpu::custom::*;
10
/// Reinterprets a byte slice as a `c_void` slice of the same length, for FFI
/// entry points that take untyped byte buffers. `c_void` is one byte wide, so
/// the element count equals the byte count.
fn as_c_void_slice(data: &[u8]) -> &[std::ffi::c_void] {
    let ptr = data.as_ptr().cast::<std::ffi::c_void>();
    // SAFETY: `ptr` points at `data.len()` initialized bytes owned by `data`,
    // and the returned slice borrows `data`, so it cannot outlive its storage.
    unsafe { std::slice::from_raw_parts(ptr, data.len()) }
}
19
/// Reinterprets a `u32` slice as its underlying bytes (as `c_void`) for FFI.
/// The returned slice length is the total byte length, not the element count.
fn u32_as_c_void_slice(data: &[u32]) -> &[std::ffi::c_void] {
    let byte_len = std::mem::size_of_val(data);
    // SAFETY: the pointer covers exactly `byte_len` initialized bytes owned by
    // `data`; the output lifetime is tied to the input borrow.
    unsafe { std::slice::from_raw_parts(data.as_ptr().cast::<std::ffi::c_void>(), byte_len) }
}
28
#[cfg(feature = "wire")]
unsafe extern "C" {
    // Provided by the native side of the wire bridge; installs Dawn's proc
    // table so dawn_rs calls dispatch correctly.
    // NOTE(review): behavior inferred from the name and the call site in
    // `ensure_native_procs` — confirm against the C/C++ definition.
    fn dawn_rs_wire_set_native_procs();
}
33
/// Installs Dawn's native proc table exactly once per process (wire builds).
#[cfg(feature = "wire")]
fn ensure_native_procs() {
    static SET_PROCS: std::sync::Once = std::sync::Once::new();
    SET_PROCS.call_once(|| {
        // SAFETY: the FFI call takes no arguments; `Once` guarantees it runs
        // at most one time even under concurrent callers.
        unsafe { dawn_rs_wire_set_native_procs() }
    });
}
41
// Without the wire feature there is no proc table to install; this no-op keeps
// call sites unconditional.
#[cfg(not(feature = "wire"))]
fn ensure_native_procs() {}
44
impl InstanceInterface for DawnInstance {
    /// Creates the Dawn-backed instance. The incoming wgpu descriptor is
    /// ignored; Dawn is always asked for the `TimedWaitAny` instance feature
    /// (needed by the `wait_any` call in `request_adapter`) with a wait-list
    /// capacity of 64.
    fn new(_desc: &wgpu::InstanceDescriptor) -> Self {
        ensure_native_procs();
        Self::from_factory(
            move || {
                let mut desc = InstanceDescriptor::new();
                desc.required_features = Some(vec![InstanceFeatureName::TimedWaitAny]);
                let mut limits = InstanceLimits::new();
                limits.timed_wait_any_max_count = Some(64);
                desc.required_limits = Some(limits);
                Instance::new(Some(&desc))
            },
            // NOTE(review): the wire-only second argument's meaning is defined
            // by `from_factory` elsewhere; `None` presumably selects defaults.
            #[cfg(feature = "wire")]
            None,
        )
    }

    /// Wraps a platform window/layer handle in a Dawn surface.
    ///
    /// Raw pointers are passed into the `with_instance` closure as `usize` and
    /// reconstituted inside it — apparently to satisfy a `Send`-style bound on
    /// the closure (NOTE(review): confirm against `with_instance`'s signature).
    unsafe fn create_surface(
        &self,
        target: wgpu::SurfaceTargetUnsafe,
    ) -> Result<DispatchSurface, wgpu::CreateSurfaceError> {
        match target {
            // Caller hands us an existing CAMetalLayer pointer directly.
            #[cfg(target_os = "macos")]
            wgpu::SurfaceTargetUnsafe::CoreAnimationLayer(layer) => {
                let layer_addr = layer as usize;
                let surface = self.with_instance(move |state| {
                    let mut desc = SurfaceDescriptor::new();
                    let source = SurfaceSourceMetalLayer {
                        layer: Some(layer_addr as *mut std::ffi::c_void),
                    };
                    desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                    state.instance.clone().create_surface(&desc)
                });
                // No owned layer handle here: the caller retains the layer.
                let dawn_surface = DawnSurface {
                    inner: surface,
                    metal_layer: None,
                };
                Ok(dispatch_surface(dawn_surface))
            }
            // AppKit window: attach (or reuse) a CAMetalLayer on the NSView.
            #[cfg(target_os = "macos")]
            wgpu::SurfaceTargetUnsafe::RawHandle {
                raw_window_handle, ..
            } => {
                use wgpu::rwh::RawWindowHandle;
                match raw_window_handle {
                    RawWindowHandle::AppKit(handle) => {
                        let layer =
                            unsafe { raw_window_metal::Layer::from_ns_view(handle.ns_view) };
                        // `into_raw` transfers ownership of the layer reference
                        // to us; the `MetalLayerHandle` stored on the surface
                        // keeps it alive. NOTE(review): confirm the handle's
                        // Drop releases the layer, otherwise this leaks.
                        let layer_ptr = layer.into_raw();
                        let layer_addr = layer_ptr.as_ptr() as usize;
                        let surface = self.with_instance(move |state| {
                            let mut desc = SurfaceDescriptor::new();
                            let source = SurfaceSourceMetalLayer {
                                layer: Some(layer_addr as *mut std::ffi::c_void),
                            };
                            desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                            state.instance.clone().create_surface(&desc)
                        });
                        let handle = MetalLayerHandle {
                            ptr: layer_addr as *mut std::ffi::c_void,
                        };
                        let dawn_surface = DawnSurface {
                            inner: surface,
                            metal_layer: Some(Arc::new(handle)),
                        };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    _ => panic!("wgpu-compat: unsupported raw window handle on macOS"),
                }
            }
            #[cfg(target_os = "windows")]
            wgpu::SurfaceTargetUnsafe::RawHandle {
                raw_window_handle, ..
            } => {
                use wgpu::rwh::RawWindowHandle;
                match raw_window_handle {
                    RawWindowHandle::Win32(handle) => {
                        let hinstance = handle.hinstance.map(|h| h.get() as usize);
                        let hwnd = handle.hwnd.get() as usize;
                        let surface = self.with_instance(move |state| {
                            let mut desc = SurfaceDescriptor::new();
                            let source = SurfaceSourceWindowsHWND {
                                hinstance: hinstance.map(|h| h as _),
                                hwnd: Some(hwnd as _),
                            };
                            desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                            state.instance.clone().create_surface(&desc)
                        });
                        // NOTE(review): no `metal_layer` field here — the field
                        // is presumably cfg-gated to macOS in `DawnSurface`.
                        let dawn_surface = DawnSurface { inner: surface };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    _ => panic!("wgpu-compat: unsupported raw window handle on Windows"),
                }
            }
            // Linux/BSD: Wayland, Xlib, and XCB are supported.
            #[cfg(all(unix, not(target_vendor = "apple")))]
            wgpu::SurfaceTargetUnsafe::RawHandle {
                raw_display_handle,
                raw_window_handle,
            } => {
                use wgpu::rwh::{RawDisplayHandle, RawWindowHandle};
                match (raw_display_handle, raw_window_handle) {
                    (RawDisplayHandle::Wayland(display), RawWindowHandle::Wayland(window)) => {
                        let display_ptr = display.display.as_ptr() as usize;
                        let surface_ptr = window.surface.as_ptr() as usize;
                        let surface = self.with_instance(move |state| {
                            let mut desc = SurfaceDescriptor::new();
                            let source = SurfaceSourceWaylandSurface {
                                display: Some(display_ptr as *mut std::ffi::c_void),
                                surface: Some(surface_ptr as *mut std::ffi::c_void),
                            };
                            desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                            state.instance.clone().create_surface(&desc)
                        });
                        let dawn_surface = DawnSurface { inner: surface };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    (RawDisplayHandle::Xlib(display), RawWindowHandle::Xlib(window)) => {
                        // `display` is Option in rwh; a missing display pointer
                        // is treated as a caller bug.
                        let display_ptr = display.display.unwrap().as_ptr() as usize;
                        let window_id = window.window as u64;
                        let surface = self.with_instance(move |state| {
                            let mut desc = SurfaceDescriptor::new();
                            let source = SurfaceSourceXlibWindow {
                                display: Some(display_ptr as *mut std::ffi::c_void),
                                window: Some(window_id),
                            };
                            desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                            state.instance.clone().create_surface(&desc)
                        });
                        let dawn_surface = DawnSurface { inner: surface };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    (RawDisplayHandle::Xcb(display), RawWindowHandle::Xcb(window)) => {
                        let connection_ptr = display.connection.unwrap().as_ptr() as usize;
                        let window_id = window.window.get();
                        let surface = self.with_instance(move |state| {
                            let mut desc = SurfaceDescriptor::new();
                            let source = SurfaceSourceXCBWindow {
                                connection: Some(connection_ptr as *mut std::ffi::c_void),
                                window: Some(window_id),
                            };
                            desc = desc.with_extension(SurfaceDescriptorExtension::from(source));
                            state.instance.clone().create_surface(&desc)
                        });
                        let dawn_surface = DawnSurface { inner: surface };
                        Ok(dispatch_surface(dawn_surface))
                    }
                    _ => panic!("wgpu-compat: unsupported raw window handle on unix"),
                }
            }
            _ => panic!("wgpu-compat: unsupported surface target"),
        }
    }

    /// Requests a Dawn adapter and bridges the callback into a wgpu future.
    ///
    /// On failure every `Backends` field of the `NotFound` error is empty —
    /// Dawn does not report per-backend diagnostics here.
    fn request_adapter(
        &self,
        options: &wgpu::RequestAdapterOptions<'_, '_>,
    ) -> Pin<Box<dyn wgpu::custom::RequestAdapterFuture>> {
        let (future, shared) = CallbackFuture::new();
        let mut dawn_options = RequestAdapterOptions::new();
        dawn_options.power_preference = Some(map_power_preference(options.power_preference));
        dawn_options.force_fallback_adapter = Some(options.force_fallback_adapter);
        if let Some(surface) = options.compatible_surface {
            dawn_options.compatible_surface = Some(expect_surface_from_api(surface).inner.clone());
        }
        // Shared-texture-memory interop requires pinning the backend to the
        // platform API those features are implemented on.
        #[cfg(feature = "shared_texture_memory")]
        {
            #[cfg(target_os = "windows")]
            {
                #[cfg(target_vendor = "win7")]
                {
                    dawn_options.backend_type = Some(dawn_rs::BackendType::D3D11)
                }
                #[cfg(not(target_vendor = "win7"))]
                {
                    dawn_options.backend_type = Some(dawn_rs::BackendType::D3D12)
                }
            }

            #[cfg(target_os = "macos")]
            {
                dawn_options.backend_type = Some(dawn_rs::BackendType::Metal);
            }
        }
        // The worker handle is captured so the resulting DawnAdapter can keep
        // driving the same instance thread/state.
        let worker = Arc::clone(&self.inner);
        let future_handle = self.with_instance(move |state| {
            state.instance.clone().request_adapter(
                Some(&dawn_options),
                move |status, adapter, _message| {
                    if status == RequestAdapterStatus::Success {
                        let adapter = adapter.expect("wgpu-compat: missing adapter");
                        complete_shared(
                            &shared,
                            Ok(dispatch_adapter(DawnAdapter::from_adapter(
                                Arc::clone(&worker),
                                adapter,
                            ))),
                        );
                    } else {
                        complete_shared(
                            &shared,
                            Err(wgpu::RequestAdapterError::NotFound {
                                active_backends: wgpu::Backends::empty(),
                                requested_backends: wgpu::Backends::empty(),
                                supported_backends: wgpu::Backends::empty(),
                                no_fallback_backends: wgpu::Backends::empty(),
                                no_adapter_backends: wgpu::Backends::empty(),
                                incompatible_surface_backends: wgpu::Backends::empty(),
                            }),
                        );
                    }
                },
            )
        });
        // Zero-timeout wait: nudges Dawn's future machinery without blocking.
        // NOTE(review): assumes the callback is otherwise delivered via
        // `process_events` in `poll_all_devices` — confirm.
        let _ = self.with_instance(move |state| {
            state.instance.clone().wait_any(
                Some(&mut [FutureWaitInfo {
                    future: Some(future_handle),
                    completed: None,
                }]),
                0,
            )
        });
        Box::pin(future)
    }

    /// Pumps Dawn's event loop once; always reports "work done" since Dawn
    /// exposes no aggregate completion signal here. `_force_wait` is ignored.
    fn poll_all_devices(&self, _force_wait: bool) -> bool {
        self.with_instance(move |state| state.instance.clone().process_events());
        true
    }

    /// Queries Dawn's supported WGSL language features. Only the
    /// readonly/readwrite storage-texture feature has a wgpu counterpart here;
    /// everything else Dawn reports is dropped.
    fn wgsl_language_features(&self) -> wgpu::WgslLanguageFeatures {
        let feature_list = self.with_instance(move |state| {
            let mut features = SupportedWGSLLanguageFeatures::new();
            state
                .instance
                .clone()
                .get_wgsl_language_features(&mut features);
            features.features.clone().unwrap_or_default()
        });
        let mut out = wgpu::WgslLanguageFeatures::empty();
        for feature in feature_list {
            if feature == WGSLLanguageFeatureName::ReadonlyAndReadwriteStorageTextures {
                out |= wgpu::WgslLanguageFeatures::ReadOnlyAndReadWriteStorageTextures;
            }
        }
        out
    }

    /// Adapter enumeration is not implemented for this backend; callers get an
    /// empty list and must go through `request_adapter`.
    fn enumerate_adapters(
        &self,
        _backends: wgpu::Backends,
    ) -> Pin<Box<dyn wgpu::custom::EnumerateAdapterFuture>> {
        Box::pin(std::future::ready(Vec::new()))
    }
}
300
impl AdapterInterface for DawnAdapter {
    /// Requests a Dawn device, wiring up shared slots for the uncaptured-error
    /// handler and device-lost callback that the resulting `DawnDevice` fills
    /// in later via `on_uncaptured_error` / `set_device_lost_callback`.
    ///
    /// Always requests `DawnInternalUsages` and
    /// `ImplicitDeviceSynchronization` in addition to the caller's features.
    /// Panics if Dawn refuses the device request.
    fn request_device(
        &self,
        desc: &wgpu::DeviceDescriptor<'_>,
    ) -> Pin<Box<dyn wgpu::custom::RequestDeviceFuture>> {
        let (future, shared) = CallbackFuture::new();
        let label = label_to_string(desc.label);
        let mut required_features = vec![
            FeatureName::DawnInternalUsages,
            FeatureName::ImplicitDeviceSynchronization,
        ];
        if !desc.required_features.is_empty() {
            required_features.extend(map_features_to_dawn(desc.required_features));
        }

        // Platform interop features needed for shared-texture-memory import.
        #[cfg(feature = "shared_texture_memory")]
        {
            #[cfg(target_os = "windows")]
            {
                required_features.push(FeatureName::SharedTextureMemoryDXGISharedHandle);
                required_features.push(FeatureName::SharedFenceDXGISharedHandle);
            }
            #[cfg(target_os = "macos")]
            {
                required_features.push(FeatureName::SharedTextureMemoryIOSurface);
                required_features.push(FeatureName::SharedFenceMTLSharedEvent);
            }

            #[cfg(target_os = "linux")]
            {
                required_features.push(FeatureName::SharedTextureMemoryDmaBuf);
            }
        }
        // Only forward limits when the caller deviated from wgpu defaults;
        // otherwise let Dawn pick its own defaults.
        let required_limits = if desc.required_limits != wgpu::Limits::default() {
            Some(map_limits_to_dawn(&desc.required_limits))
        } else {
            None
        };
        // Both slots start empty; the device-side setters populate them after
        // creation, and the Dawn callbacks read them through these Arcs.
        let uncaptured_error_handler: Arc<Mutex<Option<Arc<dyn wgpu::UncapturedErrorHandler>>>> =
            Arc::new(Mutex::new(None));
        let device_lost_callback: Arc<Mutex<Option<wgpu::custom::BoxDeviceLostCallback>>> =
            Arc::new(Mutex::new(None));

        let error_handler_state = Arc::clone(&uncaptured_error_handler);
        let lost_callback_state = Arc::clone(&device_lost_callback);
        let _future_handle = self.with_adapter(move |adapter| {
            let mut desc = DeviceDescriptor::new();
            desc.label = label;
            desc.required_features = Some(required_features);
            desc.required_limits = required_limits;

            // NOTE(review): `.callback.replace(...)` on a non-mut binding
            // implies the field is interior-mutable (Cell/RefCell-like).
            let error_info = dawn_rs::UncapturedErrorCallbackInfo::new();
            let error_handler_state = Arc::clone(&error_handler_state);
            error_info
                .callback
                .replace(Some(Box::new(move |_devices, ty, message| {
                    if ty == ErrorType::NoError {
                        return;
                    }
                    // Clone the handler out so the lock is not held while the
                    // user handler runs.
                    let handler = error_handler_state
                        .lock()
                        .expect("wgpu-compat: uncaptured error handler mutex poisoned")
                        .clone();
                    if let Some(handler) = handler {
                        handler(map_uncaptured_error(ty, message));
                    }
                })));
            desc.uncaptured_error_callback_info = Some(error_info);

            let lost_info = dawn_rs::DeviceLostCallbackInfo::new();
            let lost_callback_state = Arc::clone(&lost_callback_state);
            lost_info
                .callback
                .replace(Some(Box::new(move |_, reason, message| {
                    // `.take()` so the lost callback fires at most once.
                    let callback = lost_callback_state
                        .lock()
                        .expect("wgpu-compat: device lost callback mutex poisoned")
                        .take();
                    if let Some(callback) = callback {
                        callback(
                            match reason {
                                DeviceLostReason::Destroyed => wgpu::DeviceLostReason::Destroyed,
                                _ => wgpu::DeviceLostReason::Unknown,
                            },
                            message,
                        );
                    }
                })));
            desc.device_lost_callback_info = Some(lost_info);

            adapter
                .clone()
                .request_device(Some(&desc), move |status, device, message| {
                    if status == RequestDeviceStatus::Success {
                        let device = device.expect("wgpu-compat: missing device");
                        let queue = device.get_queue();
                        complete_shared(
                            &shared,
                            Ok((
                                dispatch_device_with_callback_state(
                                    device,
                                    Arc::clone(&device_lost_callback),
                                    Arc::clone(&uncaptured_error_handler),
                                ),
                                dispatch_queue(queue),
                            )),
                        );
                    } else {
                        panic!("wgpu-compat: request_device failed {}", message);
                    }
                })
        });
        Box::pin(future)
    }

    /// Any Dawn-backed surface is considered compatible with this adapter.
    fn is_surface_supported(&self, surface: &DispatchSurface) -> bool {
        surface.as_custom::<DawnSurface>().is_some()
    }

    /// Queries Dawn's feature list and maps it to wgpu flags.
    fn features(&self) -> wgpu::Features {
        let feature_list = self.with_adapter(move |adapter| {
            let mut features = SupportedFeatures::new();
            adapter.get_features(&mut features);
            features.features.clone().unwrap_or_default()
        });
        // Repack into a fresh SupportedFeatures because the mapper takes the
        // struct, and the original cannot leave the closure.
        let mut features = SupportedFeatures::new();
        features.features = Some(feature_list);
        map_features_to_wgpu(&features)
    }

    fn limits(&self) -> wgpu::Limits {
        let limits = self.with_adapter(move |adapter| {
            let mut limits = Limits::new();
            let _ = adapter.get_limits(&mut limits);
            limits
        });
        map_limits_to_wgpu(&limits)
    }

    /// Dawn is a fully WebGPU-capable backend; default downlevel caps apply.
    fn downlevel_capabilities(&self) -> wgpu::DownlevelCapabilities {
        wgpu::DownlevelCapabilities::default()
    }

    /// Maps Dawn's adapter info into wgpu's.
    ///
    /// NOTE(review): field mapping looks intentional but unusual — Dawn's
    /// `description` becomes wgpu's `name`, `architecture` becomes `driver`,
    /// and `device` (name) becomes `driver_info`. Confirm this is desired.
    fn get_info(&self) -> wgpu::AdapterInfo {
        let info_tuple = self.with_adapter(move |adapter| {
            let mut info = AdapterInfo::new();
            let _ = adapter.get_info(&mut info);
            (
                info.description.clone().unwrap_or_default(),
                info.vendor_id.unwrap_or(0),
                info.device_id.unwrap_or(0),
                info.adapter_type.unwrap_or(AdapterType::Unknown),
                info.backend_type.unwrap_or(BackendType::Undefined),
                info.architecture.clone().unwrap_or_default(),
                info.device.clone().unwrap_or_default(),
            )
        });
        let (name, vendor, device, adapter_type, backend_type, driver, driver_info) = info_tuple;
        wgpu::AdapterInfo {
            name,
            vendor,
            device,
            device_type: match adapter_type {
                AdapterType::DiscreteGpu => wgpu::DeviceType::DiscreteGpu,
                AdapterType::IntegratedGpu => wgpu::DeviceType::IntegratedGpu,
                AdapterType::Cpu => wgpu::DeviceType::Cpu,
                AdapterType::Unknown => wgpu::DeviceType::Other,
            },
            backend: map_backend_type_to_wgpu(backend_type),
            driver,
            driver_info,
            device_pci_bus_id: String::new(),
            subgroup_min_size: wgpu::MINIMUM_SUBGROUP_MIN_SIZE,
            subgroup_max_size: wgpu::MAXIMUM_SUBGROUP_MAX_SIZE,
            transient_saves_memory: false,
        }
    }

    /// Per-format feature queries are not implemented; reports no extra
    /// capabilities for any format.
    fn get_texture_format_features(
        &self,
        _format: wgpu::TextureFormat,
    ) -> wgpu::TextureFormatFeatures {
        wgpu::TextureFormatFeatures {
            allowed_usages: wgpu::TextureUsages::empty(),
            flags: wgpu::TextureFormatFeatureFlags::empty(),
        }
    }

    /// Presentation timestamps are not supported by this backend.
    fn get_presentation_timestamp(&self) -> wgpu::PresentationTimestamp {
        wgpu::PresentationTimestamp::INVALID_TIMESTAMP
    }
}
493
impl DeviceInterface for DawnDevice {
    /// Reports features by querying the device's parent adapter (Dawn exposes
    /// feature queries on the adapter, not the device).
    fn features(&self) -> wgpu::Features {
        let adapter = self.inner.get_adapter();
        let mut supported = SupportedFeatures::new();
        adapter.get_features(&mut supported);
        map_features_to_wgpu(&supported)
    }

    /// Reports limits via the parent adapter, mapped to wgpu's limit struct.
    fn limits(&self) -> wgpu::Limits {
        let mut limits = Limits::new();
        let adapter = self.inner.get_adapter();
        let _ = adapter.get_limits(&mut limits);
        map_limits_to_wgpu(&limits)
    }

    /// Compiles a shader module. Runtime bound-check configuration is ignored;
    /// Dawn applies its own robustness behavior.
    fn create_shader_module(
        &self,
        desc: wgpu::ShaderModuleDescriptor<'_>,
        _shader_bound_checks: wgpu::ShaderRuntimeChecks,
    ) -> DispatchShaderModule {
        let dawn_desc = map_shader_module_descriptor(desc);
        let module = self.inner.create_shader_module(&dawn_desc);
        dispatch_shader_module(module)
    }

    /// Passthrough (precompiled) shader modules are not supported.
    unsafe fn create_shader_module_passthrough(
        &self,
        _desc: &wgpu::ShaderModuleDescriptorPassthrough<'_>,
    ) -> DispatchShaderModule {
        panic!("wgpu-compat: create_shader_module_passthrough not supported");
    }

    fn create_bind_group_layout(
        &self,
        desc: &wgpu::BindGroupLayoutDescriptor<'_>,
    ) -> DispatchBindGroupLayout {
        let dawn_desc = map_bind_group_layout_descriptor(desc);
        let layout = self.inner.create_bind_group_layout(&dawn_desc);
        dispatch_bind_group_layout(layout)
    }

    fn create_bind_group(&self, desc: &wgpu::BindGroupDescriptor<'_>) -> DispatchBindGroup {
        let dawn_desc = map_bind_group_descriptor(desc);
        let group = self.inner.create_bind_group(&dawn_desc);
        dispatch_bind_group(group)
    }

    fn create_pipeline_layout(
        &self,
        desc: &wgpu::PipelineLayoutDescriptor<'_>,
    ) -> DispatchPipelineLayout {
        let dawn_desc = map_pipeline_layout_descriptor(desc);
        let layout = self.inner.create_pipeline_layout(&dawn_desc);
        dispatch_pipeline_layout(layout)
    }

    fn create_render_pipeline(
        &self,
        desc: &wgpu::RenderPipelineDescriptor<'_>,
    ) -> DispatchRenderPipeline {
        let dawn_desc = map_render_pipeline_descriptor(desc);
        let pipeline = self.inner.create_render_pipeline(&dawn_desc);
        dispatch_render_pipeline(pipeline)
    }

    /// Mesh shading pipelines are not supported by this backend.
    fn create_mesh_pipeline(
        &self,
        _desc: &wgpu::MeshPipelineDescriptor<'_>,
    ) -> DispatchRenderPipeline {
        panic!("wgpu-compat: mesh pipelines not supported");
    }

    fn create_compute_pipeline(
        &self,
        desc: &wgpu::ComputePipelineDescriptor<'_>,
    ) -> DispatchComputePipeline {
        let dawn_desc = map_compute_pipeline_descriptor(desc);
        let pipeline = self.inner.create_compute_pipeline(&dawn_desc);
        dispatch_compute_pipeline(pipeline)
    }

    /// Pipeline caches are a stub: callers get an inert cache object
    /// (see `PipelineCacheInterface::get_data`, which returns `None`).
    unsafe fn create_pipeline_cache(
        &self,
        _desc: &wgpu::PipelineCacheDescriptor<'_>,
    ) -> DispatchPipelineCache {
        dispatch_pipeline_cache()
    }

    /// Creates a buffer; Dawn returning no buffer is treated as fatal since
    /// wgpu's interface has no error path here.
    fn create_buffer(&self, desc: &wgpu::BufferDescriptor<'_>) -> DispatchBuffer {
        let dawn_desc = map_buffer_descriptor(desc);
        let buffer = self
            .inner
            .create_buffer(&dawn_desc)
            .expect("wgpu-compat: create_buffer returned null");
        dispatch_buffer(buffer)
    }

    fn create_texture(&self, desc: &wgpu::TextureDescriptor<'_>) -> DispatchTexture {
        let dawn_desc = map_texture_descriptor(desc);
        let texture = self.inner.create_texture(&dawn_desc);
        dispatch_texture(texture)
    }

    /// Creates an external texture. Only the label is forwarded; plane views
    /// and the rest of the descriptor are currently dropped.
    /// NOTE(review): confirm callers do not rely on `_planes` being bound.
    fn create_external_texture(
        &self,
        desc: &wgpu::ExternalTextureDescriptor<'_>,
        _planes: &[&wgpu::TextureView],
    ) -> DispatchExternalTexture {
        let mut dawn_desc = ExternalTextureDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        let texture = self.inner.create_external_texture(&dawn_desc);
        dispatch_external_texture(texture)
    }

    /// Ray-tracing acceleration structures are stubbed out (see BlasInterface).
    fn create_blas(
        &self,
        _desc: &wgpu::CreateBlasDescriptor<'_>,
        _sizes: wgpu::BlasGeometrySizeDescriptors,
    ) -> (Option<u64>, DispatchBlas) {
        (None, dispatch_blas())
    }

    fn create_tlas(&self, _desc: &wgpu::CreateTlasDescriptor<'_>) -> DispatchTlas {
        dispatch_tlas()
    }

    fn create_sampler(&self, desc: &wgpu::SamplerDescriptor<'_>) -> DispatchSampler {
        let dawn_desc = map_sampler_descriptor(desc);
        let sampler = self.inner.create_sampler(Some(&dawn_desc));
        dispatch_sampler(sampler)
    }

    /// Creates a query set; only occlusion and timestamp queries map to Dawn.
    fn create_query_set(&self, desc: &wgpu::QuerySetDescriptor<'_>) -> DispatchQuerySet {
        let ty = match desc.ty {
            wgpu::QueryType::Occlusion => QueryType::Occlusion,
            wgpu::QueryType::Timestamp => QueryType::Timestamp,
            _ => panic!("wgpu-compat: query type not supported"),
        };
        let mut dawn_desc = QuerySetDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        dawn_desc.r#type = Some(ty);
        dawn_desc.count = Some(desc.count);
        let set = self.inner.create_query_set(&dawn_desc);
        dispatch_query_set(set)
    }

    fn create_command_encoder(
        &self,
        desc: &wgpu::CommandEncoderDescriptor<'_>,
    ) -> DispatchCommandEncoder {
        let dawn_desc = map_command_encoder_descriptor(desc);
        let encoder = self.inner.create_command_encoder(Some(&dawn_desc));
        dispatch_command_encoder(encoder)
    }

    fn create_render_bundle_encoder(
        &self,
        desc: &wgpu::RenderBundleEncoderDescriptor<'_>,
    ) -> DispatchRenderBundleEncoder {
        let dawn_desc = map_render_bundle_encoder_descriptor(desc);
        let encoder = self.inner.create_render_bundle_encoder(&dawn_desc);
        dispatch_render_bundle_encoder(encoder)
    }

    /// Stores the callback in the shared slot created during `request_device`;
    /// Dawn's device-lost callback `take()`s it when the device is lost.
    fn set_device_lost_callback(&self, device_lost_callback: wgpu::custom::BoxDeviceLostCallback) {
        self.device_lost_callback
            .lock()
            .expect("wgpu-compat: device lost callback mutex poisoned")
            .replace(device_lost_callback);
    }

    /// Stores the handler in the shared slot read by Dawn's uncaptured-error
    /// callback. Replaces any previously installed handler.
    fn on_uncaptured_error(&self, handler: Arc<dyn wgpu::UncapturedErrorHandler>) {
        self.uncaptured_error_handler
            .lock()
            .expect("wgpu-compat: uncaptured error handler mutex poisoned")
            .replace(handler);
    }

    /// Pushes an error scope. The returned index is always 0: Dawn pairs
    /// push/pop internally as a stack, so no handle is needed.
    fn push_error_scope(&self, filter: wgpu::ErrorFilter) -> u32 {
        let filter = match filter {
            wgpu::ErrorFilter::Validation => ErrorFilter::Validation,
            wgpu::ErrorFilter::OutOfMemory => ErrorFilter::OutOfMemory,
            wgpu::ErrorFilter::Internal => ErrorFilter::Internal,
        };
        self.inner.push_error_scope(filter);
        0
    }

    /// Pops the innermost error scope; resolves to `None` when the scope saw
    /// no error, `Some(error)` otherwise. A failed pop is surfaced as an
    /// internal error rather than a panic.
    fn pop_error_scope(&self, _index: u32) -> Pin<Box<dyn wgpu::custom::PopErrorScopeFuture>> {
        let (future, shared) = CallbackFuture::new();
        let _ = self.inner.pop_error_scope(move |status, ty, message| {
            if status == PopErrorScopeStatus::Success {
                if ty == ErrorType::NoError {
                    complete_shared(&shared, None);
                } else {
                    complete_shared(&shared, Some(map_uncaptured_error(ty, message)));
                }
            } else {
                complete_shared(
                    &shared,
                    Some(wgpu::Error::Internal {
                        source: Box::new(DawnError("pop_error_scope failed".to_string())),
                        description: "pop_error_scope failed".to_string(),
                    }),
                );
            }
        });
        Box::pin(future)
    }

    // Graphics debugger capture hooks are intentionally no-ops; the `&self.inner`
    // touch only silences the unused-field path.
    unsafe fn start_graphics_debugger_capture(&self) {
        let _ = &self.inner;
    }

    unsafe fn stop_graphics_debugger_capture(&self) {
        let _ = &self.inner;
    }

    /// Ticks the device once. The poll type is ignored and the queue is always
    /// reported empty — callers cannot rely on this for blocking waits.
    fn poll(&self, _poll_type: wgt::PollType<u64>) -> Result<wgpu::PollStatus, wgpu::PollError> {
        self.inner.tick();
        Ok(wgpu::PollStatus::QueueEmpty)
    }

    /// Internal counters are not tracked for this backend.
    fn get_internal_counters(&self) -> wgpu::InternalCounters {
        wgpu::InternalCounters::default()
    }

    fn generate_allocator_report(&self) -> Option<wgpu::AllocatorReport> {
        None
    }

    fn destroy(&self) {
        self.inner.destroy();
    }
}
729
impl QueueInterface for DawnQueue {
    /// Copies `data` into `buffer` at `offset` via Dawn's queue write path.
    fn write_buffer(&self, buffer: &DispatchBuffer, offset: wgpu::BufferAddress, data: &[u8]) {
        let buffer = expect_buffer(buffer);
        self.inner
            .write_buffer(buffer, offset, as_c_void_slice(data));
    }

    /// Staging buffers are plain zeroed Vecs; `write_staging_buffer` later
    /// replays them through the ordinary `write_buffer` path.
    fn create_staging_buffer(&self, size: wgpu::BufferSize) -> Option<DispatchQueueWriteBuffer> {
        Some(dispatch_queue_write_buffer(vec![0; size.get() as usize]))
    }

    /// Validation is delegated entirely to Dawn at write time.
    fn validate_write_buffer(
        &self,
        _buffer: &DispatchBuffer,
        _offset: wgpu::BufferAddress,
        _size: wgpu::BufferSize,
    ) -> Option<()> {
        Some(())
    }

    fn write_staging_buffer(
        &self,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        staging_buffer: &DispatchQueueWriteBuffer,
    ) {
        let buffer = expect_buffer(buffer);
        let staging = staging_buffer
            .as_custom::<DawnQueueWriteBuffer>()
            .expect("wgpu-compat: queue write buffer not dawn");
        self.inner
            .write_buffer(buffer, offset, as_c_void_slice(&staging.inner));
    }

    /// Uploads texel data, defaulting `rows_per_image` to the copy height when
    /// the caller omitted it on a multi-row / multi-layer copy (Dawn requires
    /// it in that case).
    fn write_texture(
        &self,
        texture: wgpu::TexelCopyTextureInfo<'_>,
        data: &[u8],
        mut data_layout: wgpu::TexelCopyBufferLayout,
        size: wgpu::Extent3d,
    ) {
        if data_layout.rows_per_image.is_none()
            && (size.height > 1 || size.depth_or_array_layers > 1)
        {
            data_layout.rows_per_image = Some(size.height.max(1));
        }
        let destination = map_texel_copy_texture_info(texture);
        let data_layout = map_texel_copy_buffer_layout(data_layout);
        let write_size = map_extent_3d(size);
        self.inner.write_texture(
            &destination,
            as_c_void_slice(data),
            &data_layout,
            &write_size,
        );
    }

    // Web-only entry point; unreachable in this native backend.
    #[cfg(web)]
    #[allow(unexpected_cfgs)]
    fn copy_external_image_to_texture(
        &self,
        _source: &wgpu::CopyExternalImageSourceInfo,
        _dest: wgpu::CopyExternalImageDestInfo<&wgpu::Texture>,
        _size: wgpu::Extent3d,
    ) {
        unimplemented!();
    }

    /// Submits the command buffers. Submission indices are not tracked, so the
    /// returned index is always 0.
    fn submit(&self, command_buffers: &mut dyn Iterator<Item = DispatchCommandBuffer>) -> u64 {
        let buffers = command_buffers
            .map(|buffer| expect_command_buffer(&buffer))
            .collect::<Vec<_>>();
        self.inner.submit(&buffers);
        0
    }

    /// Timestamp period is hard-coded to 1.0 (nanosecond ticks assumed).
    /// NOTE(review): confirm Dawn does not expose a real period to forward.
    fn get_timestamp_period(&self) -> f32 {
        1.0
    }

    /// Invokes `callback` when previously submitted work completes; the
    /// completion status is ignored, so the callback fires even on error.
    fn on_submitted_work_done(&self, callback: wgpu::custom::BoxSubmittedWorkDoneCallback) {
        let mut callback = Some(callback);
        let _ = self.inner.on_submitted_work_done(move |status, _message| {
            let _ = status;
            if let Some(cb) = callback.take() {
                cb();
            }
        });
    }

    /// BLAS compaction is not supported (see BlasInterface stubs).
    fn compact_blas(&self, _blas: &DispatchBlas) -> (Option<u64>, DispatchBlas) {
        (None, dispatch_blas())
    }
}
824
825impl ShaderModuleInterface for DawnShaderModule {
826 fn get_compilation_info(&self) -> Pin<Box<dyn wgpu::custom::ShaderCompilationInfoFuture>> {
827 let (future, shared) = CallbackFuture::new();
828 let _ = self.inner.get_compilation_info(move |status, info| {
829 if status == CompilationInfoRequestStatus::Success {
830 complete_shared(&shared, map_compilation_info(info));
831 } else {
832 complete_shared(&shared, wgpu::CompilationInfo { messages: vec![] });
833 }
834 });
835 Box::pin(future)
836 }
837}
838
// These wgpu interface traits require no extra behavior beyond what the Dawn
// wrapper types already provide, so the impls are empty markers.
impl BindGroupLayoutInterface for DawnBindGroupLayout {}
impl BindGroupInterface for DawnBindGroup {}
impl TextureViewInterface for DawnTextureView {}
impl SamplerInterface for DawnSampler {}
843
844impl BufferInterface for DawnBuffer {
845 fn map_async(
846 &self,
847 mode: wgpu::MapMode,
848 range: std::ops::Range<wgpu::BufferAddress>,
849 callback: wgpu::custom::BufferMapCallback,
850 ) {
851 let mode = match mode {
852 wgpu::MapMode::Read => MapMode::READ,
853 wgpu::MapMode::Write => MapMode::WRITE,
854 };
855 let mut callback = Some(callback);
856 let _ = self.inner.map_async(
857 mode,
858 range.start as usize,
859 (range.end - range.start) as usize,
860 move |status, message| {
861 let result = match status {
862 MapAsyncStatus::Success => Ok(()),
863 _ => {
864 let _ = message;
865 Err(wgpu::BufferAsyncError)
866 }
867 };
868 if let Some(cb) = callback.take() {
869 cb(result);
870 }
871 },
872 );
873 }
874
875 fn get_mapped_range(
876 &self,
877 sub_range: std::ops::Range<wgpu::BufferAddress>,
878 ) -> DispatchBufferMappedRange {
879 let ptr = self.inner.get_mapped_range(
880 sub_range.start as usize,
881 (sub_range.end - sub_range.start) as usize,
882 );
883 dispatch_buffer_mapped_range(ptr.cast(), (sub_range.end - sub_range.start) as usize)
884 }
885
886 fn unmap(&self) {
887 self.inner.unmap();
888 }
889
890 fn destroy(&self) {
891 self.inner.destroy();
892 }
893}
894
895impl TextureInterface for DawnTexture {
896 fn create_view(&self, desc: &wgpu::TextureViewDescriptor<'_>) -> DispatchTextureView {
897 let desc = map_texture_view_descriptor(desc);
898 let view = self.inner.create_view(Some(&desc));
899 dispatch_texture_view(view)
900 }
901
902 fn destroy(&self) {
903 self.inner.destroy();
904 }
905}
906
impl ExternalTextureInterface for DawnExternalTexture {
    // Forwards destruction to Dawn's external texture object.
    fn destroy(&self) {
        self.inner.destroy();
    }
}
912
// Ray-tracing acceleration structures are unsupported: compaction panics and
// readiness always reports false so callers never reach the panic via polling.
impl BlasInterface for DawnBlas {
    fn prepare_compact_async(&self, _callback: wgpu::custom::BlasCompactCallback) {
        panic!("wgpu-compat: blas not supported");
    }

    fn ready_for_compaction(&self) -> bool {
        false
    }
}
922
// Marker impls: these traits add no required methods for this backend.
impl TlasInterface for DawnTlas {}
impl QuerySetInterface for DawnQuerySet {}
impl PipelineLayoutInterface for DawnPipelineLayout {}
926
927impl RenderPipelineInterface for DawnRenderPipeline {
928 fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout {
929 let layout = self.inner.get_bind_group_layout(index);
930 dispatch_bind_group_layout(layout)
931 }
932}
933
934impl ComputePipelineInterface for DawnComputePipeline {
935 fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout {
936 let layout = self.inner.get_bind_group_layout(index);
937 dispatch_bind_group_layout(layout)
938 }
939}
940
impl PipelineCacheInterface for DawnPipelineCache {
    // Pipeline caching is a stub in this backend; there is never data to
    // serialize.
    fn get_data(&self) -> Option<Vec<u8>> {
        None
    }
}
946
947impl CommandEncoderInterface for DawnCommandEncoder {
948 fn copy_buffer_to_buffer(
949 &self,
950 source: &DispatchBuffer,
951 source_offset: wgpu::BufferAddress,
952 destination: &DispatchBuffer,
953 destination_offset: wgpu::BufferAddress,
954 copy_size: Option<wgpu::BufferAddress>,
955 ) {
956 let source = expect_buffer(source);
957 let destination = expect_buffer(destination);
958 self.inner.clone().copy_buffer_to_buffer(
959 source,
960 source_offset,
961 destination,
962 destination_offset,
963 copy_size.unwrap_or(WHOLE_SIZE),
964 );
965 }
966
967 fn copy_buffer_to_texture(
968 &self,
969 source: wgpu::TexelCopyBufferInfo<'_>,
970 destination: wgpu::TexelCopyTextureInfo<'_>,
971 copy_size: wgpu::Extent3d,
972 ) {
973 let source = map_texel_copy_buffer_info(source);
974 let dest = map_texel_copy_texture_info(destination);
975 let size = map_extent_3d(copy_size);
976 self.inner
977 .clone()
978 .copy_buffer_to_texture(&source, &dest, &size);
979 }
980
981 fn copy_texture_to_buffer(
982 &self,
983 source: wgpu::TexelCopyTextureInfo<'_>,
984 destination: wgpu::TexelCopyBufferInfo<'_>,
985 copy_size: wgpu::Extent3d,
986 ) {
987 let source = map_texel_copy_texture_info(source);
988 let dest = map_texel_copy_buffer_info(destination);
989 let size = map_extent_3d(copy_size);
990 self.inner
991 .clone()
992 .copy_texture_to_buffer(&source, &dest, &size);
993 }
994
995 fn copy_texture_to_texture(
996 &self,
997 source: wgpu::TexelCopyTextureInfo<'_>,
998 destination: wgpu::TexelCopyTextureInfo<'_>,
999 copy_size: wgpu::Extent3d,
1000 ) {
1001 let source = map_texel_copy_texture_info(source);
1002 let dest = map_texel_copy_texture_info(destination);
1003 let size = map_extent_3d(copy_size);
1004 self.inner
1005 .clone()
1006 .copy_texture_to_texture(&source, &dest, &size);
1007 }
1008
1009 fn begin_compute_pass(&self, desc: &wgpu::ComputePassDescriptor<'_>) -> DispatchComputePass {
1010 let dawn_desc = map_compute_pass_descriptor(desc);
1011 let pass = self.inner.clone().begin_compute_pass(Some(&dawn_desc));
1012 dispatch_compute_pass(pass)
1013 }
1014
1015 fn begin_render_pass(&self, desc: &wgpu::RenderPassDescriptor<'_>) -> DispatchRenderPass {
1016 let dawn_desc = map_render_pass_descriptor(desc);
1017 let pass = self.inner.clone().begin_render_pass(&dawn_desc);
1018 dispatch_render_pass(pass)
1019 }
1020
1021 fn finish(&mut self) -> DispatchCommandBuffer {
1022 let buffer = self.inner.clone().finish(None);
1023 dispatch_command_buffer(buffer)
1024 }
1025
    // Texture clears are deliberately a no-op for this backend; the bindings
    // only silence unused-parameter warnings.
    // NOTE(review): confirm callers tolerate the texture keeping its contents.
    fn clear_texture(
        &self,
        texture: &DispatchTexture,
        subresource_range: &wgpu::ImageSubresourceRange,
    ) {
        let _ = texture;
        let _ = subresource_range;
    }
1034
1035 fn clear_buffer(
1036 &self,
1037 buffer: &DispatchBuffer,
1038 offset: wgpu::BufferAddress,
1039 size: Option<wgpu::BufferAddress>,
1040 ) {
1041 let buffer = expect_buffer(buffer);
1042 self.inner
1043 .clone()
1044 .clear_buffer(buffer, offset, size.unwrap_or(WHOLE_SIZE));
1045 }
1046
1047 fn insert_debug_marker(&self, label: &str) {
1048 self.inner.clone().insert_debug_marker(label.to_string());
1049 }
1050
1051 fn push_debug_group(&self, label: &str) {
1052 self.inner.clone().push_debug_group(label.to_string());
1053 }
1054
1055 fn pop_debug_group(&self) {
1056 self.inner.clone().pop_debug_group();
1057 }
1058
1059 fn write_timestamp(&self, query_set: &DispatchQuerySet, query_index: u32) {
1060 let set = expect_query_set(query_set);
1061 self.inner.clone().write_timestamp(set, query_index);
1062 }
1063
1064 fn resolve_query_set(
1065 &self,
1066 query_set: &DispatchQuerySet,
1067 first_query: u32,
1068 query_count: u32,
1069 destination: &DispatchBuffer,
1070 destination_offset: wgpu::BufferAddress,
1071 ) {
1072 let set = expect_query_set(query_set);
1073 let buffer = expect_buffer(destination);
1074 self.inner.clone().resolve_query_set(
1075 set,
1076 first_query,
1077 query_count,
1078 buffer,
1079 destination_offset,
1080 );
1081 }
1082
    /// Ray-tracing acceleration structures (BLAS/TLAS) are not supported by
    /// this backend; calling this is a hard error.
    fn mark_acceleration_structures_built<'a>(
        &self,
        _blas: &mut dyn Iterator<Item = &'a wgpu::Blas>,
        _tlas: &mut dyn Iterator<Item = &'a wgpu::Tlas>,
    ) {
        panic!("wgpu-compat: blas/tlas not supported");
    }
1090
    /// Ray-tracing acceleration structures (BLAS/TLAS) are not supported by
    /// this backend; calling this is a hard error.
    fn build_acceleration_structures<'a>(
        &self,
        _blas: &mut dyn Iterator<Item = &'a wgpu::BlasBuildEntry<'a>>,
        _tlas: &mut dyn Iterator<Item = &'a wgpu::Tlas>,
    ) {
        panic!("wgpu-compat: blas/tlas not supported");
    }
1098
    /// Resource state transitions are a no-op here — presumably the native
    /// backend tracks barriers itself (TODO confirm).
    fn transition_resources<'a>(
        &mut self,
        _buffer_transitions: &mut dyn Iterator<Item = wgpu::BufferTransition<&'a DispatchBuffer>>,
        _texture_transitions: &mut dyn Iterator<
            Item = wgpu::TextureTransition<&'a DispatchTexture>,
        >,
    ) {
    }
1107}
1108
1109impl ComputePassInterface for DawnComputePass {
1110 fn set_pipeline(&mut self, pipeline: &DispatchComputePipeline) {
1111 let pipeline = expect_compute_pipeline(pipeline);
1112 self.inner.clone().set_pipeline(pipeline);
1113 }
1114
1115 fn set_bind_group(
1116 &mut self,
1117 index: u32,
1118 bind_group: Option<&DispatchBindGroup>,
1119 offsets: &[wgpu::DynamicOffset],
1120 ) {
1121 let group = bind_group.map(expect_bind_group);
1122 self.inner.clone().set_bind_group(index, group, offsets);
1123 }
1124
1125 fn set_immediates(&mut self, offset: u32, data: &[u8]) {
1126 let data = bytes_to_u32(data);
1127 self.inner
1128 .clone()
1129 .set_immediates(offset, u32_as_c_void_slice(&data));
1130 }
1131
1132 fn insert_debug_marker(&mut self, label: &str) {
1133 self.inner.clone().insert_debug_marker(label.to_string());
1134 }
1135
1136 fn push_debug_group(&mut self, group_label: &str) {
1137 self.inner.clone().push_debug_group(group_label.to_string());
1138 }
1139
1140 fn pop_debug_group(&mut self) {
1141 self.inner.clone().pop_debug_group();
1142 }
1143
1144 fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32) {
1145 let set = expect_query_set(query_set);
1146 self.inner.clone().write_timestamp(set, query_index);
1147 }
1148
1149 fn begin_pipeline_statistics_query(
1150 &mut self,
1151 _query_set: &DispatchQuerySet,
1152 _query_index: u32,
1153 ) {
1154 panic!("wgpu-compat: pipeline statistics not supported");
1155 }
1156
1157 fn end_pipeline_statistics_query(&mut self) {
1158 panic!("wgpu-compat: pipeline statistics not supported");
1159 }
1160
1161 fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
1162 self.inner.clone().dispatch_workgroups(x, y, z);
1163 }
1164
1165 fn dispatch_workgroups_indirect(
1166 &mut self,
1167 indirect_buffer: &DispatchBuffer,
1168 indirect_offset: wgpu::BufferAddress,
1169 ) {
1170 let buffer = expect_buffer(indirect_buffer);
1171 self.inner
1172 .clone()
1173 .dispatch_workgroups_indirect(buffer, indirect_offset);
1174 }
1175
1176 fn end(&mut self) {
1177 if !self.ended {
1178 self.inner.clone().end();
1179 self.ended = true;
1180 }
1181 }
1182}
1183
1184impl Drop for DawnComputePass {
1185 fn drop(&mut self) {
1186 if !self.ended {
1187 self.inner.clone().end();
1188 self.ended = true;
1189 }
1190 }
1191}
1192
/// Render-pass recording: adapts the wgpu dispatch interface onto the native
/// Dawn render pass. Most methods map wgpu arguments to Dawn types and
/// forward them; unsupported features panic with a "wgpu-compat" message.
impl RenderPassInterface for DawnRenderPass {
    fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline) {
        let pipeline = expect_render_pipeline(pipeline);
        self.inner.clone().set_pipeline(pipeline);
    }

    fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: Option<&DispatchBindGroup>,
        offsets: &[wgpu::DynamicOffset],
    ) {
        let group = bind_group.map(expect_bind_group);
        self.inner.clone().set_bind_group(index, group, offsets);
    }

    fn set_index_buffer(
        &mut self,
        buffer: &DispatchBuffer,
        index_format: wgpu::IndexFormat,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        // `None` binds the rest of the buffer via the WHOLE_SIZE sentinel.
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner
            .clone()
            .set_index_buffer(buffer, map_index_format(index_format), offset, size);
    }

    fn set_vertex_buffer(
        &mut self,
        slot: u32,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        // `None` binds the rest of the buffer via the WHOLE_SIZE sentinel.
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner
            .clone()
            .set_vertex_buffer(slot, Some(buffer), offset, size);
    }

    fn set_immediates(&mut self, offset: u32, data: &[u8]) {
        // Repack the byte payload as 32-bit words before handing it to Dawn.
        let data = bytes_to_u32(data);
        self.inner
            .clone()
            .set_immediates(offset, u32_as_c_void_slice(&data));
    }

    fn set_blend_constant(&mut self, color: wgpu::Color) {
        let color = map_color(color);
        self.inner.clone().set_blend_constant(&color);
    }

    fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
        self.inner.clone().set_scissor_rect(x, y, width, height);
    }

    fn set_viewport(
        &mut self,
        x: f32,
        y: f32,
        width: f32,
        height: f32,
        min_depth: f32,
        max_depth: f32,
    ) {
        self.inner
            .clone()
            .set_viewport(x, y, width, height, min_depth, max_depth);
    }

    fn set_stencil_reference(&mut self, reference: u32) {
        self.inner.clone().set_stencil_reference(reference);
    }

    // wgpu expresses draws as half-open ranges; Dawn takes (count, start).
    fn draw(&mut self, vertices: std::ops::Range<u32>, instances: std::ops::Range<u32>) {
        self.inner.clone().draw(
            vertices.end - vertices.start,
            instances.end - instances.start,
            vertices.start,
            instances.start,
        );
    }

    fn draw_indexed(
        &mut self,
        indices: std::ops::Range<u32>,
        base_vertex: i32,
        instances: std::ops::Range<u32>,
    ) {
        self.inner.clone().draw_indexed(
            indices.end - indices.start,
            instances.end - instances.start,
            indices.start,
            base_vertex,
            instances.start,
        );
    }

    /// Mesh shading is not supported by this backend.
    fn draw_mesh_tasks(&mut self, _group_count_x: u32, _group_count_y: u32, _group_count_z: u32) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    fn draw_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone().draw_indirect(buffer, indirect_offset);
    }

    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner
            .clone()
            .draw_indexed_indirect(buffer, indirect_offset);
    }

    /// Mesh shading is not supported by this backend.
    fn draw_mesh_tasks_indirect(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
    ) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    fn multi_draw_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
        count: u32,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        // No count buffer: the trailing (None, 0) selects the fixed-count form.
        self.inner
            .clone()
            .multi_draw_indirect(buffer, indirect_offset, count, None, 0);
    }

    fn multi_draw_indexed_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
        count: u32,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        // No count buffer: the trailing (None, 0) selects the fixed-count form.
        self.inner
            .clone()
            .multi_draw_indexed_indirect(buffer, indirect_offset, count, None, 0);
    }

    /// Count-buffer-driven multi-draw is not supported by this backend.
    fn multi_draw_indirect_count(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count_buffer: &DispatchBuffer,
        _count_buffer_offset: wgpu::BufferAddress,
        _max_count: u32,
    ) {
        panic!("wgpu-compat: multi_draw_indirect_count not supported");
    }

    /// Mesh shading is not supported by this backend.
    fn multi_draw_mesh_tasks_indirect(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count: u32,
    ) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    /// Count-buffer-driven multi-draw is not supported by this backend.
    fn multi_draw_indexed_indirect_count(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count_buffer: &DispatchBuffer,
        _count_buffer_offset: wgpu::BufferAddress,
        _max_count: u32,
    ) {
        panic!("wgpu-compat: multi_draw_indexed_indirect_count not supported");
    }

    /// Mesh shading is not supported by this backend.
    fn multi_draw_mesh_tasks_indirect_count(
        &mut self,
        _indirect_buffer: &DispatchBuffer,
        _indirect_offset: wgpu::BufferAddress,
        _count_buffer: &DispatchBuffer,
        _count_buffer_offset: wgpu::BufferAddress,
        _max_count: u32,
    ) {
        panic!("wgpu-compat: mesh tasks not supported");
    }

    fn insert_debug_marker(&mut self, label: &str) {
        self.inner.clone().insert_debug_marker(label.to_string());
    }

    fn push_debug_group(&mut self, group_label: &str) {
        self.inner.clone().push_debug_group(group_label.to_string());
    }

    fn pop_debug_group(&mut self) {
        self.inner.clone().pop_debug_group();
    }

    fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32) {
        let set = expect_query_set(query_set);
        self.inner.clone().write_timestamp(set, query_index);
    }

    fn begin_occlusion_query(&mut self, query_index: u32) {
        self.inner.clone().begin_occlusion_query(query_index);
    }

    fn end_occlusion_query(&mut self) {
        self.inner.clone().end_occlusion_query();
    }

    /// Pipeline statistics queries are not supported by this backend.
    fn begin_pipeline_statistics_query(
        &mut self,
        _query_set: &DispatchQuerySet,
        _query_index: u32,
    ) {
        panic!("wgpu-compat: pipeline statistics not supported");
    }

    fn end_pipeline_statistics_query(&mut self) {
        panic!("wgpu-compat: pipeline statistics not supported");
    }

    fn execute_bundles(&mut self, render_bundles: &mut dyn Iterator<Item = &DispatchRenderBundle>) {
        // Unwrap each dispatch handle to its native bundle before forwarding.
        let bundles = render_bundles.map(expect_render_bundle).collect::<Vec<_>>();
        self.inner.clone().execute_bundles(&bundles);
    }

    /// Ends the pass exactly once; subsequent calls (and `Drop`) are no-ops.
    fn end(&mut self) {
        if !self.ended {
            self.inner.clone().end();
            self.ended = true;
        }
    }
}
1442
1443impl Drop for DawnRenderPass {
1444 fn drop(&mut self) {
1445 if !self.ended {
1446 self.inner.clone().end();
1447 self.ended = true;
1448 }
1449 }
1450}
1451
/// Render-bundle recording: mirrors the render-pass adapters for the subset
/// of commands a bundle may contain, forwarding to the native Dawn encoder.
impl RenderBundleEncoderInterface for DawnRenderBundleEncoder {
    fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline) {
        let pipeline = expect_render_pipeline(pipeline);
        self.inner.clone().set_pipeline(pipeline);
    }

    fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: Option<&DispatchBindGroup>,
        offsets: &[wgpu::DynamicOffset],
    ) {
        let group = bind_group.map(expect_bind_group);
        self.inner.clone().set_bind_group(index, group, offsets);
    }

    fn set_index_buffer(
        &mut self,
        buffer: &DispatchBuffer,
        index_format: wgpu::IndexFormat,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        // `None` binds the rest of the buffer via the WHOLE_SIZE sentinel.
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner
            .clone()
            .set_index_buffer(buffer, map_index_format(index_format), offset, size);
    }

    fn set_vertex_buffer(
        &mut self,
        slot: u32,
        buffer: &DispatchBuffer,
        offset: wgpu::BufferAddress,
        size: Option<wgpu::BufferSize>,
    ) {
        let buffer = expect_buffer(buffer);
        // `None` binds the rest of the buffer via the WHOLE_SIZE sentinel.
        let size = size.map(|v| v.get()).unwrap_or(WHOLE_SIZE);
        self.inner
            .clone()
            .set_vertex_buffer(slot, Some(buffer), offset, size);
    }

    fn set_immediates(&mut self, offset: u32, data: &[u8]) {
        // Repack the byte payload as 32-bit words before handing it to Dawn.
        let data = bytes_to_u32(data);
        self.inner
            .clone()
            .set_immediates(offset, u32_as_c_void_slice(&data));
    }

    // wgpu expresses draws as half-open ranges; Dawn takes (count, start).
    fn draw(&mut self, vertices: std::ops::Range<u32>, instances: std::ops::Range<u32>) {
        self.inner.clone().draw(
            vertices.end - vertices.start,
            instances.end - instances.start,
            vertices.start,
            instances.start,
        );
    }

    fn draw_indexed(
        &mut self,
        indices: std::ops::Range<u32>,
        base_vertex: i32,
        instances: std::ops::Range<u32>,
    ) {
        self.inner.clone().draw_indexed(
            indices.end - indices.start,
            instances.end - instances.start,
            indices.start,
            base_vertex,
            instances.start,
        );
    }

    fn draw_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner.clone().draw_indirect(buffer, indirect_offset);
    }

    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &DispatchBuffer,
        indirect_offset: wgpu::BufferAddress,
    ) {
        let buffer = expect_buffer(indirect_buffer);
        self.inner
            .clone()
            .draw_indexed_indirect(buffer, indirect_offset);
    }

    /// Consumes the encoder and produces the finished, replayable bundle.
    fn finish(self, desc: &wgpu::RenderBundleDescriptor<'_>) -> DispatchRenderBundle {
        let mut dawn_desc = RenderBundleDescriptor::new();
        dawn_desc.label = label_to_string(desc.label);
        let bundle = self.inner.clone().finish(Some(&dawn_desc));
        dispatch_render_bundle(bundle)
    }
}
1554
// These interfaces carry no methods for this backend; the impls are markers.
impl CommandBufferInterface for DawnCommandBuffer {}
impl RenderBundleInterface for DawnRenderBundle {}
1557
1558impl SurfaceInterface for DawnSurface {
1559 fn get_capabilities(&self, adapter: &DispatchAdapter) -> wgpu::SurfaceCapabilities {
1560 let adapter = adapter
1561 .as_custom::<DawnAdapter>()
1562 .expect("wgpu-compat: adapter not dawn")
1563 .clone();
1564 let surface = self.inner.clone();
1565 adapter.with_adapter(move |adapter| {
1566 let mut caps = SurfaceCapabilities::new();
1567 let _ = surface.get_capabilities(adapter.clone(), &mut caps);
1568 map_surface_capabilities(caps)
1569 })
1570 }
1571
1572 fn configure(&self, device: &DispatchDevice, config: &wgpu::SurfaceConfiguration) {
1573 let mut config = map_surface_configuration(config);
1574 config.device = Some(expect_device(device));
1575 self.inner.clone().configure(&config);
1576 }
1577
1578 fn get_current_texture(
1579 &self,
1580 ) -> (
1581 Option<DispatchTexture>,
1582 wgpu::SurfaceStatus,
1583 DispatchSurfaceOutputDetail,
1584 ) {
1585 let mut surface_texture = SurfaceTexture::new();
1586 self.inner.clone().get_current_texture(&mut surface_texture);
1587 let status = match surface_texture
1588 .status
1589 .unwrap_or(SurfaceGetCurrentTextureStatus::Error)
1590 {
1591 SurfaceGetCurrentTextureStatus::SuccessOptimal => wgpu::SurfaceStatus::Good,
1592 SurfaceGetCurrentTextureStatus::SuccessSuboptimal => wgpu::SurfaceStatus::Suboptimal,
1593 SurfaceGetCurrentTextureStatus::Timeout => wgpu::SurfaceStatus::Timeout,
1594 SurfaceGetCurrentTextureStatus::Outdated => wgpu::SurfaceStatus::Outdated,
1595 SurfaceGetCurrentTextureStatus::Lost => wgpu::SurfaceStatus::Lost,
1596 SurfaceGetCurrentTextureStatus::Error => wgpu::SurfaceStatus::Unknown,
1597 };
1598 (
1599 surface_texture.texture.map(dispatch_texture),
1600 status,
1601 dispatch_surface_output_detail(self.inner.clone()),
1602 )
1603 }
1604}
1605
1606impl SurfaceOutputDetailInterface for DawnSurfaceOutputDetail {
1607 fn present(&self) {
1608 let _ = self.surface.clone().present();
1609 }
1610
1611 fn texture_discard(&self) {
1612 }
1614}
1615
1616impl QueueWriteBufferInterface for DawnQueueWriteBuffer {
1617 fn slice(&self) -> &[u8] {
1618 &self.inner
1619 }
1620
1621 fn slice_mut(&mut self) -> &mut [u8] {
1622 &mut self.inner
1623 }
1624}
1625
/// Byte views over a mapped buffer range described by a raw pointer + length.
impl BufferMappedRangeInterface for DawnBufferMappedRange {
    /// Returns the mapped bytes, or an empty slice when there is no mapping.
    fn slice(&self) -> &[u8] {
        if self.data.is_null() || self.size == 0 {
            return &[];
        }
        // SAFETY: `data` is non-null (checked above) and is assumed to point
        // to `size` readable bytes for the lifetime of `self` — the mapping
        // that produced this range must outlive it (TODO confirm invariant).
        unsafe { std::slice::from_raw_parts(self.data, self.size) }
    }

    /// Returns the mapped bytes mutably, or an empty slice when unmapped.
    fn slice_mut(&mut self) -> &mut [u8] {
        if self.data.is_null() || self.size == 0 {
            return &mut [];
        }
        // SAFETY: as above, plus `&mut self` guarantees exclusive access to
        // this range handle, so no aliasing mutable view is handed out here.
        unsafe { std::slice::from_raw_parts_mut(self.data, self.size) }
    }

    // Web-only accessor; this native backend never compiles or supports it.
    #[cfg(web)]
    #[allow(unexpected_cfgs)]
    fn as_uint8array(&self) -> &js_sys::Uint8Array {
        unimplemented!();
    }
}