wgpu/lib.rs
1//! A cross-platform graphics and compute library based on [WebGPU](https://gpuweb.github.io/gpuweb/).
2//!
3//! To start using the API, create an [`Instance`].
4
5#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
6#![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/master/logo.png")]
7#![warn(missing_docs, unsafe_op_in_unsafe_fn)]
8
9mod backend;
10mod context;
11pub mod util;
12#[macro_use]
13mod macros;
14
15use std::{
16 any::Any,
17 borrow::Cow,
18 error, fmt,
19 future::Future,
20 marker::PhantomData,
21 num::NonZeroU32,
22 ops::{Bound, Deref, DerefMut, Range, RangeBounds},
23 sync::Arc,
24 thread,
25};
26
27use context::{Context, DeviceRequest, DynContext, ObjectId};
28use parking_lot::Mutex;
29
30pub use wgt::{
31 AdapterInfo, AddressMode, AstcBlock, AstcChannel, Backend, Backends, BindGroupLayoutEntry,
32 BindingType, BlendComponent, BlendFactor, BlendOperation, BlendState, BufferAddress,
33 BufferBindingType, BufferSize, BufferUsages, Color, ColorTargetState, ColorWrites,
34 CommandBufferDescriptor, CompareFunction, CompositeAlphaMode, DepthBiasState,
35 DepthStencilState, DeviceType, DownlevelCapabilities, DownlevelFlags, Dx12Compiler,
36 DynamicOffset, Extent3d, Face, Features, FilterMode, FrontFace, Gles3MinorVersion,
37 ImageDataLayout, ImageSubresourceRange, IndexFormat, InstanceDescriptor, InstanceFlags, Limits,
38 MultisampleState, Origin2d, Origin3d, PipelineStatisticsTypes, PolygonMode, PowerPreference,
39 PredefinedColorSpace, PresentMode, PresentationTimestamp, PrimitiveState, PrimitiveTopology,
40 PushConstantRange, QueryType, RenderBundleDepthStencil, SamplerBindingType, SamplerBorderColor,
41 ShaderLocation, ShaderModel, ShaderStages, StencilFaceState, StencilOperation, StencilState,
42 StorageTextureAccess, SurfaceCapabilities, SurfaceStatus, TextureAspect, TextureDimension,
43 TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType,
44 TextureUsages, TextureViewDimension, VertexAttribute, VertexFormat, VertexStepMode,
45 WasmNotSend, WasmNotSync, COPY_BUFFER_ALIGNMENT, COPY_BYTES_PER_ROW_ALIGNMENT, MAP_ALIGNMENT,
46 PUSH_CONSTANT_ALIGNMENT, QUERY_RESOLVE_BUFFER_ALIGNMENT, QUERY_SET_MAX_QUERIES, QUERY_SIZE,
47 VERTEX_STRIDE_ALIGNMENT,
48};
49
50#[cfg(any(
51 not(target_arch = "wasm32"),
52 feature = "webgl",
53 target_os = "emscripten"
54))]
55#[doc(hidden)]
56pub use ::hal;
57#[cfg(feature = "naga")]
58pub use ::naga;
59#[cfg(any(
60 not(target_arch = "wasm32"),
61 feature = "webgl",
62 target_os = "emscripten"
63))]
64#[doc(hidden)]
65pub use ::wgc as core;
66
67// Wasm-only types. We try to keep as many types as possible platform-independent,
68// but these need to depend on web-sys.
69#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
70pub use wgt::{ExternalImageSource, ImageCopyExternalImage};
71
72/// Filter for error scopes.
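///
/// A minimal sketch of how an error scope might be used; `pollster` is assumed here only as
/// one convenient way to block on the returned future:
///
/// ```ignore
/// device.push_error_scope(wgpu::ErrorFilter::Validation);
/// // ... create resources that might fail validation ...
/// if let Some(error) = pollster::block_on(device.pop_error_scope()) {
///     eprintln!("validation error: {error}");
/// }
/// ```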
73#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
74pub enum ErrorFilter {
75 /// Catch only out-of-memory errors.
76 OutOfMemory,
77 /// Catch only validation errors.
78 Validation,
79}
80static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);
81
82type C = dyn DynContext;
83#[cfg(any(
84 not(target_arch = "wasm32"),
85 all(
86 feature = "fragile-send-sync-non-atomic-wasm",
87 not(target_feature = "atomics")
88 )
89))]
90type Data = dyn Any + Send + Sync;
91#[cfg(not(any(
92 not(target_arch = "wasm32"),
93 all(
94 feature = "fragile-send-sync-non-atomic-wasm",
95 not(target_feature = "atomics")
96 )
97)))]
98type Data = dyn Any;
99
100/// Context for all other wgpu objects. Instance of wgpu.
101///
102/// This is the first thing you create when using wgpu.
103/// Its primary use is to create [`Adapter`]s and [`Surface`]s.
104///
105/// Does not have to be kept alive.
106///
107/// Corresponds to [WebGPU `GPU`](https://gpuweb.github.io/gpuweb/#gpu-interface).
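///
/// A minimal start-up sketch; `pollster` is assumed here only as a convenient way to block
/// on the returned future (any executor works):
///
/// ```ignore
/// let instance = wgpu::Instance::new(wgpu::InstanceDescriptor::default());
/// let adapter =
///     pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions::default()))
///         .expect("no suitable adapter found");
/// ```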
108#[derive(Debug)]
109pub struct Instance {
110 context: Arc<C>,
111}
112#[cfg(any(
113 not(target_arch = "wasm32"),
114 all(
115 feature = "fragile-send-sync-non-atomic-wasm",
116 not(target_feature = "atomics")
117 )
118))]
119static_assertions::assert_impl_all!(Instance: Send, Sync);
120
121/// Handle to a physical graphics and/or compute device.
122///
123/// Adapters can be used to open a connection to the corresponding [`Device`]
124/// on the host system by using [`Adapter::request_device`].
125///
126/// Does not have to be kept alive.
127///
128/// Corresponds to [WebGPU `GPUAdapter`](https://gpuweb.github.io/gpuweb/#gpu-adapter).
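///
/// A sketch of opening a device (and its queue) from an adapter; the descriptor values are
/// illustrative, and `pollster` is assumed only for blocking on the future:
///
/// ```ignore
/// let (device, queue) = pollster::block_on(adapter.request_device(
///     &wgpu::DeviceDescriptor {
///         label: None,
///         features: wgpu::Features::empty(),
///         limits: wgpu::Limits::default(),
///     },
///     None, // no API call tracing
/// ))
/// .expect("failed to request device");
/// ```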
129#[derive(Debug)]
130pub struct Adapter {
131 context: Arc<C>,
132 id: ObjectId,
133 data: Box<Data>,
134}
135#[cfg(any(
136 not(target_arch = "wasm32"),
137 all(
138 feature = "fragile-send-sync-non-atomic-wasm",
139 not(target_feature = "atomics")
140 )
141))]
142static_assertions::assert_impl_all!(Adapter: Send, Sync);
143
144impl Drop for Adapter {
145 fn drop(&mut self) {
146 if !thread::panicking() {
147 self.context.adapter_drop(&self.id, self.data.as_ref())
148 }
149 }
150}
151
152/// Open connection to a graphics and/or compute device.
153///
154/// Responsible for the creation of most rendering and compute resources.
155/// These are then used in commands, which are submitted to a [`Queue`].
156///
157/// A device may be requested from an adapter with [`Adapter::request_device`].
158///
159/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
160#[derive(Debug)]
161pub struct Device {
162 context: Arc<C>,
163 id: ObjectId,
164 data: Box<Data>,
165}
166#[cfg(any(
167 not(target_arch = "wasm32"),
168 all(
169 feature = "fragile-send-sync-non-atomic-wasm",
170 not(target_feature = "atomics")
171 )
172))]
173static_assertions::assert_impl_all!(Device: Send, Sync);
174
175/// Identifier for a particular call to [`Queue::submit`]. Can be used
176/// as part of an argument to [`Device::poll`] to block for a particular
177/// submission to finish.
178///
179/// This type is unique to the Rust API of `wgpu`.
180/// There is no analogue in the WebGPU specification.
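///
/// A sketch of waiting for one particular submission to finish (assumes `device`, `queue`
/// and `encoder` from the surrounding examples):
///
/// ```ignore
/// let index = queue.submit(Some(encoder.finish()));
/// device.poll(wgpu::Maintain::WaitForSubmissionIndex(index));
/// ```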
181#[derive(Debug, Clone)]
182pub struct SubmissionIndex(ObjectId, Arc<crate::Data>);
183#[cfg(any(
184 not(target_arch = "wasm32"),
185 all(
186 feature = "fragile-send-sync-non-atomic-wasm",
187 not(target_feature = "atomics")
188 )
189))]
190static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);
191
192/// The main purpose of this struct is to resolve mapped ranges (convert sizes
193/// to end points), and to ensure that the sub-ranges don't intersect.
194#[derive(Debug)]
195struct MapContext {
196 total_size: BufferAddress,
197 initial_range: Range<BufferAddress>,
198 sub_ranges: Vec<Range<BufferAddress>>,
199}
200
201impl MapContext {
202 fn new(total_size: BufferAddress) -> Self {
203 Self {
204 total_size,
205 initial_range: 0..0,
206 sub_ranges: Vec::new(),
207 }
208 }
209
210 fn reset(&mut self) {
211 self.initial_range = 0..0;
212
213 assert!(
214 self.sub_ranges.is_empty(),
215 "You cannot unmap a buffer that still has accessible mapped views"
216 );
217 }
218
219 fn add(&mut self, offset: BufferAddress, size: Option<BufferSize>) -> BufferAddress {
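        // A `size` of `None` means "up to the end of the initially mapped range",
        // mirroring `Buffer::slice` with an unbounded upper bound.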
220 let end = match size {
221 Some(s) => offset + s.get(),
222 None => self.initial_range.end,
223 };
224 assert!(self.initial_range.start <= offset && end <= self.initial_range.end);
225 for sub in self.sub_ranges.iter() {
226 assert!(
227 end <= sub.start || offset >= sub.end,
228 "Intersecting map range with {sub:?}"
229 );
230 }
231 self.sub_ranges.push(offset..end);
232 end
233 }
234
235 fn remove(&mut self, offset: BufferAddress, size: Option<BufferSize>) {
236 let end = match size {
237 Some(s) => offset + s.get(),
238 None => self.initial_range.end,
239 };
240
241 let index = self
242 .sub_ranges
243 .iter()
244 .position(|r| *r == (offset..end))
245 .expect("unable to remove range from map context");
246 self.sub_ranges.swap_remove(index);
247 }
248}
249
250/// Handle to a GPU-accessible buffer.
251///
252/// Created with [`Device::create_buffer`] or
253/// [`DeviceExt::create_buffer_init`](util::DeviceExt::create_buffer_init).
254///
255/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
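///
/// A minimal creation sketch (size and usages are illustrative):
///
/// ```ignore
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: Some("example buffer"),
///     size: 1024,
///     usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
///     mapped_at_creation: false,
/// });
/// ```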
256#[derive(Debug)]
257pub struct Buffer {
258 context: Arc<C>,
259 id: ObjectId,
260 data: Box<Data>,
261 map_context: Mutex<MapContext>,
262 size: wgt::BufferAddress,
263 usage: BufferUsages,
264 // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
265}
266#[cfg(any(
267 not(target_arch = "wasm32"),
268 all(
269 feature = "fragile-send-sync-non-atomic-wasm",
270 not(target_feature = "atomics")
271 )
272))]
273static_assertions::assert_impl_all!(Buffer: Send, Sync);
274
275/// Slice into a [`Buffer`].
276///
277/// It can be created with [`Buffer::slice`]. To use the whole buffer, call with unbounded slice:
278///
279/// `buffer.slice(..)`
280///
281/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
282/// an offset and size are specified as arguments to each call working with the [`Buffer`], instead.
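///
/// A sketch of mapping a whole buffer for reading through a slice; it assumes the buffer was
/// created with [`BufferUsages::MAP_READ`], and the blocking poll is only one possible strategy:
///
/// ```ignore
/// let slice = buffer.slice(..);
/// slice.map_async(wgpu::MapMode::Read, |result| {
///     result.expect("failed to map buffer");
/// });
/// device.poll(wgpu::Maintain::Wait);
/// let view = slice.get_mapped_range();
/// let bytes: &[u8] = &view;
/// ```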
283#[derive(Copy, Clone, Debug)]
284pub struct BufferSlice<'a> {
285 buffer: &'a Buffer,
286 offset: BufferAddress,
287 size: Option<BufferSize>,
288}
289#[cfg(any(
290 not(target_arch = "wasm32"),
291 all(
292 feature = "fragile-send-sync-non-atomic-wasm",
293 not(target_feature = "atomics")
294 )
295))]
296static_assertions::assert_impl_all!(BufferSlice: Send, Sync);
297
298/// Handle to a texture on the GPU.
299///
300/// It can be created with [`Device::create_texture`].
301///
302/// Corresponds to [WebGPU `GPUTexture`](https://gpuweb.github.io/gpuweb/#texture-interface).
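///
/// A minimal creation sketch (the extent, format and usages are illustrative); the default
/// view descriptor covers the whole texture:
///
/// ```ignore
/// let texture = device.create_texture(&wgpu::TextureDescriptor {
///     label: Some("example texture"),
///     size: wgpu::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
///     mip_level_count: 1,
///     sample_count: 1,
///     dimension: wgpu::TextureDimension::D2,
///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
///     usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
///     view_formats: &[],
/// });
/// let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
/// ```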
303#[derive(Debug)]
304pub struct Texture {
305 context: Arc<C>,
306 id: ObjectId,
307 data: Box<Data>,
308 owned: bool,
309 descriptor: TextureDescriptor<'static>,
310}
311#[cfg(any(
312 not(target_arch = "wasm32"),
313 all(
314 feature = "fragile-send-sync-non-atomic-wasm",
315 not(target_feature = "atomics")
316 )
317))]
318static_assertions::assert_impl_all!(Texture: Send, Sync);
319
320/// Handle to a texture view.
321///
322/// A `TextureView` object describes a texture and associated metadata needed by a
323/// [`RenderPipeline`] or [`BindGroup`].
324///
325/// Corresponds to [WebGPU `GPUTextureView`](https://gpuweb.github.io/gpuweb/#gputextureview).
326#[derive(Debug)]
327pub struct TextureView {
328 context: Arc<C>,
329 id: ObjectId,
330 data: Box<Data>,
331}
332#[cfg(any(
333 not(target_arch = "wasm32"),
334 all(
335 feature = "fragile-send-sync-non-atomic-wasm",
336 not(target_feature = "atomics")
337 )
338))]
339static_assertions::assert_impl_all!(TextureView: Send, Sync);
340
341/// Handle to a sampler.
342///
343/// A `Sampler` object defines how a pipeline will sample from a [`TextureView`]. Samplers define
344/// image filters (including anisotropy) and address (wrapping) modes, among other things. See
345/// the documentation for [`SamplerDescriptor`] for more information.
346///
347/// It can be created with [`Device::create_sampler`].
348///
349/// Corresponds to [WebGPU `GPUSampler`](https://gpuweb.github.io/gpuweb/#sampler-interface).
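///
/// A minimal creation sketch; only the filter modes are set explicitly and everything else
/// is left at its default:
///
/// ```ignore
/// let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
///     label: Some("example sampler"),
///     mag_filter: wgpu::FilterMode::Linear,
///     min_filter: wgpu::FilterMode::Linear,
///     ..Default::default()
/// });
/// ```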
350#[derive(Debug)]
351pub struct Sampler {
352 context: Arc<C>,
353 id: ObjectId,
354 data: Box<Data>,
355}
356#[cfg(any(
357 not(target_arch = "wasm32"),
358 all(
359 feature = "fragile-send-sync-non-atomic-wasm",
360 not(target_feature = "atomics")
361 )
362))]
363static_assertions::assert_impl_all!(Sampler: Send, Sync);
364
365impl Drop for Sampler {
366 fn drop(&mut self) {
367 if !thread::panicking() {
368 self.context.sampler_drop(&self.id, self.data.as_ref());
369 }
370 }
371}
372
373/// Describes a [`Surface`].
374///
375/// For use with [`Surface::configure`].
376///
377/// Corresponds to [WebGPU `GPUCanvasConfiguration`](
378/// https://gpuweb.github.io/gpuweb/#canvas-configuration).
379pub type SurfaceConfiguration = wgt::SurfaceConfiguration<Vec<TextureFormat>>;
380static_assertions::assert_impl_all!(SurfaceConfiguration: Send, Sync);
381
382/// Handle to a presentable surface.
383///
384/// A `Surface` represents a platform-specific surface (e.g. a window) onto which rendered images may
385/// be presented. A `Surface` may be created with the unsafe function [`Instance::create_surface`].
386///
387/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
388/// [`GPUCanvasContext`](https://gpuweb.github.io/gpuweb/#canvas-context)
389/// serves a similar role.
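///
/// A sketch of configuring a surface and presenting one frame; it assumes a `surface` that was
/// already created for a window, plus the `adapter` and `device` from the earlier examples:
///
/// ```ignore
/// let config = surface
///     .get_default_config(&adapter, 800, 600)
///     .expect("surface is not supported by this adapter");
/// surface.configure(&device, &config);
///
/// let frame = surface.get_current_texture().expect("failed to acquire next frame");
/// // ... record and submit commands that render to `frame.texture` ...
/// frame.present();
/// ```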
390#[derive(Debug)]
391pub struct Surface {
392 context: Arc<C>,
393 id: ObjectId,
394 data: Box<Data>,
395 // Stores the latest `SurfaceConfiguration` that was set using `Surface::configure`.
396 // It is required to set the attributes of the `SurfaceTexture` in the
397 // `Surface::get_current_texture` method.
398 // Because the `Surface::configure` method operates on an immutable reference, this field has
399 // to be wrapped in a mutex, and since the configuration is only supplied after the surface
400 // has been created, it is additionally wrapped in an option.
401 config: Mutex<Option<SurfaceConfiguration>>,
402}
403#[cfg(any(
404 not(target_arch = "wasm32"),
405 all(
406 feature = "fragile-send-sync-non-atomic-wasm",
407 not(target_feature = "atomics")
408 )
409))]
410static_assertions::assert_impl_all!(Surface: Send, Sync);
411
412impl Drop for Surface {
413 fn drop(&mut self) {
414 if !thread::panicking() {
415 self.context.surface_drop(&self.id, self.data.as_ref())
416 }
417 }
418}
419
420/// Handle to a binding group layout.
421///
422/// A `BindGroupLayout` is a handle to the GPU-side layout of a binding group. It can be used to
423/// create a [`BindGroupDescriptor`] object, which in turn can be used to create a [`BindGroup`]
424/// object with [`Device::create_bind_group`]. A series of `BindGroupLayout`s can also be used to
425/// create a [`PipelineLayoutDescriptor`], which can be used to create a [`PipelineLayout`].
426///
427/// It can be created with [`Device::create_bind_group_layout`].
428///
429/// Corresponds to [WebGPU `GPUBindGroupLayout`](
430/// https://gpuweb.github.io/gpuweb/#gpubindgrouplayout).
431#[derive(Debug)]
432pub struct BindGroupLayout {
433 context: Arc<C>,
434 id: ObjectId,
435 data: Box<Data>,
436}
437#[cfg(any(
438 not(target_arch = "wasm32"),
439 all(
440 feature = "fragile-send-sync-non-atomic-wasm",
441 not(target_feature = "atomics")
442 )
443))]
444static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);
445
446impl Drop for BindGroupLayout {
447 fn drop(&mut self) {
448 if !thread::panicking() {
449 self.context
450 .bind_group_layout_drop(&self.id, self.data.as_ref());
451 }
452 }
453}
454
455/// Handle to a binding group.
456///
457/// A `BindGroup` represents the set of resources bound to the bindings described by a
458/// [`BindGroupLayout`]. It can be created with [`Device::create_bind_group`]. A `BindGroup` can
459/// be bound to a particular [`RenderPass`] with [`RenderPass::set_bind_group`], or to a
460/// [`ComputePass`] with [`ComputePass::set_bind_group`].
461///
462/// Corresponds to [WebGPU `GPUBindGroup`](https://gpuweb.github.io/gpuweb/#gpubindgroup).
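///
/// A sketch of creating a layout and a matching bind group; `uniform_buffer` is assumed to be
/// a previously created buffer with [`BufferUsages::UNIFORM`]:
///
/// ```ignore
/// let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
///     label: None,
///     entries: &[wgpu::BindGroupLayoutEntry {
///         binding: 0,
///         visibility: wgpu::ShaderStages::FRAGMENT,
///         ty: wgpu::BindingType::Buffer {
///             ty: wgpu::BufferBindingType::Uniform,
///             has_dynamic_offset: false,
///             min_binding_size: None,
///         },
///         count: None,
///     }],
/// });
/// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
///     label: None,
///     layout: &bind_group_layout,
///     entries: &[wgpu::BindGroupEntry {
///         binding: 0,
///         resource: uniform_buffer.as_entire_binding(),
///     }],
/// });
/// ```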
463#[derive(Debug)]
464pub struct BindGroup {
465 context: Arc<C>,
466 id: ObjectId,
467 data: Box<Data>,
468}
469#[cfg(any(
470 not(target_arch = "wasm32"),
471 all(
472 feature = "fragile-send-sync-non-atomic-wasm",
473 not(target_feature = "atomics")
474 )
475))]
476static_assertions::assert_impl_all!(BindGroup: Send, Sync);
477
478impl Drop for BindGroup {
479 fn drop(&mut self) {
480 if !thread::panicking() {
481 self.context.bind_group_drop(&self.id, self.data.as_ref());
482 }
483 }
484}
485
486/// Handle to a compiled shader module.
487///
488/// A `ShaderModule` represents a compiled shader module on the GPU. It can be created by passing
489/// source code to [`Device::create_shader_module`] or valid SPIR-V binary to
490/// [`Device::create_shader_module_spirv`]. Shader modules are used to define programmable stages
491/// of a pipeline.
492///
493/// Corresponds to [WebGPU `GPUShaderModule`](https://gpuweb.github.io/gpuweb/#shader-module).
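///
/// A sketch of compiling a WGSL module from an embedded string (requires the `wgsl` feature,
/// which is enabled by default; the shader source is illustrative):
///
/// ```ignore
/// let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
///     label: Some("example shader"),
///     source: wgpu::ShaderSource::Wgsl(
///         "@vertex fn vs_main() -> @builtin(position) vec4<f32> { return vec4<f32>(0.0); }".into(),
///     ),
/// });
/// ```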
494#[derive(Debug)]
495pub struct ShaderModule {
496 context: Arc<C>,
497 id: ObjectId,
498 data: Box<Data>,
499}
500#[cfg(any(
501 not(target_arch = "wasm32"),
502 all(
503 feature = "fragile-send-sync-non-atomic-wasm",
504 not(target_feature = "atomics")
505 )
506))]
507static_assertions::assert_impl_all!(ShaderModule: Send, Sync);
508
509impl Drop for ShaderModule {
510 fn drop(&mut self) {
511 if !thread::panicking() {
512 self.context
513 .shader_module_drop(&self.id, self.data.as_ref());
514 }
515 }
516}
517
518/// Source of a shader module.
519///
520/// The source will be parsed and validated.
521///
522/// Any necessary shader translation (e.g. from WGSL to SPIR-V or vice versa)
523/// will be done internally by wgpu.
524///
525/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
526/// only WGSL source code strings are accepted.
527#[cfg_attr(feature = "naga", allow(clippy::large_enum_variant))]
528#[derive(Clone, Debug)]
529#[non_exhaustive]
530pub enum ShaderSource<'a> {
531 /// SPIR-V module represented as a slice of words.
532 ///
533 /// See also: [`util::make_spirv`], [`include_spirv`]
534 #[cfg(feature = "spirv")]
535 SpirV(Cow<'a, [u32]>),
536 /// GLSL module as a string slice.
537 ///
538 /// Note: GLSL is not yet fully supported, and the target [`naga::ShaderStage`] must be specified.
539 #[cfg(feature = "glsl")]
540 Glsl {
541 /// The source code of the shader.
542 shader: Cow<'a, str>,
543 /// The shader stage that the shader targets. For example, `naga::ShaderStage::Vertex`
544 stage: naga::ShaderStage,
545 /// Defines to unlock configured shader features.
546 defines: naga::FastHashMap<String, String>,
547 },
548 /// WGSL module as a string slice.
549 #[cfg(feature = "wgsl")]
550 Wgsl(Cow<'a, str>),
551 /// Naga module.
552 #[cfg(feature = "naga")]
553 Naga(Cow<'static, naga::Module>),
554 /// Dummy variant because `Naga` doesn't have a lifetime and without enough active features it
555 /// could be the last one active.
556 #[doc(hidden)]
557 Dummy(PhantomData<&'a ()>),
558}
559static_assertions::assert_impl_all!(ShaderSource: Send, Sync);
560
561/// Descriptor for use with [`Device::create_shader_module`].
562///
563/// Corresponds to [WebGPU `GPUShaderModuleDescriptor`](
564/// https://gpuweb.github.io/gpuweb/#dictdef-gpushadermoduledescriptor).
565#[derive(Clone, Debug)]
566pub struct ShaderModuleDescriptor<'a> {
567 /// Debug label of the shader module. This will show up in graphics debuggers for easy identification.
568 pub label: Label<'a>,
569 /// Source code for the shader.
570 pub source: ShaderSource<'a>,
571}
572static_assertions::assert_impl_all!(ShaderModuleDescriptor: Send, Sync);
573
574/// Descriptor for a shader module given by SPIR-V binary, for use with
575/// [`Device::create_shader_module_spirv`].
576///
577/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
578/// only WGSL source code strings are accepted.
579#[derive(Debug)]
580pub struct ShaderModuleDescriptorSpirV<'a> {
581 /// Debug label of the shader module. This will show up in graphics debuggers for easy identification.
582 pub label: Label<'a>,
583 /// Binary SPIR-V data, in 4-byte words.
584 pub source: Cow<'a, [u32]>,
585}
586static_assertions::assert_impl_all!(ShaderModuleDescriptorSpirV: Send, Sync);
587
588/// Handle to a pipeline layout.
589///
590/// A `PipelineLayout` object describes the available binding groups of a pipeline.
591/// It can be created with [`Device::create_pipeline_layout`].
592///
593/// Corresponds to [WebGPU `GPUPipelineLayout`](https://gpuweb.github.io/gpuweb/#gpupipelinelayout).
594#[derive(Debug)]
595pub struct PipelineLayout {
596 context: Arc<C>,
597 id: ObjectId,
598 data: Box<Data>,
599}
600#[cfg(any(
601 not(target_arch = "wasm32"),
602 all(
603 feature = "fragile-send-sync-non-atomic-wasm",
604 not(target_feature = "atomics")
605 )
606))]
607static_assertions::assert_impl_all!(PipelineLayout: Send, Sync);
608
609impl Drop for PipelineLayout {
610 fn drop(&mut self) {
611 if !thread::panicking() {
612 self.context
613 .pipeline_layout_drop(&self.id, self.data.as_ref());
614 }
615 }
616}
617
618/// Handle to a rendering (graphics) pipeline.
619///
620/// A `RenderPipeline` object represents a graphics pipeline and its stages, bindings, vertex
621/// buffers and targets. It can be created with [`Device::create_render_pipeline`].
622///
623/// Corresponds to [WebGPU `GPURenderPipeline`](https://gpuweb.github.io/gpuweb/#render-pipeline).
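///
/// A sketch of creating a simple pipeline; `shader` is assumed to be a [`ShaderModule`] with
/// `vs_main`/`fs_main` entry points, and `layout: None` asks wgpu to derive the layout:
///
/// ```ignore
/// let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
///     label: Some("example pipeline"),
///     layout: None,
///     vertex: wgpu::VertexState {
///         module: &shader,
///         entry_point: "vs_main",
///         buffers: &[],
///     },
///     primitive: wgpu::PrimitiveState::default(),
///     depth_stencil: None,
///     multisample: wgpu::MultisampleState::default(),
///     fragment: Some(wgpu::FragmentState {
///         module: &shader,
///         entry_point: "fs_main",
///         targets: &[Some(wgpu::TextureFormat::Rgba8UnormSrgb.into())],
///     }),
///     multiview: None,
/// });
/// ```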
624#[derive(Debug)]
625pub struct RenderPipeline {
626 context: Arc<C>,
627 id: ObjectId,
628 data: Box<Data>,
629}
630#[cfg(any(
631 not(target_arch = "wasm32"),
632 all(
633 feature = "fragile-send-sync-non-atomic-wasm",
634 not(target_feature = "atomics")
635 )
636))]
637static_assertions::assert_impl_all!(RenderPipeline: Send, Sync);
638
639impl Drop for RenderPipeline {
640 fn drop(&mut self) {
641 if !thread::panicking() {
642 self.context
643 .render_pipeline_drop(&self.id, self.data.as_ref());
644 }
645 }
646}
647
648impl RenderPipeline {
649 /// Get an object representing the bind group layout at a given index.
650 pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
651 let context = Arc::clone(&self.context);
652 let (id, data) =
653 self.context
654 .render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index);
655 BindGroupLayout { context, id, data }
656 }
657}
658
659/// Handle to a compute pipeline.
660///
661/// A `ComputePipeline` object represents a compute pipeline and its single shader stage.
662/// It can be created with [`Device::create_compute_pipeline`].
663///
664/// Corresponds to [WebGPU `GPUComputePipeline`](https://gpuweb.github.io/gpuweb/#compute-pipeline).
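///
/// A minimal creation sketch; `shader` is assumed to be a [`ShaderModule`] containing a
/// compute entry point named `main`:
///
/// ```ignore
/// let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
///     label: None,
///     layout: None,
///     module: &shader,
///     entry_point: "main",
/// });
/// ```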
665#[derive(Debug)]
666pub struct ComputePipeline {
667 context: Arc<C>,
668 id: ObjectId,
669 data: Box<Data>,
670}
671#[cfg(any(
672 not(target_arch = "wasm32"),
673 all(
674 feature = "fragile-send-sync-non-atomic-wasm",
675 not(target_feature = "atomics")
676 )
677))]
678static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
679
680impl Drop for ComputePipeline {
681 fn drop(&mut self) {
682 if !thread::panicking() {
683 self.context
684 .compute_pipeline_drop(&self.id, self.data.as_ref());
685 }
686 }
687}
688
689impl ComputePipeline {
690 /// Get an object representing the bind group layout at a given index.
691 pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
692 let context = Arc::clone(&self.context);
693 let (id, data) = self.context.compute_pipeline_get_bind_group_layout(
694 &self.id,
695 self.data.as_ref(),
696 index,
697 );
698 BindGroupLayout { context, id, data }
699 }
700}
701
702/// Handle to a command buffer on the GPU.
703///
704/// A `CommandBuffer` represents a complete sequence of commands that may be submitted to a command
705/// queue with [`Queue::submit`]. A `CommandBuffer` is obtained by recording a series of commands to
706/// a [`CommandEncoder`] and then calling [`CommandEncoder::finish`].
707///
708/// Corresponds to [WebGPU `GPUCommandBuffer`](https://gpuweb.github.io/gpuweb/#command-buffer).
709#[derive(Debug)]
710pub struct CommandBuffer {
711 context: Arc<C>,
712 id: Option<ObjectId>,
713 data: Option<Box<Data>>,
714}
715#[cfg(any(
716 not(target_arch = "wasm32"),
717 all(
718 feature = "fragile-send-sync-non-atomic-wasm",
719 not(target_feature = "atomics")
720 )
721))]
722static_assertions::assert_impl_all!(CommandBuffer: Send, Sync);
723
724impl Drop for CommandBuffer {
725 fn drop(&mut self) {
726 if !thread::panicking() {
727 if let Some(id) = self.id.take() {
728 self.context
729 .command_buffer_drop(&id, self.data.take().unwrap().as_ref());
730 }
731 }
732 }
733}
734
735/// Encodes a series of GPU operations.
736///
737/// A command encoder can record [`RenderPass`]es, [`ComputePass`]es,
738/// and transfer operations between driver-managed resources like [`Buffer`]s and [`Texture`]s.
739///
740/// When finished recording, call [`CommandEncoder::finish`] to obtain a [`CommandBuffer`] which may
741/// be submitted for execution.
742///
743/// Corresponds to [WebGPU `GPUCommandEncoder`](https://gpuweb.github.io/gpuweb/#command-encoder).
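///
/// A sketch of the usual record-finish-submit flow:
///
/// ```ignore
/// let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
///     label: Some("example encoder"),
/// });
/// // ... record render/compute passes and copies on `encoder` ...
/// queue.submit(Some(encoder.finish()));
/// ```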
744#[derive(Debug)]
745pub struct CommandEncoder {
746 context: Arc<C>,
747 id: Option<ObjectId>,
748 data: Box<Data>,
749}
750#[cfg(any(
751 not(target_arch = "wasm32"),
752 all(
753 feature = "fragile-send-sync-non-atomic-wasm",
754 not(target_feature = "atomics")
755 )
756))]
757static_assertions::assert_impl_all!(CommandEncoder: Send, Sync);
758
759impl Drop for CommandEncoder {
760 fn drop(&mut self) {
761 if !thread::panicking() {
762 if let Some(id) = self.id.take() {
763 self.context.command_encoder_drop(&id, self.data.as_ref());
764 }
765 }
766 }
767}
768
769/// In-progress recording of a render pass: a list of render commands in a [`CommandEncoder`].
770///
771/// It can be created with [`CommandEncoder::begin_render_pass()`], whose [`RenderPassDescriptor`]
772/// specifies the attachments (textures) that will be rendered to.
773///
774/// Most of the methods on `RenderPass` serve one of two purposes, identifiable by their names:
775///
776/// * `draw_*()`: Drawing (that is, encoding a render command, which, when executed by the GPU, will
777/// rasterize something and execute shaders).
778/// * `set_*()`: Setting part of the [render state](https://gpuweb.github.io/gpuweb/#renderstate)
779/// for future drawing commands.
780///
781/// A render pass may contain any number of drawing commands, and before/between each command the
782/// render state may be updated however you wish; each drawing command will be executed using the
783/// render state that has been set when the `draw_*()` function is called.
784///
785/// Corresponds to [WebGPU `GPURenderPassEncoder`](
786/// https://gpuweb.github.io/gpuweb/#render-pass-encoder).
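///
/// A sketch of recording a single pass that clears `view` and issues one draw; `encoder`,
/// `render_pipeline` and `bind_group` come from the earlier examples:
///
/// ```ignore
/// {
///     let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
///         label: Some("example pass"),
///         color_attachments: &[Some(wgpu::RenderPassColorAttachment {
///             view: &view,
///             resolve_target: None,
///             ops: wgpu::Operations {
///                 load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
///                 store: wgpu::StoreOp::Store,
///             },
///         })],
///         depth_stencil_attachment: None,
///         timestamp_writes: None,
///         occlusion_query_set: None,
///     });
///     rpass.set_pipeline(&render_pipeline);
///     rpass.set_bind_group(0, &bind_group, &[]);
///     rpass.draw(0..3, 0..1);
/// } // dropping the pass ends it and releases the borrow of `encoder`
/// ```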
787#[derive(Debug)]
788pub struct RenderPass<'a> {
789 id: ObjectId,
790 data: Box<Data>,
791 parent: &'a mut CommandEncoder,
792}
793
794/// In-progress recording of a compute pass.
795///
796/// It can be created with [`CommandEncoder::begin_compute_pass`].
797///
798/// Corresponds to [WebGPU `GPUComputePassEncoder`](
799/// https://gpuweb.github.io/gpuweb/#compute-pass-encoder).
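///
/// A sketch of recording a compute pass with one dispatch; `encoder`, `compute_pipeline` and
/// `bind_group` come from the earlier examples, and the workgroup count is illustrative:
///
/// ```ignore
/// {
///     let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
///         label: Some("example compute pass"),
///         timestamp_writes: None,
///     });
///     cpass.set_pipeline(&compute_pipeline);
///     cpass.set_bind_group(0, &bind_group, &[]);
///     cpass.dispatch_workgroups(64, 1, 1);
/// }
/// ```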
800#[derive(Debug)]
801pub struct ComputePass<'a> {
802 id: ObjectId,
803 data: Box<Data>,
804 parent: &'a mut CommandEncoder,
805}
806
807/// Encodes a series of GPU operations into a reusable "render bundle".
808///
809/// It only supports a handful of render commands, but it makes them reusable.
810/// It can be created with [`Device::create_render_bundle_encoder`].
811/// It can be executed onto a [`CommandEncoder`] using [`RenderPass::execute_bundles`].
812///
813/// Executing a [`RenderBundle`] is often more efficient than issuing the underlying commands
814/// manually.
815///
816/// Corresponds to [WebGPU `GPURenderBundleEncoder`](
817/// https://gpuweb.github.io/gpuweb/#gpurenderbundleencoder).
818#[derive(Debug)]
819pub struct RenderBundleEncoder<'a> {
820 context: Arc<C>,
821 id: ObjectId,
822 data: Box<Data>,
823 parent: &'a Device,
824 /// This type should be !Send !Sync, because it represents an allocation on this thread's
825 /// command buffer.
826 _p: PhantomData<*const u8>,
827}
828static_assertions::assert_not_impl_any!(RenderBundleEncoder<'_>: Send, Sync);
829
830/// Pre-prepared reusable bundle of GPU operations.
831///
832/// It only supports a handful of render commands, but it makes them reusable. Executing a
833/// [`RenderBundle`] is often more efficient than issuing the underlying commands manually.
834///
835/// It can be created by use of a [`RenderBundleEncoder`], and executed onto a [`CommandEncoder`]
836/// using [`RenderPass::execute_bundles`].
837///
838/// Corresponds to [WebGPU `GPURenderBundle`](https://gpuweb.github.io/gpuweb/#render-bundle).
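///
/// A sketch of recording a bundle once and replaying it inside a render pass; the color
/// format must match the pass the bundle is later executed in:
///
/// ```ignore
/// let mut bundle_encoder =
///     device.create_render_bundle_encoder(&wgpu::RenderBundleEncoderDescriptor {
///         label: None,
///         color_formats: &[Some(wgpu::TextureFormat::Rgba8UnormSrgb)],
///         depth_stencil: None,
///         sample_count: 1,
///         multiview: None,
///     });
/// bundle_encoder.set_pipeline(&render_pipeline);
/// bundle_encoder.draw(0..3, 0..1);
/// let bundle = bundle_encoder.finish(&wgpu::RenderBundleDescriptor { label: None });
/// // Later, inside a render pass:
/// // rpass.execute_bundles(std::iter::once(&bundle));
/// ```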
839#[derive(Debug)]
840pub struct RenderBundle {
841 context: Arc<C>,
842 id: ObjectId,
843 data: Box<Data>,
844}
845#[cfg(any(
846 not(target_arch = "wasm32"),
847 all(
848 feature = "fragile-send-sync-non-atomic-wasm",
849 not(target_feature = "atomics")
850 )
851))]
852static_assertions::assert_impl_all!(RenderBundle: Send, Sync);
853
854impl Drop for RenderBundle {
855 fn drop(&mut self) {
856 if !thread::panicking() {
857 self.context
858 .render_bundle_drop(&self.id, self.data.as_ref());
859 }
860 }
861}
862
863/// Handle to a query set.
864///
865/// It can be created with [`Device::create_query_set`].
866///
867/// Corresponds to [WebGPU `GPUQuerySet`](https://gpuweb.github.io/gpuweb/#queryset).
868#[derive(Debug)]
869pub struct QuerySet {
870 context: Arc<C>,
871 id: ObjectId,
872 data: Box<Data>,
873}
874#[cfg(any(
875 not(target_arch = "wasm32"),
876 all(
877 feature = "fragile-send-sync-non-atomic-wasm",
878 not(target_feature = "atomics")
879 )
880))]
888static_assertions::assert_impl_all!(QuerySet: Send, Sync);
889
890impl Drop for QuerySet {
891 fn drop(&mut self) {
892 if !thread::panicking() {
893 self.context.query_set_drop(&self.id, self.data.as_ref());
894 }
895 }
896}
897
898/// Handle to a command queue on a device.
899///
900/// A `Queue` executes recorded [`CommandBuffer`] objects and provides convenience methods
901/// for writing to [buffers](Queue::write_buffer) and [textures](Queue::write_texture).
902/// It can be created along with a [`Device`] by calling [`Adapter::request_device`].
903///
904/// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue).
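///
/// A sketch of uploading data through the queue; writes are staged and actually submitted
/// together with the next call to [`Queue::submit`] (the byte payload is illustrative):
///
/// ```ignore
/// queue.write_buffer(&buffer, 0, &[0u8; 64]);
/// queue.submit(std::iter::empty()); // flush the staged write
/// ```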
905#[derive(Debug)]
906pub struct Queue {
907 context: Arc<C>,
908 id: ObjectId,
909 data: Box<Data>,
910}
911#[cfg(any(
912 not(target_arch = "wasm32"),
913 all(
914 feature = "fragile-send-sync-non-atomic-wasm",
915 not(target_feature = "atomics")
916 )
917))]
918static_assertions::assert_impl_all!(Queue: Send, Sync);
919
920/// Resource that can be bound to a pipeline.
921///
922/// Corresponds to [WebGPU `GPUBindingResource`](
923/// https://gpuweb.github.io/gpuweb/#typedefdef-gpubindingresource).
924#[non_exhaustive]
925#[derive(Clone, Debug)]
926pub enum BindingResource<'a> {
927 /// Binding is backed by a buffer.
928 ///
929 /// Corresponds to [`wgt::BufferBindingType::Uniform`] and [`wgt::BufferBindingType::Storage`]
930 /// with [`BindGroupLayoutEntry::count`] set to None.
931 Buffer(BufferBinding<'a>),
932 /// Binding is backed by an array of buffers.
933 ///
934 /// [`Features::BUFFER_BINDING_ARRAY`] must be supported to use this feature.
935 ///
936 /// Corresponds to [`wgt::BufferBindingType::Uniform`] and [`wgt::BufferBindingType::Storage`]
937 /// with [`BindGroupLayoutEntry::count`] set to Some.
938 BufferArray(&'a [BufferBinding<'a>]),
939 /// Binding is a sampler.
940 ///
941 /// Corresponds to [`wgt::BindingType::Sampler`] with [`BindGroupLayoutEntry::count`] set to None.
942 Sampler(&'a Sampler),
943 /// Binding is backed by an array of samplers.
944 ///
945 /// [`Features::TEXTURE_BINDING_ARRAY`] must be supported to use this feature.
946 ///
947 /// Corresponds to [`wgt::BindingType::Sampler`] with [`BindGroupLayoutEntry::count`] set
948 /// to Some.
949 SamplerArray(&'a [&'a Sampler]),
950 /// Binding is backed by a texture.
951 ///
952 /// Corresponds to [`wgt::BindingType::Texture`] and [`wgt::BindingType::StorageTexture`] with
953 /// [`BindGroupLayoutEntry::count`] set to None.
954 TextureView(&'a TextureView),
955 /// Binding is backed by an array of textures.
956 ///
957 /// [`Features::TEXTURE_BINDING_ARRAY`] must be supported to use this feature.
958 ///
959 /// Corresponds to [`wgt::BindingType::Texture`] and [`wgt::BindingType::StorageTexture`] with
960 /// [`BindGroupLayoutEntry::count`] set to Some.
961 TextureViewArray(&'a [&'a TextureView]),
962}
963#[cfg(any(
964 not(target_arch = "wasm32"),
965 all(
966 feature = "fragile-send-sync-non-atomic-wasm",
967 not(target_feature = "atomics")
968 )
969))]
970static_assertions::assert_impl_all!(BindingResource: Send, Sync);
971
972/// Describes the segment of a buffer to bind.
973///
974/// Corresponds to [WebGPU `GPUBufferBinding`](
975/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferbinding).
976#[derive(Clone, Debug)]
977pub struct BufferBinding<'a> {
978 /// The buffer to bind.
979 pub buffer: &'a Buffer,
980
981 /// Base offset of the buffer, in bytes.
982 ///
983 /// If the [`has_dynamic_offset`] field of this buffer's layout entry is
984 /// `true`, the offset here will be added to the dynamic offset passed to
985 /// [`RenderPass::set_bind_group`] or [`ComputePass::set_bind_group`].
986 ///
987 /// If the buffer was created with [`BufferUsages::UNIFORM`], then this
988 /// offset must be a multiple of
989 /// [`Limits::min_uniform_buffer_offset_alignment`].
990 ///
991 /// If the buffer was created with [`BufferUsages::STORAGE`], then this
992 /// offset must be a multiple of
993 /// [`Limits::min_storage_buffer_offset_alignment`].
994 ///
995 /// [`has_dynamic_offset`]: BindingType::Buffer::has_dynamic_offset
996 pub offset: BufferAddress,
997
998 /// Size of the binding in bytes, or `None` for using the rest of the buffer.
999 pub size: Option<BufferSize>,
1000}
1001#[cfg(any(
1002 not(target_arch = "wasm32"),
1003 all(
1004 feature = "fragile-send-sync-non-atomic-wasm",
1005 not(target_feature = "atomics")
1006 )
1007))]
1008static_assertions::assert_impl_all!(BufferBinding: Send, Sync);
1009
1010/// Operation to perform to the output attachment at the start of a render pass.
1011///
1012/// Corresponds to [WebGPU `GPULoadOp`](https://gpuweb.github.io/gpuweb/#enumdef-gpuloadop),
1013/// plus the corresponding clearValue.
1014#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
1015#[cfg_attr(feature = "trace", derive(serde::Serialize))]
1016#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
1017pub enum LoadOp<V> {
1018 /// Loads the specified value for this attachment into the render pass.
1019 ///
1020 /// On some GPU hardware (primarily mobile), "clear" is significantly cheaper
1021 /// because it avoids loading data from main memory into tile-local memory.
1022 ///
1023 /// On other GPU hardware, there isn’t a significant difference.
1024 ///
1025 /// As a result, it is recommended to use "clear" rather than "load" in cases
1026 /// where the initial value doesn’t matter
1027 /// (e.g. the render target will be cleared using a skybox).
1028 Clear(V),
1029 /// Loads the existing value for this attachment into the render pass.
1030 Load,
1031}
1032
1033impl<V: Default> Default for LoadOp<V> {
1034 fn default() -> Self {
1035 Self::Clear(Default::default())
1036 }
1037}
1038
1039/// Operation to perform to the output attachment at the end of a render pass.
1040///
1041/// Corresponds to [WebGPU `GPUStoreOp`](https://gpuweb.github.io/gpuweb/#enumdef-gpustoreop).
1042#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
1043#[cfg_attr(feature = "trace", derive(serde::Serialize))]
1044#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
1045pub enum StoreOp {
1046 /// Stores the resulting value of the render pass for this attachment.
1047 #[default]
1048 Store,
1049 /// Discards the resulting value of the render pass for this attachment.
1050 ///
1051 /// The attachment will be treated as uninitialized afterwards.
1052 /// (If only one of the depth and stencil texture aspects is set to `Discard`,
1053 /// the other aspect will be preserved.)
1054 ///
1055 /// This can be significantly faster on tile-based render hardware.
1056 ///
1057 /// Prefer this if the attachment is not read by subsequent passes.
1058 Discard,
1059}
1060
1061/// Pair of load and store operations for an attachment aspect.
1062///
1063/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
1064/// separate `loadOp` and `storeOp` fields are used instead.
1065#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
1066#[cfg_attr(feature = "trace", derive(serde::Serialize))]
1067#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
1068pub struct Operations<V> {
1069 /// How data should be read through this attachment.
1070 pub load: LoadOp<V>,
1071 /// Whether data will be written to through this attachment.
1072 ///
1073 /// Note that resolve textures (if specified) are always written to,
1074 /// regardless of this setting.
1075 pub store: StoreOp,
1076}
1077
1078impl<V: Default> Default for Operations<V> {
1079 #[inline]
1080 fn default() -> Self {
1081 Self {
1082 load: LoadOp::<V>::default(),
1083 store: StoreOp::default(),
1084 }
1085 }
1086}
1087
1088/// Describes the timestamp writes of a render pass.
1089///
1090/// For use with [`RenderPassDescriptor`].
1091/// At least one of `beginning_of_pass_write_index` and `end_of_pass_write_index` must be `Some`.
1092///
1093/// Corresponds to [WebGPU `GPURenderPassTimestampWrites`](
1094/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderpasstimestampwrites).
1095#[derive(Clone, Debug)]
1096pub struct RenderPassTimestampWrites<'a> {
1097 /// The query set to write to.
1098 pub query_set: &'a QuerySet,
1099 /// The index of the query set at which a start timestamp of this pass is written, if any.
1100 pub beginning_of_pass_write_index: Option<u32>,
1101 /// The index of the query set at which an end timestamp of this pass is written, if any.
1102 pub end_of_pass_write_index: Option<u32>,
1103}
1104#[cfg(any(
1105 not(target_arch = "wasm32"),
1106 all(
1107 feature = "fragile-send-sync-non-atomic-wasm",
1108 not(target_feature = "atomics")
1109 )
1110))]
1111static_assertions::assert_impl_all!(RenderPassTimestampWrites: Send, Sync);
1112
1113/// Describes a color attachment to a [`RenderPass`].
1114///
1115/// For use with [`RenderPassDescriptor`].
1116///
1117/// Corresponds to [WebGPU `GPURenderPassColorAttachment`](
1118/// https://gpuweb.github.io/gpuweb/#color-attachments).
1119#[derive(Clone, Debug)]
1120pub struct RenderPassColorAttachment<'tex> {
1121 /// The view to use as an attachment.
1122 pub view: &'tex TextureView,
1123 /// The view that will receive the resolved output if multisampling is used.
1124 ///
1125 /// If set, it is always written to, regardless of how [`Self::ops`] is configured.
1126 pub resolve_target: Option<&'tex TextureView>,
1127 /// What operations will be performed on this color attachment.
1128 pub ops: Operations<Color>,
1129}
1130#[cfg(any(
1131 not(target_arch = "wasm32"),
1132 all(
1133 feature = "fragile-send-sync-non-atomic-wasm",
1134 not(target_feature = "atomics")
1135 )
1136))]
1137static_assertions::assert_impl_all!(RenderPassColorAttachment: Send, Sync);
1138
1139/// Describes a depth/stencil attachment to a [`RenderPass`].
1140///
1141/// For use with [`RenderPassDescriptor`].
1142///
1143/// Corresponds to [WebGPU `GPURenderPassDepthStencilAttachment`](
1144/// https://gpuweb.github.io/gpuweb/#depth-stencil-attachments).
1145#[derive(Clone, Debug)]
1146pub struct RenderPassDepthStencilAttachment<'tex> {
1147 /// The view to use as an attachment.
1148 pub view: &'tex TextureView,
1149 /// What operations will be performed on the depth part of the attachment.
1150 pub depth_ops: Option<Operations<f32>>,
1151 /// What operations will be performed on the stencil part of the attachment.
1152 pub stencil_ops: Option<Operations<u32>>,
1153}
1154#[cfg(any(
1155 not(target_arch = "wasm32"),
1156 all(
1157 feature = "fragile-send-sync-non-atomic-wasm",
1158 not(target_feature = "atomics")
1159 )
1160))]
1161static_assertions::assert_impl_all!(RenderPassDepthStencilAttachment: Send, Sync);
1162
1163// The underlying types are also exported so that documentation shows up for them
1164
1165/// Object debugging label.
1166pub type Label<'a> = Option<&'a str>;
1167pub use wgt::RequestAdapterOptions as RequestAdapterOptionsBase;
1168/// Additional information required when requesting an adapter.
1169///
1170/// For use with [`Instance::request_adapter`].
1171///
1172/// Corresponds to [WebGPU `GPURequestAdapterOptions`](
1173/// https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions).
1174pub type RequestAdapterOptions<'a> = RequestAdapterOptionsBase<&'a Surface>;
1175#[cfg(any(
1176 not(target_arch = "wasm32"),
1177 all(
1178 feature = "fragile-send-sync-non-atomic-wasm",
1179 not(target_feature = "atomics")
1180 )
1181))]
1182static_assertions::assert_impl_all!(RequestAdapterOptions: Send, Sync);
1183/// Describes a [`Device`].
1184///
1185/// For use with [`Adapter::request_device`].
1186///
1187/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
1188/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
1189pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
1190static_assertions::assert_impl_all!(DeviceDescriptor: Send, Sync);
1191/// Describes a [`Buffer`].
1192///
1193/// For use with [`Device::create_buffer`].
1194///
1195/// Corresponds to [WebGPU `GPUBufferDescriptor`](
1196/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
1197pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
1198static_assertions::assert_impl_all!(BufferDescriptor: Send, Sync);
1199/// Describes a [`CommandEncoder`].
1200///
1201/// For use with [`Device::create_command_encoder`].
1202///
1203/// Corresponds to [WebGPU `GPUCommandEncoderDescriptor`](
1204/// https://gpuweb.github.io/gpuweb/#dictdef-gpucommandencoderdescriptor).
1205pub type CommandEncoderDescriptor<'a> = wgt::CommandEncoderDescriptor<Label<'a>>;
1206static_assertions::assert_impl_all!(CommandEncoderDescriptor: Send, Sync);
1207/// Describes a [`RenderBundle`].
1208///
1209/// For use with [`RenderBundleEncoder::finish`].
1210///
1211/// Corresponds to [WebGPU `GPURenderBundleDescriptor`](
1212/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderbundledescriptor).
1213pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
1214static_assertions::assert_impl_all!(RenderBundleDescriptor: Send, Sync);
1215/// Describes a [`Texture`].
1216///
1217/// For use with [`Device::create_texture`].
1218///
1219/// Corresponds to [WebGPU `GPUTextureDescriptor`](
1220/// https://gpuweb.github.io/gpuweb/#dictdef-gputexturedescriptor).
1221pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>, &'a [TextureFormat]>;
1222static_assertions::assert_impl_all!(TextureDescriptor: Send, Sync);
1223/// Describes a [`QuerySet`].
1224///
1225/// For use with [`Device::create_query_set`].
1226///
1227/// Corresponds to [WebGPU `GPUQuerySetDescriptor`](
1228/// https://gpuweb.github.io/gpuweb/#dictdef-gpuquerysetdescriptor).
1229pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor<Label<'a>>;
1230static_assertions::assert_impl_all!(QuerySetDescriptor: Send, Sync);
1231pub use wgt::Maintain as MaintainBase;
1232/// Passed to [`Device::poll`] to control how and if it should block.
1233pub type Maintain = wgt::Maintain<SubmissionIndex>;
1234#[cfg(any(
1235 not(target_arch = "wasm32"),
1236 all(
1237 feature = "fragile-send-sync-non-atomic-wasm",
1238 not(target_feature = "atomics")
1239 )
1240))]
1241static_assertions::assert_impl_all!(Maintain: Send, Sync);
1242
1243/// Describes a [`TextureView`].
1244///
1245/// For use with [`Texture::create_view`].
1246///
1247/// Corresponds to [WebGPU `GPUTextureViewDescriptor`](
1248/// https://gpuweb.github.io/gpuweb/#dictdef-gputextureviewdescriptor).
1249#[derive(Clone, Debug, Default, Eq, PartialEq)]
1250pub struct TextureViewDescriptor<'a> {
1251 /// Debug label of the texture view. This will show up in graphics debuggers for easy identification.
1252 pub label: Label<'a>,
1253 /// Format of the texture view. It must either be the same as the texture's format or be one
1254 /// of the formats listed in the texture's `view_formats`.
1255 pub format: Option<TextureFormat>,
1256 /// The dimension of the texture view. For 1D textures, this must be `D1`. For 2D textures it must be one of
1257 /// `D2`, `D2Array`, `Cube`, or `CubeArray`. For 3D textures it must be `D3`.
1258 pub dimension: Option<TextureViewDimension>,
1259 /// Aspect of the texture. Color textures must be [`TextureAspect::All`].
1260 pub aspect: TextureAspect,
1261 /// Base mip level.
1262 pub base_mip_level: u32,
1263 /// Mip level count.
1264 /// If `Some(count)`, `base_mip_level + count` must be less than or equal to the underlying texture's mip level count.
1265 /// If `None`, the view is considered to include the rest of the mip levels, but at least 1 in total.
1266 pub mip_level_count: Option<u32>,
1267 /// Base array layer.
1268 pub base_array_layer: u32,
1269 /// Layer count.
1270 /// If `Some(count)`, `base_array_layer + count` must be less than or equal to the underlying array layer count.
1271 /// If `None`, the view is considered to include the rest of the array layers, but at least 1 in total.
1272 pub array_layer_count: Option<u32>,
1273}
1274static_assertions::assert_impl_all!(TextureViewDescriptor: Send, Sync);
1275
1276/// Describes a [`PipelineLayout`].
1277///
1278/// For use with [`Device::create_pipeline_layout`].
1279///
1280/// Corresponds to [WebGPU `GPUPipelineLayoutDescriptor`](
1281/// https://gpuweb.github.io/gpuweb/#dictdef-gpupipelinelayoutdescriptor).
1282#[derive(Clone, Debug, Default)]
1283pub struct PipelineLayoutDescriptor<'a> {
1284 /// Debug label of the pipeline layout. This will show up in graphics debuggers for easy identification.
1285 pub label: Label<'a>,
1286 /// Bind groups that this pipeline uses. The first entry will provide all the bindings for
1287 /// "set = 0", the second entry will provide all the bindings for "set = 1", etc.
1288 pub bind_group_layouts: &'a [&'a BindGroupLayout],
1289 /// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants
1290 /// must define the range in push constant memory that corresponds to its single `layout(push_constant)`
1291 /// uniform block.
1292 ///
1293 /// If this array is non-empty, the [`Features::PUSH_CONSTANTS`] feature must be enabled.
1294 pub push_constant_ranges: &'a [PushConstantRange],
1295}
1296#[cfg(any(
1297 not(target_arch = "wasm32"),
1298 all(
1299 feature = "fragile-send-sync-non-atomic-wasm",
1300 not(target_feature = "atomics")
1301 )
1302))]
1303static_assertions::assert_impl_all!(PipelineLayoutDescriptor: Send, Sync);
1304
1305/// Describes a [`Sampler`].
1306///
1307/// For use with [`Device::create_sampler`].
1308///
1309/// Corresponds to [WebGPU `GPUSamplerDescriptor`](
1310/// https://gpuweb.github.io/gpuweb/#dictdef-gpusamplerdescriptor).
1311#[derive(Clone, Debug, PartialEq)]
1312pub struct SamplerDescriptor<'a> {
1313 /// Debug label of the sampler. This will show up in graphics debuggers for easy identification.
1314 pub label: Label<'a>,
1315 /// How to deal with out of bounds accesses in the u (i.e. x) direction
1316 pub address_mode_u: AddressMode,
1317 /// How to deal with out of bounds accesses in the v (i.e. y) direction
1318 pub address_mode_v: AddressMode,
1319 /// How to deal with out of bounds accesses in the w (i.e. z) direction
1320 pub address_mode_w: AddressMode,
1321 /// How to filter the texture when it needs to be magnified (made larger)
1322 pub mag_filter: FilterMode,
1323 /// How to filter the texture when it needs to be minified (made smaller)
1324 pub min_filter: FilterMode,
1325 /// How to filter between mip map levels
1326 pub mipmap_filter: FilterMode,
1327 /// Minimum level of detail (i.e. mip level) to use
1328 pub lod_min_clamp: f32,
1329 /// Maximum level of detail (i.e. mip level) to use
1330 pub lod_max_clamp: f32,
1331 /// If this is enabled, this is a comparison sampler using the given comparison function.
1332 pub compare: Option<CompareFunction>,
1333 /// Must be at least 1. If this is not 1, all filter modes must be linear.
1334 pub anisotropy_clamp: u16,
1335 /// Border color to use when address_mode is [`AddressMode::ClampToBorder`]
1336 pub border_color: Option<SamplerBorderColor>,
1337}
1338static_assertions::assert_impl_all!(SamplerDescriptor: Send, Sync);
1339
1340impl Default for SamplerDescriptor<'_> {
1341 fn default() -> Self {
1342 Self {
1343 label: None,
1344 address_mode_u: Default::default(),
1345 address_mode_v: Default::default(),
1346 address_mode_w: Default::default(),
1347 mag_filter: Default::default(),
1348 min_filter: Default::default(),
1349 mipmap_filter: Default::default(),
1350 lod_min_clamp: 0.0,
1351 lod_max_clamp: 32.0,
1352 compare: None,
1353 anisotropy_clamp: 1,
1354 border_color: None,
1355 }
1356 }
1357}
1358
1359/// An element of a [`BindGroupDescriptor`], consisting of a bindable resource
1360/// and the slot to bind it to.
1361///
1362/// Corresponds to [WebGPU `GPUBindGroupEntry`](
1363/// https://gpuweb.github.io/gpuweb/#dictdef-gpubindgroupentry).
1364#[derive(Clone, Debug)]
1365pub struct BindGroupEntry<'a> {
1366 /// Slot for which binding provides resource. Corresponds to an entry of the same
1367 /// binding index in the [`BindGroupLayoutDescriptor`].
1368 pub binding: u32,
1369 /// Resource to attach to the binding
1370 pub resource: BindingResource<'a>,
1371}
1372#[cfg(any(
1373 not(target_arch = "wasm32"),
1374 all(
1375 feature = "fragile-send-sync-non-atomic-wasm",
1376 not(target_feature = "atomics")
1377 )
1378))]
1379static_assertions::assert_impl_all!(BindGroupEntry: Send, Sync);
1380
1381/// Describes a group of bindings and the resources to be bound.
1382///
1383/// For use with [`Device::create_bind_group`].
1384///
1385/// Corresponds to [WebGPU `GPUBindGroupDescriptor`](
1386/// https://gpuweb.github.io/gpuweb/#dictdef-gpubindgroupdescriptor).
1387#[derive(Clone, Debug)]
1388pub struct BindGroupDescriptor<'a> {
1389 /// Debug label of the bind group. This will show up in graphics debuggers for easy identification.
1390 pub label: Label<'a>,
1391 /// The [`BindGroupLayout`] that corresponds to this bind group.
1392 pub layout: &'a BindGroupLayout,
1393 /// The resources to bind to this bind group.
1394 pub entries: &'a [BindGroupEntry<'a>],
1395}
1396#[cfg(any(
1397 not(target_arch = "wasm32"),
1398 all(
1399 feature = "fragile-send-sync-non-atomic-wasm",
1400 not(target_feature = "atomics")
1401 )
1402))]
1403static_assertions::assert_impl_all!(BindGroupDescriptor: Send, Sync);
1404
1405/// Describes the attachments of a render pass.
1406///
1407/// For use with [`CommandEncoder::begin_render_pass`].
1408///
1409/// Note: separate lifetimes are needed because the texture views (`'tex`)
1410/// have to live as long as the pass is recorded, while everything else (`'desc`) doesn't.
1411///
1412/// Corresponds to [WebGPU `GPURenderPassDescriptor`](
1413/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderpassdescriptor).
1414#[derive(Clone, Debug, Default)]
1415pub struct RenderPassDescriptor<'tex, 'desc> {
1416 /// Debug label of the render pass. This will show up in graphics debuggers for easy identification.
1417 pub label: Label<'desc>,
1418 /// The color attachments of the render pass.
1419 pub color_attachments: &'desc [Option<RenderPassColorAttachment<'tex>>],
1420 /// The depth and stencil attachment of the render pass, if any.
1421 pub depth_stencil_attachment: Option<RenderPassDepthStencilAttachment<'tex>>,
1422 /// Defines which timestamp values will be written for this pass, and where to write them to.
1423 ///
1424 /// Requires [`Features::TIMESTAMP_QUERY`] to be enabled.
1425 pub timestamp_writes: Option<RenderPassTimestampWrites<'desc>>,
1426 /// Defines where the occlusion query results will be stored for this pass.
1427 pub occlusion_query_set: Option<&'tex QuerySet>,
1428}
1429#[cfg(any(
1430 not(target_arch = "wasm32"),
1431 all(
1432 feature = "fragile-send-sync-non-atomic-wasm",
1433 not(target_feature = "atomics")
1434 )
1435))]
1436static_assertions::assert_impl_all!(RenderPassDescriptor: Send, Sync);
1437
1438/// Describes how the vertex buffer is interpreted.
1439///
1440/// For use in [`VertexState`].
1441///
1442/// Corresponds to [WebGPU `GPUVertexBufferLayout`](
1443/// https://gpuweb.github.io/gpuweb/#dictdef-gpuvertexbufferlayout).
1444#[derive(Clone, Debug, Hash, Eq, PartialEq)]
1445pub struct VertexBufferLayout<'a> {
1446 /// The stride, in bytes, between elements of this buffer.
1447 pub array_stride: BufferAddress,
1448 /// How often this vertex buffer is "stepped" forward.
1449 pub step_mode: VertexStepMode,
1450 /// The list of attributes which comprise a single vertex.
1451 pub attributes: &'a [VertexAttribute],
1452}
1453static_assertions::assert_impl_all!(VertexBufferLayout: Send, Sync);
1454
1455/// Describes the vertex processing in a render pipeline.
1456///
1457/// For use in [`RenderPipelineDescriptor`].
1458///
1459/// Corresponds to [WebGPU `GPUVertexState`](
1460/// https://gpuweb.github.io/gpuweb/#dictdef-gpuvertexstate).
1461#[derive(Clone, Debug)]
1462pub struct VertexState<'a> {
1463 /// The compiled shader module for this stage.
1464 pub module: &'a ShaderModule,
1465 /// The name of the entry point in the compiled shader. There must be a function with this name
1466 /// in the shader.
1467 pub entry_point: &'a str,
1468 /// The format of any vertex buffers used with this pipeline.
1469 pub buffers: &'a [VertexBufferLayout<'a>],
1470}
1471#[cfg(any(
1472 not(target_arch = "wasm32"),
1473 all(
1474 feature = "fragile-send-sync-non-atomic-wasm",
1475 not(target_feature = "atomics")
1476 )
1477))]
1478static_assertions::assert_impl_all!(VertexState: Send, Sync);
1479
1480/// Describes the fragment processing in a render pipeline.
1481///
1482/// For use in [`RenderPipelineDescriptor`].
1483///
1484/// Corresponds to [WebGPU `GPUFragmentState`](
1485/// https://gpuweb.github.io/gpuweb/#dictdef-gpufragmentstate).
1486#[derive(Clone, Debug)]
1487pub struct FragmentState<'a> {
1488 /// The compiled shader module for this stage.
1489 pub module: &'a ShaderModule,
1490 /// The name of the entry point in the compiled shader. There must be a function with this name
1491 /// in the shader.
1492 pub entry_point: &'a str,
1493 /// The color state of the render targets.
1494 pub targets: &'a [Option<ColorTargetState>],
1495}
1496#[cfg(any(
1497 not(target_arch = "wasm32"),
1498 all(
1499 feature = "fragile-send-sync-non-atomic-wasm",
1500 not(target_feature = "atomics")
1501 )
1502))]
1503static_assertions::assert_impl_all!(FragmentState: Send, Sync);
1504
1505/// Describes a render (graphics) pipeline.
1506///
1507/// For use with [`Device::create_render_pipeline`].
1508///
1509/// Corresponds to [WebGPU `GPURenderPipelineDescriptor`](
1510/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderpipelinedescriptor).
1511#[derive(Clone, Debug)]
1512pub struct RenderPipelineDescriptor<'a> {
1513 /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification.
1514 pub label: Label<'a>,
1515 /// The layout of bind groups for this pipeline.
1516 pub layout: Option<&'a PipelineLayout>,
1517 /// The compiled vertex stage, its entry point, and the input buffers layout.
1518 pub vertex: VertexState<'a>,
1519 /// The properties of the pipeline at the primitive assembly and rasterization level.
1520 pub primitive: PrimitiveState,
1521 /// The effect of draw calls on the depth and stencil aspects of the output target, if any.
1522 pub depth_stencil: Option<DepthStencilState>,
1523 /// The multi-sampling properties of the pipeline.
1524 pub multisample: MultisampleState,
1525 /// The compiled fragment stage, its entry point, and the color targets.
1526 pub fragment: Option<FragmentState<'a>>,
1527 /// If the pipeline will be used with a multiview render pass, this indicates how many array
1528 /// layers the attachments will have.
1529 pub multiview: Option<NonZeroU32>,
1530}
1531#[cfg(any(
1532 not(target_arch = "wasm32"),
1533 all(
1534 feature = "fragile-send-sync-non-atomic-wasm",
1535 not(target_feature = "atomics")
1536 )
1537))]
1538static_assertions::assert_impl_all!(RenderPipelineDescriptor: Send, Sync);
1539
1540/// Describes the timestamp writes of a compute pass.
1541///
1542/// For use with [`ComputePassDescriptor`].
1543/// At least one of `beginning_of_pass_write_index` and `end_of_pass_write_index` must be `Some`.
1544///
1545/// Corresponds to [WebGPU `GPUComputePassTimestampWrites`](
1546/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepasstimestampwrites).
1547#[derive(Clone, Debug)]
1548pub struct ComputePassTimestampWrites<'a> {
1549 /// The query set to write to.
1550 pub query_set: &'a QuerySet,
1551 /// The index of the query set at which a start timestamp of this pass is written, if any.
1552 pub beginning_of_pass_write_index: Option<u32>,
1553 /// The index of the query set at which an end timestamp of this pass is written, if any.
1554 pub end_of_pass_write_index: Option<u32>,
1555}
1556#[cfg(any(
1557 not(target_arch = "wasm32"),
1558 all(
1559 feature = "fragile-send-sync-non-atomic-wasm",
1560 not(target_feature = "atomics")
1561 )
1562))]
1563static_assertions::assert_impl_all!(ComputePassTimestampWrites: Send, Sync);
1564
1565/// Describes the attachments of a compute pass.
1566///
1567/// For use with [`CommandEncoder::begin_compute_pass`].
1568///
1569/// Corresponds to [WebGPU `GPUComputePassDescriptor`](
1570/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepassdescriptor).
1571#[derive(Clone, Default, Debug)]
1572pub struct ComputePassDescriptor<'a> {
1573 /// Debug label of the compute pass. This will show up in graphics debuggers for easy identification.
1574 pub label: Label<'a>,
1575 /// Defines which timestamp values will be written for this pass, and where to write them to.
1576 ///
1577 /// Requires [`Features::TIMESTAMP_QUERY`] to be enabled.
1578 pub timestamp_writes: Option<ComputePassTimestampWrites<'a>>,
1579}
1580#[cfg(any(
1581 not(target_arch = "wasm32"),
1582 all(
1583 feature = "fragile-send-sync-non-atomic-wasm",
1584 not(target_feature = "atomics")
1585 )
1586))]
1587static_assertions::assert_impl_all!(ComputePassDescriptor: Send, Sync);
1588
1589/// Describes a compute pipeline.
1590///
1591/// For use with [`Device::create_compute_pipeline`].
1592///
1593/// Corresponds to [WebGPU `GPUComputePipelineDescriptor`](
1594/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepipelinedescriptor).
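///
/// A minimal sketch; the shader module and the `main` entry point name are
/// illustrative assumptions:
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// # let module: wgpu::ShaderModule = panic!();
/// let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
///     label: Some("example compute pipeline"),
///     layout: None, // let wgpu derive the layout from the shader
///     module: &module,
///     entry_point: "main", // assumed entry point name
/// });
/// ```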
1595#[derive(Clone, Debug)]
1596pub struct ComputePipelineDescriptor<'a> {
1597 /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification.
1598 pub label: Label<'a>,
1599 /// The layout of bind groups for this pipeline.
1600 pub layout: Option<&'a PipelineLayout>,
1601 /// The compiled shader module for this stage.
1602 pub module: &'a ShaderModule,
1603 /// The name of the entry point in the compiled shader. There must be a function with this name
1604 /// and no return value in the shader.
1605 pub entry_point: &'a str,
1606}
1607#[cfg(any(
1608 not(target_arch = "wasm32"),
1609 all(
1610 feature = "fragile-send-sync-non-atomic-wasm",
1611 not(target_feature = "atomics")
1612 )
1613))]
1614static_assertions::assert_impl_all!(ComputePipelineDescriptor: Send, Sync);
1615
1616pub use wgt::ImageCopyBuffer as ImageCopyBufferBase;
1617/// View of a buffer which can be used to copy to/from a texture.
1618///
1619/// Corresponds to [WebGPU `GPUImageCopyBuffer`](
1620/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopybuffer).
1621pub type ImageCopyBuffer<'a> = ImageCopyBufferBase<&'a Buffer>;
1622#[cfg(any(
1623 not(target_arch = "wasm32"),
1624 all(
1625 feature = "fragile-send-sync-non-atomic-wasm",
1626 not(target_feature = "atomics")
1627 )
1628))]
1629static_assertions::assert_impl_all!(ImageCopyBuffer: Send, Sync);
1630
1631pub use wgt::ImageCopyTexture as ImageCopyTextureBase;
1632/// View of a texture which can be used to copy to/from a buffer/texture.
1633///
1634/// Corresponds to [WebGPU `GPUImageCopyTexture`](
1635/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopytexture).
1636pub type ImageCopyTexture<'a> = ImageCopyTextureBase<&'a Texture>;
1637#[cfg(any(
1638 not(target_arch = "wasm32"),
1639 all(
1640 feature = "fragile-send-sync-non-atomic-wasm",
1641 not(target_feature = "atomics")
1642 )
1643))]
1644static_assertions::assert_impl_all!(ImageCopyTexture: Send, Sync);
1645
1646pub use wgt::ImageCopyTextureTagged as ImageCopyTextureTaggedBase;
1647/// View of a texture which can be used to copy to a texture, including
1648/// color space and alpha premultiplication information.
1649///
1650/// Corresponds to [WebGPU `GPUImageCopyTextureTagged`](
1651/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopytexturetagged).
1652pub type ImageCopyTextureTagged<'a> = ImageCopyTextureTaggedBase<&'a Texture>;
1653#[cfg(any(
1654 not(target_arch = "wasm32"),
1655 all(
1656 feature = "fragile-send-sync-non-atomic-wasm",
1657 not(target_feature = "atomics")
1658 )
1659))]
1660static_assertions::assert_impl_all!(ImageCopyTextureTagged: Send, Sync);
1661
1662/// Describes a [`BindGroupLayout`].
1663///
1664/// For use with [`Device::create_bind_group_layout`].
1665///
1666/// Corresponds to [WebGPU `GPUBindGroupLayoutDescriptor`](
1667/// https://gpuweb.github.io/gpuweb/#dictdef-gpubindgrouplayoutdescriptor).
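///
/// A minimal sketch describing a single uniform buffer binding visible to the
/// vertex stage (the binding index and visibility are illustrative):
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
///     label: Some("example bind group layout"),
///     entries: &[wgpu::BindGroupLayoutEntry {
///         binding: 0,
///         visibility: wgpu::ShaderStages::VERTEX,
///         ty: wgpu::BindingType::Buffer {
///             ty: wgpu::BufferBindingType::Uniform,
///             has_dynamic_offset: false,
///             min_binding_size: None,
///         },
///         count: None,
///     }],
/// });
/// ```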
1668#[derive(Clone, Debug)]
1669pub struct BindGroupLayoutDescriptor<'a> {
1670 /// Debug label of the bind group layout. This will show up in graphics debuggers for easy identification.
1671 pub label: Label<'a>,
1672
1673 /// Array of entries in this BindGroupLayout
1674 pub entries: &'a [BindGroupLayoutEntry],
1675}
1676static_assertions::assert_impl_all!(BindGroupLayoutDescriptor: Send, Sync);
1677
1678/// Describes a [`RenderBundleEncoder`].
1679///
1680/// For use with [`Device::create_render_bundle_encoder`].
1681///
1682/// Corresponds to [WebGPU `GPURenderBundleEncoderDescriptor`](
1683/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderbundleencoderdescriptor).
1684#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
1685pub struct RenderBundleEncoderDescriptor<'a> {
1686 /// Debug label of the render bundle encoder. This will show up in graphics debuggers for easy identification.
1687 pub label: Label<'a>,
1688    /// The formats of the color attachments that this render bundle is capable of rendering to. This
1689 /// must match the formats of the color attachments in the render pass this render bundle is executed in.
1690 pub color_formats: &'a [Option<TextureFormat>],
1691    /// Information about the depth attachment that this render bundle is capable of rendering to. This
1692 /// must match the format of the depth attachments in the render pass this render bundle is executed in.
1693 pub depth_stencil: Option<RenderBundleDepthStencil>,
1694 /// Sample count this render bundle is capable of rendering to. This must match the pipelines and
1695 /// the render passes it is used in.
1696 pub sample_count: u32,
1697    /// Whether this render bundle will render to multiple array layers in the attachments at the same time.
1698 pub multiview: Option<NonZeroU32>,
1699}
1700static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor: Send, Sync);
1701
1702/// Surface texture that can be rendered to.
1703/// Result of a successful call to [`Surface::get_current_texture`].
1704///
1705/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
1706/// the [`GPUCanvasContext`](https://gpuweb.github.io/gpuweb/#canvas-context) provides
1707/// a texture without any additional information.
1708#[derive(Debug)]
1709pub struct SurfaceTexture {
1710 /// Accessible view of the frame.
1711 pub texture: Texture,
1712 /// `true` if the acquired buffer can still be used for rendering,
1713 /// but should be recreated for maximum performance.
1714 pub suboptimal: bool,
1715 presented: bool,
1716 detail: Box<dyn AnyWasmNotSendSync>,
1717}
1718#[cfg(any(
1719 not(target_arch = "wasm32"),
1720 all(
1721 feature = "fragile-send-sync-non-atomic-wasm",
1722 not(target_feature = "atomics")
1723 )
1724))]
1725static_assertions::assert_impl_all!(SurfaceTexture: Send, Sync);
1726
1727/// Result of an unsuccessful call to [`Surface::get_current_texture`].
1728#[derive(Clone, PartialEq, Eq, Debug)]
1729pub enum SurfaceError {
1730 /// A timeout was encountered while trying to acquire the next frame.
1731 Timeout,
1732 /// The underlying surface has changed, and therefore the swap chain must be updated.
1733 Outdated,
1734 /// The swap chain has been lost and needs to be recreated.
1735 Lost,
1736 /// There is no more memory left to allocate a new frame.
1737 OutOfMemory,
1738}
1739static_assertions::assert_impl_all!(SurfaceError: Send, Sync);
1740
1741impl fmt::Display for SurfaceError {
1742 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1743 write!(f, "{}", match self {
1744 Self::Timeout => "A timeout was encountered while trying to acquire the next frame",
1745 Self::Outdated => "The underlying surface has changed, and therefore the swap chain must be updated",
1746 Self::Lost => "The swap chain has been lost and needs to be recreated",
1747 Self::OutOfMemory => "There is no more memory left to allocate a new frame",
1748 })
1749 }
1750}
1751
1752impl error::Error for SurfaceError {}
1753
1754impl Default for Instance {
1755 /// Creates a new instance of wgpu with default options.
1756 ///
1757 /// Backends are set to `Backends::all()`, and FXC is chosen as the `dx12_shader_compiler`.
1758 fn default() -> Self {
1759 Self::new(InstanceDescriptor::default())
1760 }
1761}
1762
1763impl Instance {
1764    /// Create a new instance of wgpu.
1765 ///
1766 /// # Arguments
1767 ///
1768 /// - `instance_desc` - Has fields for which [backends][Backends] wgpu will choose
1769 /// during instantiation, and which [DX12 shader compiler][Dx12Compiler] wgpu will use.
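    ///
    /// A minimal sketch using the default descriptor, equivalent to `Instance::default()`:
    ///
    /// ```no_run
    /// let instance = wgpu::Instance::new(wgpu::InstanceDescriptor::default());
    /// ```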
1770 pub fn new(instance_desc: InstanceDescriptor) -> Self {
1771 Self {
1772 context: Arc::from(crate::backend::Context::init(instance_desc)),
1773 }
1774 }
1775
1776    /// Create a new instance of wgpu from a wgpu-hal instance.
1777 ///
1778 /// # Arguments
1779 ///
1780 /// - `hal_instance` - wgpu-hal instance.
1781 ///
1782 /// # Safety
1783 ///
1784 /// Refer to the creation of wgpu-hal Instance for every backend.
1785 #[cfg(any(
1786 not(target_arch = "wasm32"),
1787 target_os = "emscripten",
1788 feature = "webgl"
1789 ))]
1790 pub unsafe fn from_hal<A: wgc::hal_api::HalApi>(hal_instance: A::Instance) -> Self {
1791 Self {
1792 context: Arc::new(unsafe {
1793 crate::backend::Context::from_hal_instance::<A>(hal_instance)
1794 }),
1795 }
1796 }
1797
1798 /// Return a reference to a specific backend instance, if available.
1799 ///
1800 /// If this `Instance` has a wgpu-hal [`Instance`] for backend
1801 /// `A`, return a reference to it. Otherwise, return `None`.
1802 ///
1803 /// # Safety
1804 ///
1805 /// - The raw instance handle returned must not be manually destroyed.
1806 ///
1807 /// [`Instance`]: hal::Api::Instance
1808 #[cfg(any(
1809 not(target_arch = "wasm32"),
1810 target_os = "emscripten",
1811 feature = "webgl"
1812 ))]
1813 pub unsafe fn as_hal<A: wgc::hal_api::HalApi>(&self) -> Option<&A::Instance> {
1814 unsafe {
1815 self.context
1816 .as_any()
1817 .downcast_ref::<crate::backend::Context>()
1818 .unwrap()
1819 .instance_as_hal::<A>()
1820 }
1821 }
1822
1823    /// Create a new instance of wgpu from a wgpu-core instance.
1824 ///
1825 /// # Arguments
1826 ///
1827 /// - `core_instance` - wgpu-core instance.
1828 ///
1829 /// # Safety
1830 ///
1831 /// Refer to the creation of wgpu-core Instance.
1832 #[cfg(any(
1833 not(target_arch = "wasm32"),
1834 target_os = "emscripten",
1835 feature = "webgl"
1836 ))]
1837 pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self {
1838 Self {
1839 context: Arc::new(unsafe {
1840 crate::backend::Context::from_core_instance(core_instance)
1841 }),
1842 }
1843 }
1844
1845 /// Retrieves all available [`Adapter`]s that match the given [`Backends`].
1846 ///
1847 /// # Arguments
1848 ///
1849 /// - `backends` - Backends from which to enumerate adapters.
1850 #[cfg(any(
1851 not(target_arch = "wasm32"),
1852 target_os = "emscripten",
1853 feature = "webgl"
1854 ))]
1855 pub fn enumerate_adapters(&self, backends: Backends) -> impl ExactSizeIterator<Item = Adapter> {
1856 let context = Arc::clone(&self.context);
1857 self.context
1858 .as_any()
1859 .downcast_ref::<crate::backend::Context>()
1860 .unwrap()
1861 .enumerate_adapters(backends)
1862 .into_iter()
1863 .map(move |id| crate::Adapter {
1864 context: Arc::clone(&context),
1865 id: ObjectId::from(id),
1866 data: Box::new(()),
1867 })
1868 }
1869
1870 /// Retrieves an [`Adapter`] which matches the given [`RequestAdapterOptions`].
1871 ///
1872    /// Some options are "soft", and are treated as non-mandatory. Others are "hard".
1873 ///
1874    /// If no adapter satisfies all the "hard" options, `None` is returned.
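    ///
    /// A minimal sketch; the options shown are illustrative, and an async
    /// executor (not shown) is assumed to drive the returned future:
    ///
    /// ```no_run
    /// # async fn example(instance: wgpu::Instance) {
    /// let adapter = instance
    ///     .request_adapter(&wgpu::RequestAdapterOptions {
    ///         power_preference: wgpu::PowerPreference::HighPerformance,
    ///         force_fallback_adapter: false,
    ///         compatible_surface: None,
    ///     })
    ///     .await
    ///     .expect("no suitable adapter found");
    /// # }
    /// ```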
1875 pub fn request_adapter(
1876 &self,
1877 options: &RequestAdapterOptions,
1878 ) -> impl Future<Output = Option<Adapter>> + WasmNotSend {
1879 let context = Arc::clone(&self.context);
1880 let adapter = self.context.instance_request_adapter(options);
1881 async move {
1882 adapter
1883 .await
1884 .map(|(id, data)| Adapter { context, id, data })
1885 }
1886 }
1887
1888 /// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`].
1889 ///
1890 /// # Safety
1891 ///
1892 /// `hal_adapter` must be created from this instance internal handle.
1893 #[cfg(any(
1894 not(target_arch = "wasm32"),
1895 target_os = "emscripten",
1896 feature = "webgl"
1897 ))]
1898 pub unsafe fn create_adapter_from_hal<A: wgc::hal_api::HalApi>(
1899 &self,
1900 hal_adapter: hal::ExposedAdapter<A>,
1901 ) -> Adapter {
1902 let context = Arc::clone(&self.context);
1903 let id = unsafe {
1904 context
1905 .as_any()
1906 .downcast_ref::<crate::backend::Context>()
1907 .unwrap()
1908 .create_adapter_from_hal(hal_adapter)
1909 .into()
1910 };
1911 Adapter {
1912 context,
1913 id,
1914 data: Box::new(()),
1915 }
1916 }
1917
1918 /// Creates a surface from a raw window handle.
1919 ///
1920 /// If the specified display and window handle are not supported by any of the backends, then the surface
1921 /// will not be supported by any adapters.
1922 ///
1923 /// # Safety
1924 ///
1925 /// - `raw_window_handle` must be a valid object to create a surface upon.
1926 /// - `raw_window_handle` must remain valid until after the returned [`Surface`] is
1927 /// dropped.
1928 ///
1929 /// # Errors
1930 ///
1931 /// - On WebGL2: Will return an error if the browser does not support WebGL2,
1932 /// or declines to provide GPU access (such as due to a resource shortage).
1933 ///
1934 /// # Panics
1935 ///
1936 /// - On macOS/Metal: will panic if not called on the main thread.
1937 /// - On web: will panic if the `raw_window_handle` does not properly refer to a
1938 /// canvas element.
1939 pub unsafe fn create_surface<
1940 W: raw_window_handle::HasRawWindowHandle + raw_window_handle::HasRawDisplayHandle,
1941 >(
1942 &self,
1943 window: &W,
1944 ) -> Result<Surface, CreateSurfaceError> {
1945 let (id, data) = DynContext::instance_create_surface(
1946 &*self.context,
1947 raw_window_handle::HasRawDisplayHandle::raw_display_handle(window),
1948 raw_window_handle::HasRawWindowHandle::raw_window_handle(window),
1949 )?;
1950 Ok(Surface {
1951 context: Arc::clone(&self.context),
1952 id,
1953 data,
1954 config: Mutex::new(None),
1955 })
1956 }
1957
1958 /// Creates a surface from `CoreAnimationLayer`.
1959 ///
1960 /// # Safety
1961 ///
1962 /// - layer must be a valid object to create a surface upon.
1963 #[cfg(any(target_os = "ios", target_os = "macos"))]
1964 pub unsafe fn create_surface_from_core_animation_layer(
1965 &self,
1966 layer: *mut std::ffi::c_void,
1967 ) -> Surface {
1968 let surface = unsafe {
1969 self.context
1970 .as_any()
1971 .downcast_ref::<crate::backend::Context>()
1972 .unwrap()
1973 .create_surface_from_core_animation_layer(layer)
1974 };
1975 Surface {
1976 context: Arc::clone(&self.context),
1977 id: ObjectId::from(surface.id()),
1978 data: Box::new(surface),
1979 config: Mutex::new(None),
1980 }
1981 }
1982
1983 /// Creates a surface from `IDCompositionVisual`.
1984 ///
1985 /// # Safety
1986 ///
1987 /// - visual must be a valid IDCompositionVisual to create a surface upon.
1988 #[cfg(target_os = "windows")]
1989 pub unsafe fn create_surface_from_visual(&self, visual: *mut std::ffi::c_void) -> Surface {
1990 let surface = unsafe {
1991 self.context
1992 .as_any()
1993 .downcast_ref::<crate::backend::Context>()
1994 .unwrap()
1995 .create_surface_from_visual(visual)
1996 };
1997 Surface {
1998 context: Arc::clone(&self.context),
1999 id: ObjectId::from(surface.id()),
2000 data: Box::new(surface),
2001 config: Mutex::new(None),
2002 }
2003 }
2004
2005 /// Creates a surface from `SurfaceHandle`.
2006 ///
2007 /// # Safety
2008 ///
2009 /// - surface_handle must be a valid SurfaceHandle to create a surface upon.
2010 #[cfg(target_os = "windows")]
2011 pub unsafe fn create_surface_from_surface_handle(
2012 &self,
2013 surface_handle: *mut std::ffi::c_void,
2014 ) -> Surface {
2015 let surface = unsafe {
2016 self.context
2017 .as_any()
2018 .downcast_ref::<crate::backend::Context>()
2019 .unwrap()
2020 .create_surface_from_surface_handle(surface_handle)
2021 };
2022 Surface {
2023 context: Arc::clone(&self.context),
2024 id: ObjectId::from(surface.id()),
2025 data: Box::new(surface),
2026 config: Mutex::new(None),
2027 }
2028 }
2029
2030 /// Creates a surface from `SwapChainPanel`.
2031 ///
2032 /// # Safety
2033 ///
2034    /// - swap_chain_panel must be a valid SwapChainPanel to create a surface upon.
2035 #[cfg(target_os = "windows")]
2036 pub unsafe fn create_surface_from_swap_chain_panel(
2037 &self,
2038 swap_chain_panel: *mut std::ffi::c_void,
2039 ) -> Surface {
2040 let surface = unsafe {
2041 self.context
2042 .as_any()
2043 .downcast_ref::<crate::backend::Context>()
2044 .unwrap()
2045 .create_surface_from_swap_chain_panel(swap_chain_panel)
2046 };
2047 Surface {
2048 context: Arc::clone(&self.context),
2049 id: ObjectId::from(surface.id()),
2050 data: Box::new(surface),
2051 config: Mutex::new(None),
2052 }
2053 }
2054
2055 /// Creates a surface from a `web_sys::HtmlCanvasElement`.
2056 ///
2057 /// The `canvas` argument must be a valid `<canvas>` element to
2058 /// create a surface upon.
2059 ///
2060 /// # Errors
2061 ///
2062 /// - On WebGL2: Will return an error if the browser does not support WebGL2,
2063 /// or declines to provide GPU access (such as due to a resource shortage).
2064 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
2065 pub fn create_surface_from_canvas(
2066 &self,
2067 canvas: web_sys::HtmlCanvasElement,
2068 ) -> Result<Surface, CreateSurfaceError> {
2069 let surface = self
2070 .context
2071 .as_any()
2072 .downcast_ref::<crate::backend::Context>()
2073 .unwrap()
2074 .instance_create_surface_from_canvas(canvas)?;
2075
2076 // TODO: This is ugly, a way to create things from a native context needs to be made nicer.
2077 Ok(Surface {
2078 context: Arc::clone(&self.context),
2079 #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
2080 id: ObjectId::from(surface.id()),
2081 #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
2082 data: Box::new(surface),
2083 #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
2084 id: ObjectId::UNUSED,
2085 #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
2086 data: Box::new(surface.1),
2087 config: Mutex::new(None),
2088 })
2089 }
2090
2091 /// Creates a surface from a `web_sys::OffscreenCanvas`.
2092 ///
2093 /// The `canvas` argument must be a valid `OffscreenCanvas` object
2094 /// to create a surface upon.
2095 ///
2096 /// # Errors
2097 ///
2098 /// - On WebGL2: Will return an error if the browser does not support WebGL2,
2099 /// or declines to provide GPU access (such as due to a resource shortage).
2100 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
2101 pub fn create_surface_from_offscreen_canvas(
2102 &self,
2103 canvas: web_sys::OffscreenCanvas,
2104 ) -> Result<Surface, CreateSurfaceError> {
2105 let surface = self
2106 .context
2107 .as_any()
2108 .downcast_ref::<crate::backend::Context>()
2109 .unwrap()
2110 .instance_create_surface_from_offscreen_canvas(canvas)?;
2111
2112 // TODO: This is ugly, a way to create things from a native context needs to be made nicer.
2113 Ok(Surface {
2114 context: Arc::clone(&self.context),
2115 #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
2116 id: ObjectId::from(surface.id()),
2117 #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
2118 data: Box::new(surface),
2119 #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
2120 id: ObjectId::UNUSED,
2121 #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
2122 data: Box::new(surface.1),
2123 config: Mutex::new(None),
2124 })
2125 }
2126
2127 /// Polls all devices.
2128 ///
2129 /// If `force_wait` is true and this is not running on the web, then this
2130 /// function will block until all in-flight buffers have been mapped and
2131 /// all submitted commands have finished execution.
2132 ///
2133 /// Return `true` if all devices' queues are empty, or `false` if there are
2134 /// queue submissions still in flight. (Note that, unless access to all
2135 /// [`Queue`s] associated with this [`Instance`] is coordinated somehow,
2136 /// this information could be out of date by the time the caller receives
2137 /// it. `Queue`s can be shared between threads, and other threads could
2138 /// submit new work at any time.)
2139 ///
2140 /// On the web, this is a no-op. `Device`s are automatically polled.
2141 ///
2142 /// [`Queue`s]: Queue
2143 pub fn poll_all(&self, force_wait: bool) -> bool {
2144 self.context.instance_poll_all_devices(force_wait)
2145 }
2146
2147    /// Generates a memory report.
2148 #[cfg(any(
2149 not(target_arch = "wasm32"),
2150 target_os = "emscripten",
2151 feature = "webgl"
2152 ))]
2153 pub fn generate_report(&self) -> wgc::global::GlobalReport {
2154 self.context
2155 .as_any()
2156 .downcast_ref::<crate::backend::Context>()
2157 .unwrap()
2158 .generate_report()
2159 }
2160}
2161
2162impl Adapter {
2163 /// Requests a connection to a physical device, creating a logical device.
2164 ///
2165 /// Returns the [`Device`] together with a [`Queue`] that executes command buffers.
2166 ///
2167 /// # Arguments
2168 ///
2169 /// - `desc` - Description of the features and limits requested from the given device.
2170 /// - `trace_path` - Can be used for API call tracing, if that feature is
2171 /// enabled in `wgpu-core`.
2172 ///
2173 /// # Panics
2174 ///
2175 /// - Features specified by `desc` are not supported by this adapter.
2176 /// - Unsafe features were requested but not enabled when requesting the adapter.
2177 /// - Limits requested exceed the values provided by the adapter.
2178 /// - Adapter does not support all features wgpu requires to safely operate.
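    ///
    /// A minimal sketch requesting a device with no optional features and the
    /// default limits; an async executor (not shown) is assumed to drive the
    /// returned future:
    ///
    /// ```no_run
    /// # async fn example(adapter: wgpu::Adapter) {
    /// let (device, queue) = adapter
    ///     .request_device(
    ///         &wgpu::DeviceDescriptor {
    ///             label: Some("example device"),
    ///             features: wgpu::Features::empty(),
    ///             limits: wgpu::Limits::default(),
    ///         },
    ///         None, // no API call tracing
    ///     )
    ///     .await
    ///     .expect("failed to create a device");
    /// # }
    /// ```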
2179 pub fn request_device(
2180 &self,
2181 desc: &DeviceDescriptor,
2182 trace_path: Option<&std::path::Path>,
2183 ) -> impl Future<Output = Result<(Device, Queue), RequestDeviceError>> + WasmNotSend {
2184 let context = Arc::clone(&self.context);
2185 let device = DynContext::adapter_request_device(
2186 &*self.context,
2187 &self.id,
2188 self.data.as_ref(),
2189 desc,
2190 trace_path,
2191 );
2192 async move {
2193 device.await.map(
2194 |DeviceRequest {
2195 device_id,
2196 device_data,
2197 queue_id,
2198 queue_data,
2199 }| {
2200 (
2201 Device {
2202 context: Arc::clone(&context),
2203 id: device_id,
2204 data: device_data,
2205 },
2206 Queue {
2207 context,
2208 id: queue_id,
2209 data: queue_data,
2210 },
2211 )
2212 },
2213 )
2214 }
2215 }
2216
2217 /// Create a wgpu [`Device`] and [`Queue`] from a wgpu-hal `OpenDevice`
2218 ///
2219 /// # Safety
2220 ///
2221 /// - `hal_device` must be created from this adapter internal handle.
2222 /// - `desc.features` must be a subset of `hal_device` features.
2223 #[cfg(any(
2224 not(target_arch = "wasm32"),
2225 target_os = "emscripten",
2226 feature = "webgl"
2227 ))]
2228 pub unsafe fn create_device_from_hal<A: wgc::hal_api::HalApi>(
2229 &self,
2230 hal_device: hal::OpenDevice<A>,
2231 desc: &DeviceDescriptor,
2232 trace_path: Option<&std::path::Path>,
2233 ) -> Result<(Device, Queue), RequestDeviceError> {
2234 let context = Arc::clone(&self.context);
2235 unsafe {
2236 self.context
2237 .as_any()
2238 .downcast_ref::<crate::backend::Context>()
2239 .unwrap()
2240 .create_device_from_hal(&self.id.into(), hal_device, desc, trace_path)
2241 }
2242 .map(|(device, queue)| {
2243 (
2244 Device {
2245 context: Arc::clone(&context),
2246 id: device.id().into(),
2247 data: Box::new(device),
2248 },
2249 Queue {
2250 context,
2251 id: queue.id().into(),
2252 data: Box::new(queue),
2253 },
2254 )
2255 })
2256 }
2257
2258 /// Apply a callback to this `Adapter`'s underlying backend adapter.
2259 ///
2260 /// If this `Adapter` is implemented by the backend API given by `A` (Vulkan,
2261 /// Dx12, etc.), then apply `hal_adapter_callback` to `Some(&adapter)`, where
2262 /// `adapter` is the underlying backend adapter type, [`A::Adapter`].
2263 ///
2264 /// If this `Adapter` uses a different backend, apply `hal_adapter_callback`
2265 /// to `None`.
2266 ///
2267 /// The adapter is locked for reading while `hal_adapter_callback` runs. If
2268 /// the callback attempts to perform any `wgpu` operations that require
2269 /// write access to the adapter, deadlock will occur. The locks are
2270 /// automatically released when the callback returns.
2271 ///
2272 /// # Safety
2273 ///
2274 /// - The raw handle passed to the callback must not be manually destroyed.
2275 ///
2276 /// [`A::Adapter`]: hal::Api::Adapter
2277 #[cfg(any(
2278 not(target_arch = "wasm32"),
2279 target_os = "emscripten",
2280 feature = "webgl"
2281 ))]
2282 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Adapter>) -> R, R>(
2283 &self,
2284 hal_adapter_callback: F,
2285 ) -> R {
2286 unsafe {
2287 self.context
2288 .as_any()
2289 .downcast_ref::<crate::backend::Context>()
2290 .unwrap()
2291 .adapter_as_hal::<A, F, R>(self.id.into(), hal_adapter_callback)
2292 }
2293 }
2294
2295 /// Returns whether this adapter may present to the passed surface.
2296 pub fn is_surface_supported(&self, surface: &Surface) -> bool {
2297 DynContext::adapter_is_surface_supported(
2298 &*self.context,
2299 &self.id,
2300 self.data.as_ref(),
2301 &surface.id,
2302 surface.data.as_ref(),
2303 )
2304 }
2305
2306 /// List all features that are supported with this adapter.
2307 ///
2308 /// Features must be explicitly requested in [`Adapter::request_device`] in order
2309 /// to use them.
2310 pub fn features(&self) -> Features {
2311 DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref())
2312 }
2313
2314 /// List the "best" limits that are supported by this adapter.
2315 ///
2316 /// Limits must be explicitly requested in [`Adapter::request_device`] to set
2317 /// the values that you are allowed to use.
2318 pub fn limits(&self) -> Limits {
2319 DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref())
2320 }
2321
2322 /// Get info about the adapter itself.
2323 pub fn get_info(&self) -> AdapterInfo {
2324 DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref())
2325 }
2326
2327    /// Get the downlevel capabilities of this adapter.
2328 pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities {
2329 DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref())
2330 }
2331
2332 /// Returns the features supported for a given texture format by this adapter.
2333 ///
2334 /// Note that the WebGPU spec further restricts the available usages/features.
2335 /// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature.
2336 pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures {
2337 DynContext::adapter_get_texture_format_features(
2338 &*self.context,
2339 &self.id,
2340 self.data.as_ref(),
2341 format,
2342 )
2343 }
2344
2345 /// Generates a timestamp using the clock used by the presentation engine.
2346 ///
2347 /// When comparing completely opaque timestamp systems, we need a way of generating timestamps that signal
2348 /// the exact same time. You can do this by calling your own timestamp function immediately after a call to
2349 /// this function. This should result in timestamps that are 0.5 to 5 microseconds apart. There are locks
2350    /// that must be taken during the call, so don't call your timestamp function before this one returns.
2351 ///
2352 /// ```no_run
2353 /// # let adapter: wgpu::Adapter = panic!();
2354 /// # let some_code = || wgpu::PresentationTimestamp::INVALID_TIMESTAMP;
2355 /// use std::time::{Duration, Instant};
2356 /// let presentation = adapter.get_presentation_timestamp();
2357 /// let instant = Instant::now();
2358 ///
2359 /// // We can now turn a new presentation timestamp into an Instant.
2360 /// let some_pres_timestamp = some_code();
2361 /// let duration = Duration::from_nanos((some_pres_timestamp.0 - presentation.0) as u64);
2362 /// let new_instant: Instant = instant + duration;
2363 /// ```
2364    ///
2365 /// [Instant]: std::time::Instant
2366 pub fn get_presentation_timestamp(&self) -> PresentationTimestamp {
2367 DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref())
2368 }
2369}
2370
2371impl Device {
2372 /// Check for resource cleanups and mapping callbacks.
2373 ///
2374 /// Return `true` if the queue is empty, or `false` if there are more queue
2375 /// submissions still in flight. (Note that, unless access to the [`Queue`] is
2376 /// coordinated somehow, this information could be out of date by the time
2377 /// the caller receives it. `Queue`s can be shared between threads, so
2378 /// other threads could submit new work at any time.)
2379 ///
2380 /// On the web, this is a no-op. `Device`s are automatically polled.
2381 pub fn poll(&self, maintain: Maintain) -> bool {
2382 DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain)
2383 }
2384
2385 /// List all features that may be used with this device.
2386 ///
2387 /// Functions may panic if you use unsupported features.
2388 pub fn features(&self) -> Features {
2389 DynContext::device_features(&*self.context, &self.id, self.data.as_ref())
2390 }
2391
2392 /// List all limits that were requested of this device.
2393 ///
2394 /// If any of these limits are exceeded, functions may panic.
2395 pub fn limits(&self) -> Limits {
2396 DynContext::device_limits(&*self.context, &self.id, self.data.as_ref())
2397 }
2398
2399 /// Creates a shader module from either SPIR-V or WGSL source code.
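    ///
    /// A minimal sketch compiling an inline WGSL string, assuming the default
    /// `wgsl` feature is enabled (the shader body is a placeholder):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///     label: Some("example shader"),
    ///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    /// });
    /// ```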
2400 pub fn create_shader_module(&self, desc: ShaderModuleDescriptor) -> ShaderModule {
2401 let (id, data) = DynContext::device_create_shader_module(
2402 &*self.context,
2403 &self.id,
2404 self.data.as_ref(),
2405 desc,
2406 wgt::ShaderBoundChecks::new(),
2407 );
2408 ShaderModule {
2409 context: Arc::clone(&self.context),
2410 id,
2411 data,
2412 }
2413 }
2414
2415 /// Creates a shader module from either SPIR-V or WGSL source code without runtime checks.
2416 ///
2417 /// # Safety
2418    /// In contrast with [`create_shader_module`](Self::create_shader_module), this function
2419    /// creates a shader module without runtime checks, which allows shaders to perform
2420    /// operations that can lead to undefined behavior, such as indexing out of bounds. It is
2421    /// therefore the caller's responsibility to pass a shader that doesn't perform any of
2422    /// these operations.
2423 ///
2424 /// This has no effect on web.
2425 pub unsafe fn create_shader_module_unchecked(
2426 &self,
2427 desc: ShaderModuleDescriptor,
2428 ) -> ShaderModule {
2429 let (id, data) = DynContext::device_create_shader_module(
2430 &*self.context,
2431 &self.id,
2432 self.data.as_ref(),
2433 desc,
2434 unsafe { wgt::ShaderBoundChecks::unchecked() },
2435 );
2436 ShaderModule {
2437 context: Arc::clone(&self.context),
2438 id,
2439 data,
2440 }
2441 }
2442
2443 /// Creates a shader module from SPIR-V binary directly.
2444 ///
2445 /// # Safety
2446 ///
2447 /// This function passes binary data to the backend as-is and can potentially result in a
2448 /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid SPIR-V.
2449 ///
2450 /// See also [`include_spirv_raw!`] and [`util::make_spirv_raw`].
2451 pub unsafe fn create_shader_module_spirv(
2452 &self,
2453 desc: &ShaderModuleDescriptorSpirV,
2454 ) -> ShaderModule {
2455 let (id, data) = unsafe {
2456 DynContext::device_create_shader_module_spirv(
2457 &*self.context,
2458 &self.id,
2459 self.data.as_ref(),
2460 desc,
2461 )
2462 };
2463 ShaderModule {
2464 context: Arc::clone(&self.context),
2465 id,
2466 data,
2467 }
2468 }
2469
2470 /// Creates an empty [`CommandEncoder`].
2471 pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor) -> CommandEncoder {
2472 let (id, data) = DynContext::device_create_command_encoder(
2473 &*self.context,
2474 &self.id,
2475 self.data.as_ref(),
2476 desc,
2477 );
2478 CommandEncoder {
2479 context: Arc::clone(&self.context),
2480 id: Some(id),
2481 data,
2482 }
2483 }
2484
2485 /// Creates an empty [`RenderBundleEncoder`].
2486 pub fn create_render_bundle_encoder(
2487 &self,
2488 desc: &RenderBundleEncoderDescriptor,
2489 ) -> RenderBundleEncoder {
2490 let (id, data) = DynContext::device_create_render_bundle_encoder(
2491 &*self.context,
2492 &self.id,
2493 self.data.as_ref(),
2494 desc,
2495 );
2496 RenderBundleEncoder {
2497 context: Arc::clone(&self.context),
2498 id,
2499 data,
2500 parent: self,
2501 _p: Default::default(),
2502 }
2503 }
2504
2505 /// Creates a new [`BindGroup`].
2506 pub fn create_bind_group(&self, desc: &BindGroupDescriptor) -> BindGroup {
2507 let (id, data) = DynContext::device_create_bind_group(
2508 &*self.context,
2509 &self.id,
2510 self.data.as_ref(),
2511 desc,
2512 );
2513 BindGroup {
2514 context: Arc::clone(&self.context),
2515 id,
2516 data,
2517 }
2518 }
2519
2520 /// Creates a [`BindGroupLayout`].
2521 pub fn create_bind_group_layout(&self, desc: &BindGroupLayoutDescriptor) -> BindGroupLayout {
2522 let (id, data) = DynContext::device_create_bind_group_layout(
2523 &*self.context,
2524 &self.id,
2525 self.data.as_ref(),
2526 desc,
2527 );
2528 BindGroupLayout {
2529 context: Arc::clone(&self.context),
2530 id,
2531 data,
2532 }
2533 }
2534
2535 /// Creates a [`PipelineLayout`].
2536 pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor) -> PipelineLayout {
2537 let (id, data) = DynContext::device_create_pipeline_layout(
2538 &*self.context,
2539 &self.id,
2540 self.data.as_ref(),
2541 desc,
2542 );
2543 PipelineLayout {
2544 context: Arc::clone(&self.context),
2545 id,
2546 data,
2547 }
2548 }
2549
2550 /// Creates a [`RenderPipeline`].
2551 pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor) -> RenderPipeline {
2552 let (id, data) = DynContext::device_create_render_pipeline(
2553 &*self.context,
2554 &self.id,
2555 self.data.as_ref(),
2556 desc,
2557 );
2558 RenderPipeline {
2559 context: Arc::clone(&self.context),
2560 id,
2561 data,
2562 }
2563 }
2564
2565 /// Creates a [`ComputePipeline`].
2566 pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor) -> ComputePipeline {
2567 let (id, data) = DynContext::device_create_compute_pipeline(
2568 &*self.context,
2569 &self.id,
2570 self.data.as_ref(),
2571 desc,
2572 );
2573 ComputePipeline {
2574 context: Arc::clone(&self.context),
2575 id,
2576 data,
2577 }
2578 }
2579
2580 /// Creates a [`Buffer`].
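    ///
    /// A minimal sketch creating a small uniform buffer (the size and usage
    /// flags are illustrative):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("example buffer"),
    ///     size: 16,
    ///     usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
    ///     mapped_at_creation: false,
    /// });
    /// ```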
2581 pub fn create_buffer(&self, desc: &BufferDescriptor) -> Buffer {
2582 let mut map_context = MapContext::new(desc.size);
2583 if desc.mapped_at_creation {
2584 map_context.initial_range = 0..desc.size;
2585 }
2586
2587 let (id, data) =
2588 DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc);
2589
2590 Buffer {
2591 context: Arc::clone(&self.context),
2592 id,
2593 data,
2594 map_context: Mutex::new(map_context),
2595 size: desc.size,
2596 usage: desc.usage,
2597 }
2598 }
2599
2600 /// Creates a new [`Texture`].
2601 ///
2602 /// `desc` specifies the general format of the texture.
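    ///
    /// A minimal sketch creating a 2D render target (the extent, format, and
    /// usage flags are illustrative):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let texture = device.create_texture(&wgpu::TextureDescriptor {
    ///     label: Some("example texture"),
    ///     size: wgpu::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
    ///     mip_level_count: 1,
    ///     sample_count: 1,
    ///     dimension: wgpu::TextureDimension::D2,
    ///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///     usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
    ///     view_formats: &[],
    /// });
    /// ```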
2603 pub fn create_texture(&self, desc: &TextureDescriptor) -> Texture {
2604 let (id, data) =
2605 DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc);
2606 Texture {
2607 context: Arc::clone(&self.context),
2608 id,
2609 data,
2610 owned: true,
2611 descriptor: TextureDescriptor {
2612 label: None,
2613 view_formats: &[],
2614 ..desc.clone()
2615 },
2616 }
2617 }
2618
2619 /// Creates a [`Texture`] from a wgpu-hal Texture.
2620 ///
2621 /// # Safety
2622 ///
2623 /// - `hal_texture` must be created from this device internal handle
2624 /// - `hal_texture` must be created respecting `desc`
2625 /// - `hal_texture` must be initialized
2626 #[cfg(any(
2627 not(target_arch = "wasm32"),
2628 target_os = "emscripten",
2629 feature = "webgl"
2630 ))]
2631 pub unsafe fn create_texture_from_hal<A: wgc::hal_api::HalApi>(
2632 &self,
2633 hal_texture: A::Texture,
2634 desc: &TextureDescriptor,
2635 ) -> Texture {
2636 let texture = unsafe {
2637 self.context
2638 .as_any()
2639 .downcast_ref::<crate::backend::Context>()
2640 .unwrap()
2641 .create_texture_from_hal::<A>(
2642 hal_texture,
2643 self.data.as_ref().downcast_ref().unwrap(),
2644 desc,
2645 )
2646 };
2647 Texture {
2648 context: Arc::clone(&self.context),
2649 id: ObjectId::from(texture.id()),
2650 data: Box::new(texture),
2651 owned: true,
2652 descriptor: TextureDescriptor {
2653 label: None,
2654 view_formats: &[],
2655 ..desc.clone()
2656 },
2657 }
2658 }
2659
2660 /// Creates a [`Buffer`] from a wgpu-hal Buffer.
2661 ///
2662 /// # Safety
2663 ///
2664 /// - `hal_buffer` must be created from this device internal handle
2665 /// - `hal_buffer` must be created respecting `desc`
2666 /// - `hal_buffer` must be initialized
2667 #[cfg(any(
2668 not(target_arch = "wasm32"),
2669 target_os = "emscripten",
2670 feature = "webgl"
2671 ))]
2672 pub unsafe fn create_buffer_from_hal<A: wgc::hal_api::HalApi>(
2673 &self,
2674 hal_buffer: A::Buffer,
2675 desc: &BufferDescriptor,
2676 ) -> Buffer {
2677 let mut map_context = MapContext::new(desc.size);
2678 if desc.mapped_at_creation {
2679 map_context.initial_range = 0..desc.size;
2680 }
2681
2682 let (id, buffer) = unsafe {
2683 self.context
2684 .as_any()
2685 .downcast_ref::<crate::backend::Context>()
2686 .unwrap()
2687 .create_buffer_from_hal::<A>(
2688 hal_buffer,
2689 self.data.as_ref().downcast_ref().unwrap(),
2690 desc,
2691 )
2692 };
2693
2694 Buffer {
2695 context: Arc::clone(&self.context),
2696 id: ObjectId::from(id),
2697 data: Box::new(buffer),
2698 map_context: Mutex::new(map_context),
2699 size: desc.size,
2700 usage: desc.usage,
2701 }
2702 }
2703
2704 /// Creates a new [`Sampler`].
2705 ///
2706 /// `desc` specifies the behavior of the sampler.
2707 pub fn create_sampler(&self, desc: &SamplerDescriptor) -> Sampler {
2708 let (id, data) =
2709 DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc);
2710 Sampler {
2711 context: Arc::clone(&self.context),
2712 id,
2713 data,
2714 }
2715 }
2716
2717 /// Creates a new [`QuerySet`].
2718 pub fn create_query_set(&self, desc: &QuerySetDescriptor) -> QuerySet {
2719 let (id, data) =
2720 DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc);
2721 QuerySet {
2722 context: Arc::clone(&self.context),
2723 id,
2724 data,
2725 }
2726 }
2727
2728 /// Set a callback for errors that are not handled in error scopes.
2729 pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
2730 self.context
2731 .device_on_uncaptured_error(&self.id, self.data.as_ref(), handler);
2732 }
2733
2734 /// Push an error scope.
2735 pub fn push_error_scope(&self, filter: ErrorFilter) {
2736 self.context
2737 .device_push_error_scope(&self.id, self.data.as_ref(), filter);
2738 }
2739
2740 /// Pop an error scope.
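    ///
    /// A minimal sketch of the push/pop pattern around fallible resource
    /// creation; an async executor (not shown) is assumed to drive the
    /// returned future:
    ///
    /// ```no_run
    /// # async fn example(device: wgpu::Device) {
    /// device.push_error_scope(wgpu::ErrorFilter::Validation);
    /// // ... create resources that might fail validation here ...
    /// if let Some(error) = device.pop_error_scope().await {
    ///     eprintln!("validation error: {error}");
    /// }
    /// # }
    /// ```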
2741 pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
2742 self.context
2743 .device_pop_error_scope(&self.id, self.data.as_ref())
2744 }
2745
2746 /// Starts frame capture.
2747 pub fn start_capture(&self) {
2748 DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref())
2749 }
2750
2751 /// Stops frame capture.
2752 pub fn stop_capture(&self) {
2753 DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref())
2754 }
2755
2756 /// Apply a callback to this `Device`'s underlying backend device.
2757 ///
2758 /// If this `Device` is implemented by the backend API given by `A` (Vulkan,
2759 /// Dx12, etc.), then apply `hal_device_callback` to `Some(&device)`, where
2760 /// `device` is the underlying backend device type, [`A::Device`].
2761 ///
2762 /// If this `Device` uses a different backend, apply `hal_device_callback`
2763 /// to `None`.
2764 ///
2765 /// The device is locked for reading while `hal_device_callback` runs. If
2766 /// the callback attempts to perform any `wgpu` operations that require
2767 /// write access to the device (destroying a buffer, say), deadlock will
2768 /// occur. The locks are automatically released when the callback returns.
2769 ///
2770 /// # Safety
2771 ///
2772 /// - The raw handle passed to the callback must not be manually destroyed.
2773 ///
2774 /// [`A::Device`]: hal::Api::Device
2775 #[cfg(any(
2776 not(target_arch = "wasm32"),
2777 target_os = "emscripten",
2778 feature = "webgl"
2779 ))]
2780 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Device>) -> R, R>(
2781 &self,
2782 hal_device_callback: F,
2783 ) -> R {
2784 unsafe {
2785 self.context
2786 .as_any()
2787 .downcast_ref::<crate::backend::Context>()
2788 .unwrap()
2789 .device_as_hal::<A, F, R>(
2790 self.data.as_ref().downcast_ref().unwrap(),
2791 hal_device_callback,
2792 )
2793 }
2794 }
2795
2796 /// Destroy this device.
2797 pub fn destroy(&self) {
2798 DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref())
2799 }
2800}
2801
2802impl Drop for Device {
2803 fn drop(&mut self) {
2804 if !thread::panicking() {
2805 self.context.device_drop(&self.id, self.data.as_ref());
2806 }
2807 }
2808}
2809
2810/// Requesting a device from an [`Adapter`] failed.
2811#[derive(Clone, Debug)]
2812pub struct RequestDeviceError {
2813 inner: RequestDeviceErrorKind,
2814}
2815#[derive(Clone, Debug)]
2816enum RequestDeviceErrorKind {
2817 /// Error from [`wgpu_core`].
2818 // must match dependency cfg
2819 #[cfg(any(
2820 not(target_arch = "wasm32"),
2821 feature = "webgl",
2822 target_os = "emscripten"
2823 ))]
2824 Core(core::instance::RequestDeviceError),
2825
2826 /// Error from web API that was called by `wgpu` to request a device.
2827 ///
2828 /// (This is currently never used by the webgl backend, but it could be.)
2829 #[cfg(all(
2830 target_arch = "wasm32",
2831 not(any(target_os = "emscripten", feature = "webgl"))
2832 ))]
2833 Web(wasm_bindgen::JsValue),
2834}
2835
2836#[cfg(all(
2837 feature = "fragile-send-sync-non-atomic-wasm",
2838 not(target_feature = "atomics")
2839))]
2840unsafe impl Send for RequestDeviceErrorKind {}
2841#[cfg(all(
2842 feature = "fragile-send-sync-non-atomic-wasm",
2843 not(target_feature = "atomics")
2844))]
2845unsafe impl Sync for RequestDeviceErrorKind {}
2846
2847#[cfg(any(
2848 not(target_arch = "wasm32"),
2849 all(
2850 feature = "fragile-send-sync-non-atomic-wasm",
2851 not(target_feature = "atomics")
2852 )
2853))]
2854static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);
2855
2856impl fmt::Display for RequestDeviceError {
2857 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2858 match &self.inner {
2859 #[cfg(any(
2860 not(target_arch = "wasm32"),
2861 feature = "webgl",
2862 target_os = "emscripten"
2863 ))]
2864 RequestDeviceErrorKind::Core(error) => error.fmt(f),
2865 #[cfg(all(
2866 target_arch = "wasm32",
2867 not(any(target_os = "emscripten", feature = "webgl"))
2868 ))]
2869 RequestDeviceErrorKind::Web(error_js_value) => {
2870 // wasm-bindgen provides a reasonable error stringification via `Debug` impl
2871 write!(f, "{error_js_value:?}")
2872 }
2873 }
2874 }
2875}
2876
2877impl error::Error for RequestDeviceError {
2878 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
2879 match &self.inner {
2880 #[cfg(any(
2881 not(target_arch = "wasm32"),
2882 feature = "webgl",
2883 target_os = "emscripten"
2884 ))]
2885 RequestDeviceErrorKind::Core(error) => error.source(),
2886 #[cfg(all(
2887 target_arch = "wasm32",
2888 not(any(target_os = "emscripten", feature = "webgl"))
2889 ))]
2890 RequestDeviceErrorKind::Web(_) => None,
2891 }
2892 }
2893}
2894
2895#[cfg(any(
2896 not(target_arch = "wasm32"),
2897 feature = "webgl",
2898 target_os = "emscripten"
2899))]
2900impl From<core::instance::RequestDeviceError> for RequestDeviceError {
2901 fn from(error: core::instance::RequestDeviceError) -> Self {
2902 Self {
2903 inner: RequestDeviceErrorKind::Core(error),
2904 }
2905 }
2906}
2907
2908/// [`Instance::create_surface()`] or a related function failed.
2909#[derive(Clone, Debug)]
2910#[non_exhaustive]
2911pub struct CreateSurfaceError {
2912 inner: CreateSurfaceErrorKind,
2913}
2914#[derive(Clone, Debug)]
2915enum CreateSurfaceErrorKind {
2916 /// Error from [`wgpu_hal`].
2917 #[cfg(any(
2918 not(target_arch = "wasm32"),
2919 target_os = "emscripten",
2920 feature = "webgl"
2921 ))]
2922 // must match dependency cfg
2923 Hal(hal::InstanceError),
2924
2925 /// Error from WebGPU surface creation.
2926 #[allow(dead_code)] // may be unused depending on target and features
2927 Web(String),
2928}
2929static_assertions::assert_impl_all!(CreateSurfaceError: Send, Sync);
2930
2931impl fmt::Display for CreateSurfaceError {
2932 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2933 match &self.inner {
2934 #[cfg(any(
2935 not(target_arch = "wasm32"),
2936 target_os = "emscripten",
2937 feature = "webgl"
2938 ))]
2939 CreateSurfaceErrorKind::Hal(e) => e.fmt(f),
2940 CreateSurfaceErrorKind::Web(e) => e.fmt(f),
2941 }
2942 }
2943}
2944
2945impl error::Error for CreateSurfaceError {
2946 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
2947 match &self.inner {
2948 #[cfg(any(
2949 not(target_arch = "wasm32"),
2950 target_os = "emscripten",
2951 feature = "webgl"
2952 ))]
2953 CreateSurfaceErrorKind::Hal(e) => e.source(),
2954 CreateSurfaceErrorKind::Web(_) => None,
2955 }
2956 }
2957}
2958
2959#[cfg(any(
2960 not(target_arch = "wasm32"),
2961 target_os = "emscripten",
2962 feature = "webgl"
2963))]
2964impl From<hal::InstanceError> for CreateSurfaceError {
2965 fn from(e: hal::InstanceError) -> Self {
2966 Self {
2967 inner: CreateSurfaceErrorKind::Hal(e),
2968 }
2969 }
2970}
2971
2972/// Error occurred when trying to async map a buffer.
2973#[derive(Clone, PartialEq, Eq, Debug)]
2974pub struct BufferAsyncError;
2975static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
2976
2977impl fmt::Display for BufferAsyncError {
2978 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2979 write!(f, "Error occurred when trying to async map a buffer")
2980 }
2981}
2982
2983impl error::Error for BufferAsyncError {}
2984
2985/// Type of buffer mapping.
2986#[derive(Debug, Clone, Copy, Eq, PartialEq)]
2987pub enum MapMode {
2988 /// Map only for reading
2989 Read,
2990 /// Map only for writing
2991 Write,
2992}
2993static_assertions::assert_impl_all!(MapMode: Send, Sync);
2994
2995fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
2996 bounds: S,
2997) -> (BufferAddress, Option<BufferSize>) {
2998 let offset = match bounds.start_bound() {
2999 Bound::Included(&bound) => bound,
3000 Bound::Excluded(&bound) => bound + 1,
3001 Bound::Unbounded => 0,
3002 };
3003 let size = match bounds.end_bound() {
3004 Bound::Included(&bound) => Some(bound + 1 - offset),
3005 Bound::Excluded(&bound) => Some(bound - offset),
3006 Bound::Unbounded => None,
3007 }
3008 .map(|size| BufferSize::new(size).expect("Buffer slices can not be empty"));
3009
3010 (offset, size)
3011}
3012
3013/// Read only view into a mapped buffer.
3014#[derive(Debug)]
3015pub struct BufferView<'a> {
3016 slice: BufferSlice<'a>,
3017 data: Box<dyn crate::context::BufferMappedRange>,
3018}
3019
3020/// Write only view into a mapped buffer.
3021///
3022/// It is possible to read the buffer using this view, but doing so is not
3023/// recommended, as it is likely to be slow.
3024#[derive(Debug)]
3025pub struct BufferViewMut<'a> {
3026 slice: BufferSlice<'a>,
3027 data: Box<dyn crate::context::BufferMappedRange>,
3028 readable: bool,
3029}
3030
3031impl std::ops::Deref for BufferView<'_> {
3032 type Target = [u8];
3033
3034 #[inline]
3035 fn deref(&self) -> &[u8] {
3036 self.data.slice()
3037 }
3038}
3039
3040impl AsRef<[u8]> for BufferView<'_> {
3041 #[inline]
3042 fn as_ref(&self) -> &[u8] {
3043 self.data.slice()
3044 }
3045}
3046
3047impl AsMut<[u8]> for BufferViewMut<'_> {
3048 #[inline]
3049 fn as_mut(&mut self) -> &mut [u8] {
3050 self.data.slice_mut()
3051 }
3052}
3053
3054impl Deref for BufferViewMut<'_> {
3055 type Target = [u8];
3056
3057 fn deref(&self) -> &Self::Target {
3058 if !self.readable {
3059 log::warn!("Reading from a BufferViewMut is slow and not recommended.");
3060 }
3061
3062 self.data.slice()
3063 }
3064}
3065
3066impl DerefMut for BufferViewMut<'_> {
3067 fn deref_mut(&mut self) -> &mut Self::Target {
3068 self.data.slice_mut()
3069 }
3070}
3071
3072impl Drop for BufferView<'_> {
3073 fn drop(&mut self) {
3074 self.slice
3075 .buffer
3076 .map_context
3077 .lock()
3078 .remove(self.slice.offset, self.slice.size);
3079 }
3080}
3081
3082impl Drop for BufferViewMut<'_> {
3083 fn drop(&mut self) {
3084 self.slice
3085 .buffer
3086 .map_context
3087 .lock()
3088 .remove(self.slice.offset, self.slice.size);
3089 }
3090}
3091
3092impl Buffer {
3093 /// Return the binding view of the entire buffer.
3094 pub fn as_entire_binding(&self) -> BindingResource {
3095 BindingResource::Buffer(self.as_entire_buffer_binding())
3096 }
3097
3098 /// Return the binding view of the entire buffer.
3099 pub fn as_entire_buffer_binding(&self) -> BufferBinding {
3100 BufferBinding {
3101 buffer: self,
3102 offset: 0,
3103 size: None,
3104 }
3105 }
3106
3107 /// Use only a portion of this Buffer for a given operation. Choosing a range with no end
3108 /// will use the rest of the buffer. Using a totally unbounded range will use the entire buffer.
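    ///
    /// A small sketch of the range forms this accepts (the offsets are
    /// illustrative):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = panic!();
    /// let whole = buffer.slice(..);       // the entire buffer
    /// let tail = buffer.slice(256..);     // from byte 256 to the end
    /// let window = buffer.slice(64..128); // bytes 64..128
    /// ```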
3109 pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice {
3110 let (offset, size) = range_to_offset_size(bounds);
3111 BufferSlice {
3112 buffer: self,
3113 offset,
3114 size,
3115 }
3116 }
3117
3118 /// Flushes any pending write operations and unmaps the buffer from host memory.
3119 pub fn unmap(&self) {
3120 self.map_context.lock().reset();
3121 DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref());
3122 }
3123
3124 /// Destroy the associated native resources as soon as possible.
3125 pub fn destroy(&self) {
3126 DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref());
3127 }
3128
3129 /// Returns the length of the buffer allocation in bytes.
3130 ///
3131 /// This is always equal to the `size` that was specified when creating the buffer.
3132 pub fn size(&self) -> BufferAddress {
3133 self.size
3134 }
3135
3136 /// Returns the allowed usages for this `Buffer`.
3137 ///
3138 /// This is always equal to the `usage` that was specified when creating the buffer.
3139 pub fn usage(&self) -> BufferUsages {
3140 self.usage
3141 }
3142}
3143
3144impl<'a> BufferSlice<'a> {
3145    /// Map the buffer. The mapping is ready to use once the callback is called.
3146 ///
3147 /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
3148 /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
3149 ///
3150 /// The callback will be called on the thread that first calls the above functions after the gpu work
3151 /// has completed. There are no restrictions on the code you can run in the callback, however on native the
3152    /// call to the function will not complete until the callback returns, so prefer to keep callbacks short,
3153    /// using them to set flags, send messages, etc.
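    ///
    /// A minimal sketch of the map/poll/read pattern on native, using a
    /// channel to hand the result back to the calling thread (the error
    /// handling is illustrative):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// # let buffer: wgpu::Buffer = panic!();
    /// let slice = buffer.slice(..);
    /// let (sender, receiver) = std::sync::mpsc::channel();
    /// slice.map_async(wgpu::MapMode::Read, move |result| {
    ///     sender.send(result).ok();
    /// });
    /// device.poll(wgpu::Maintain::Wait); // drives the callback on native
    /// if let Ok(Ok(())) = receiver.recv() {
    ///     let _contents = slice.get_mapped_range();
    ///     // read the mapped bytes here, then drop the view and unmap
    /// }
    /// ```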
3154 pub fn map_async(
3155 &self,
3156 mode: MapMode,
3157 callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
3158 ) {
3159 let mut mc = self.buffer.map_context.lock();
3160 assert_eq!(
3161 mc.initial_range,
3162 0..0,
3163 "Buffer {:?} is already mapped",
3164 self.buffer.id
3165 );
3166 let end = match self.size {
3167 Some(s) => self.offset + s.get(),
3168 None => mc.total_size,
3169 };
3170 mc.initial_range = self.offset..end;
3171
3172 DynContext::buffer_map_async(
3173 &*self.buffer.context,
3174 &self.buffer.id,
3175 self.buffer.data.as_ref(),
3176 mode,
3177 self.offset..end,
3178 Box::new(callback),
3179 )
3180 }
3181
3182 /// Synchronously and immediately map a buffer for reading. If the buffer is not immediately mappable
3183    /// through [`BufferDescriptor::mapped_at_creation`] or [`BufferSlice::map_async`], this will panic.
3184 pub fn get_mapped_range(&self) -> BufferView<'a> {
3185 let end = self.buffer.map_context.lock().add(self.offset, self.size);
3186 let data = DynContext::buffer_get_mapped_range(
3187 &*self.buffer.context,
3188 &self.buffer.id,
3189 self.buffer.data.as_ref(),
3190 self.offset..end,
3191 );
3192 BufferView { slice: *self, data }
3193 }
3194
3195 /// Synchronously and immediately map a buffer for reading. If the buffer is not immediately mappable
3196    /// through [`BufferDescriptor::mapped_at_creation`] or [`BufferSlice::map_async`], this will panic.
3197 ///
3198 /// This is useful in wasm builds when you want to pass mapped data directly to js. Unlike `get_mapped_range`
3199 /// which unconditionally copies mapped data into the wasm heap, this function directly hands you the
3200 /// ArrayBuffer that we mapped the data into in js.
3201 #[cfg(all(
3202 target_arch = "wasm32",
3203 not(any(target_os = "emscripten", feature = "webgl"))
3204 ))]
3205 pub fn get_mapped_range_as_array_buffer(&self) -> js_sys::ArrayBuffer {
3206 let end = self.buffer.map_context.lock().add(self.offset, self.size);
3207 DynContext::buffer_get_mapped_range_as_array_buffer(
3208 &*self.buffer.context,
3209 &self.buffer.id,
3210 self.buffer.data.as_ref(),
3211 self.offset..end,
3212 )
3213 }
3214
3215 /// Synchronously and immediately map a buffer for writing. If the buffer is not immediately mappable
3216    /// through [`BufferDescriptor::mapped_at_creation`] or [`BufferSlice::map_async`], this will panic.
3217 pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
3218 let end = self.buffer.map_context.lock().add(self.offset, self.size);
3219 let data = DynContext::buffer_get_mapped_range(
3220 &*self.buffer.context,
3221 &self.buffer.id,
3222 self.buffer.data.as_ref(),
3223 self.offset..end,
3224 );
3225 BufferViewMut {
3226 slice: *self,
3227 data,
3228 readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
3229 }
3230 }
3231}
3232
3233impl Drop for Buffer {
3234 fn drop(&mut self) {
3235 if !thread::panicking() {
3236 self.context.buffer_drop(&self.id, self.data.as_ref());
3237 }
3238 }
3239}
3240
3241impl Texture {
3242 /// Returns the inner hal Texture using a callback. The hal texture will be `None` if the
3243    /// backend type argument does not match this wgpu Texture.
3244 ///
3245 /// # Safety
3246 ///
3247 /// - The raw handle obtained from the hal Texture must not be manually destroyed
3248 #[cfg(any(
3249 not(target_arch = "wasm32"),
3250 target_os = "emscripten",
3251 feature = "webgl"
3252 ))]
3253 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Texture>)>(
3254 &self,
3255 hal_texture_callback: F,
3256 ) {
3257 let texture = self.data.as_ref().downcast_ref().unwrap();
3258 unsafe {
3259 self.context
3260 .as_any()
3261 .downcast_ref::<crate::backend::Context>()
3262 .unwrap()
3263 .texture_as_hal::<A, F>(texture, hal_texture_callback)
3264 }
3265 }
3266
3267 /// Creates a view of this texture.
3268 pub fn create_view(&self, desc: &TextureViewDescriptor) -> TextureView {
3269 let (id, data) =
3270 DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc);
3271 TextureView {
3272 context: Arc::clone(&self.context),
3273 id,
3274 data,
3275 }
3276 }
3277
3278 /// Destroy the associated native resources as soon as possible.
3279 pub fn destroy(&self) {
3280 DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref());
3281 }
3282
3283 /// Make an `ImageCopyTexture` representing the whole texture.
3284 pub fn as_image_copy(&self) -> ImageCopyTexture {
3285 ImageCopyTexture {
3286 texture: self,
3287 mip_level: 0,
3288 origin: Origin3d::ZERO,
3289 aspect: TextureAspect::All,
3290 }
3291 }
3292
3293 /// Returns the size of this `Texture`.
3294 ///
3295 /// This is always equal to the `size` that was specified when creating the texture.
3296 pub fn size(&self) -> Extent3d {
3297 self.descriptor.size
3298 }
3299
3300 /// Returns the width of this `Texture`.
3301 ///
3302 /// This is always equal to the `size.width` that was specified when creating the texture.
3303 pub fn width(&self) -> u32 {
3304 self.descriptor.size.width
3305 }
3306
3307 /// Returns the height of this `Texture`.
3308 ///
3309 /// This is always equal to the `size.height` that was specified when creating the texture.
3310 pub fn height(&self) -> u32 {
3311 self.descriptor.size.height
3312 }
3313
3314 /// Returns the depth or layer count of this `Texture`.
3315 ///
3316 /// This is always equal to the `size.depth_or_array_layers` that was specified when creating the texture.
3317 pub fn depth_or_array_layers(&self) -> u32 {
3318 self.descriptor.size.depth_or_array_layers
3319 }
3320
3321 /// Returns the mip_level_count of this `Texture`.
3322 ///
3323 /// This is always equal to the `mip_level_count` that was specified when creating the texture.
3324 pub fn mip_level_count(&self) -> u32 {
3325 self.descriptor.mip_level_count
3326 }
3327
3328 /// Returns the sample_count of this `Texture`.
3329 ///
3330 /// This is always equal to the `sample_count` that was specified when creating the texture.
3331 pub fn sample_count(&self) -> u32 {
3332 self.descriptor.sample_count
3333 }
3334
3335 /// Returns the dimension of this `Texture`.
3336 ///
3337 /// This is always equal to the `dimension` that was specified when creating the texture.
3338 pub fn dimension(&self) -> TextureDimension {
3339 self.descriptor.dimension
3340 }
3341
3342 /// Returns the format of this `Texture`.
3343 ///
3344 /// This is always equal to the `format` that was specified when creating the texture.
3345 pub fn format(&self) -> TextureFormat {
3346 self.descriptor.format
3347 }
3348
3349 /// Returns the allowed usages of this `Texture`.
3350 ///
3351 /// This is always equal to the `usage` that was specified when creating the texture.
3352 pub fn usage(&self) -> TextureUsages {
3353 self.descriptor.usage
3354 }
3355}
3356
3357impl Drop for Texture {
3358 fn drop(&mut self) {
3359 if self.owned && !thread::panicking() {
3360 self.context.texture_drop(&self.id, self.data.as_ref());
3361 }
3362 }
3363}
3364
3365impl Drop for TextureView {
3366 fn drop(&mut self) {
3367 if !thread::panicking() {
3368 self.context.texture_view_drop(&self.id, self.data.as_ref());
3369 }
3370 }
3371}
3372
3373impl CommandEncoder {
3374 /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution.
3375 pub fn finish(mut self) -> CommandBuffer {
3376 let (id, data) = DynContext::command_encoder_finish(
3377 &*self.context,
3378 self.id.take().unwrap(),
3379 self.data.as_mut(),
3380 );
3381 CommandBuffer {
3382 context: Arc::clone(&self.context),
3383 id: Some(id),
3384 data: Some(data),
3385 }
3386 }
3387
3388 /// Begins recording of a render pass.
3389 ///
3390 /// This function returns a [`RenderPass`] object which records a single render pass.
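    ///
    /// A rough end-to-end sketch (the `device`, `queue`, and `view` bindings, as well as the
    /// draw recording, are illustrative and assumed to exist):
    /// ```rust ignore
    /// let mut encoder = device.create_command_encoder(&CommandEncoderDescriptor::default());
    /// {
    ///     let mut rpass = encoder.begin_render_pass(&RenderPassDescriptor {
    ///         label: Some("example pass"),
    ///         color_attachments: &[Some(RenderPassColorAttachment {
    ///             view: &view,
    ///             resolve_target: None,
    ///             ops: Operations::default(),
    ///         })],
    ///         depth_stencil_attachment: None,
    ///         timestamp_writes: None,
    ///         occlusion_query_set: None,
    ///     });
    ///     // set the pipeline / bind groups and record draw calls on `rpass` here
    /// } // dropping `rpass` ends the pass
    /// queue.submit(Some(encoder.finish()));
    /// ```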
3391 pub fn begin_render_pass<'pass>(
3392 &'pass mut self,
3393 desc: &RenderPassDescriptor<'pass, '_>,
3394 ) -> RenderPass<'pass> {
3395 let id = self.id.as_ref().unwrap();
3396 let (id, data) = DynContext::command_encoder_begin_render_pass(
3397 &*self.context,
3398 id,
3399 self.data.as_ref(),
3400 desc,
3401 );
3402 RenderPass {
3403 id,
3404 data,
3405 parent: self,
3406 }
3407 }
3408
3409 /// Begins recording of a compute pass.
3410 ///
3411 /// This function returns a [`ComputePass`] object which records a single compute pass.
3412 pub fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) -> ComputePass {
3413 let id = self.id.as_ref().unwrap();
3414 let (id, data) = DynContext::command_encoder_begin_compute_pass(
3415 &*self.context,
3416 id,
3417 self.data.as_ref(),
3418 desc,
3419 );
3420 ComputePass {
3421 id,
3422 data,
3423 parent: self,
3424 }
3425 }
3426
3427 /// Copy data from one buffer to another.
3428 ///
3429 /// # Panics
3430 ///
3431 /// - Buffer offsets or copy size not a multiple of [`COPY_BUFFER_ALIGNMENT`].
3432 /// - Copy would overrun buffer.
3433 /// - Copy within the same buffer.
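    ///
    /// A small sketch, assuming `src` and `dst` are sufficiently large [`Buffer`]s with
    /// `COPY_SRC` and `COPY_DST` usage respectively (names are illustrative):
    /// ```rust ignore
    /// // Copy 256 bytes from the start of `src` to offset 512 in `dst`.
    /// encoder.copy_buffer_to_buffer(&src, 0, &dst, 512, 256);
    /// ```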
3434 pub fn copy_buffer_to_buffer(
3435 &mut self,
3436 source: &Buffer,
3437 source_offset: BufferAddress,
3438 destination: &Buffer,
3439 destination_offset: BufferAddress,
3440 copy_size: BufferAddress,
3441 ) {
3442 DynContext::command_encoder_copy_buffer_to_buffer(
3443 &*self.context,
3444 self.id.as_ref().unwrap(),
3445 self.data.as_ref(),
3446 &source.id,
3447 source.data.as_ref(),
3448 source_offset,
3449 &destination.id,
3450 destination.data.as_ref(),
3451 destination_offset,
3452 copy_size,
3453 );
3454 }
3455
3456 /// Copy data from a buffer to a texture.
3457 pub fn copy_buffer_to_texture(
3458 &mut self,
3459 source: ImageCopyBuffer,
3460 destination: ImageCopyTexture,
3461 copy_size: Extent3d,
3462 ) {
3463 DynContext::command_encoder_copy_buffer_to_texture(
3464 &*self.context,
3465 self.id.as_ref().unwrap(),
3466 self.data.as_ref(),
3467 source,
3468 destination,
3469 copy_size,
3470 );
3471 }
3472
3473 /// Copy data from a texture to a buffer.
3474 pub fn copy_texture_to_buffer(
3475 &mut self,
3476 source: ImageCopyTexture,
3477 destination: ImageCopyBuffer,
3478 copy_size: Extent3d,
3479 ) {
3480 DynContext::command_encoder_copy_texture_to_buffer(
3481 &*self.context,
3482 self.id.as_ref().unwrap(),
3483 self.data.as_ref(),
3484 source,
3485 destination,
3486 copy_size,
3487 );
3488 }
3489
3490 /// Copy data from one texture to another.
3491 ///
3492 /// # Panics
3493 ///
3494 /// - Textures are not the same type
3495 /// - If a depth texture, or a multisampled texture, the entire texture must be copied
3496 /// - Copy would overrun either texture
3497 pub fn copy_texture_to_texture(
3498 &mut self,
3499 source: ImageCopyTexture,
3500 destination: ImageCopyTexture,
3501 copy_size: Extent3d,
3502 ) {
3503 DynContext::command_encoder_copy_texture_to_texture(
3504 &*self.context,
3505 self.id.as_ref().unwrap(),
3506 self.data.as_ref(),
3507 source,
3508 destination,
3509 copy_size,
3510 );
3511 }
3512
3513 /// Clears texture to zero.
3514 ///
3515 /// Note that unlike with clear_buffer, `COPY_DST` usage is not required.
3516 ///
3517 /// # Implementation notes
3518 ///
    /// - implemented via either buffer copies or render/depth target clears; which path is taken depends on the texture's usages
3520 /// - behaves like texture zero init, but is performed immediately (clearing is *not* delayed via marking it as uninitialized)
3521 ///
3522 /// # Panics
3523 ///
3524 /// - `CLEAR_TEXTURE` extension not enabled
3525 /// - Range is out of bounds
3526 pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
3527 DynContext::command_encoder_clear_texture(
3528 &*self.context,
3529 self.id.as_ref().unwrap(),
3530 self.data.as_ref(),
3531 texture,
3532 subresource_range,
3533 );
3534 }
3535
3536 /// Clears buffer to zero.
3537 ///
3538 /// # Panics
3539 ///
3540 /// - Buffer does not have `COPY_DST` usage.
    /// - Range is out of bounds
3542 pub fn clear_buffer(
3543 &mut self,
3544 buffer: &Buffer,
3545 offset: BufferAddress,
3546 size: Option<BufferSize>,
3547 ) {
3548 DynContext::command_encoder_clear_buffer(
3549 &*self.context,
3550 self.id.as_ref().unwrap(),
3551 self.data.as_ref(),
3552 buffer,
3553 offset,
3554 size,
3555 );
3556 }
3557
    /// Inserts a debug marker.
3559 pub fn insert_debug_marker(&mut self, label: &str) {
3560 let id = self.id.as_ref().unwrap();
3561 DynContext::command_encoder_insert_debug_marker(
3562 &*self.context,
3563 id,
3564 self.data.as_ref(),
3565 label,
3566 );
3567 }
3568
    /// Start recording commands and group them into a debug marker group.
3570 pub fn push_debug_group(&mut self, label: &str) {
3571 let id = self.id.as_ref().unwrap();
3572 DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label);
3573 }
3574
    /// Stops recording commands into the current debug marker group.
3576 pub fn pop_debug_group(&mut self) {
3577 let id = self.id.as_ref().unwrap();
3578 DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref());
3579 }
3580}
3581
3582/// [`Features::TIMESTAMP_QUERY`] must be enabled on the device in order to call these functions.
3583impl CommandEncoder {
3584 /// Issue a timestamp command at this point in the queue.
3585 /// The timestamp will be written to the specified query set, at the specified index.
3586 ///
3587 /// Must be multiplied by [`Queue::get_timestamp_period`] to get
3588 /// the value in nanoseconds. Absolute values have no meaning,
3589 /// but timestamps can be subtracted to get the time it takes
3590 /// for a string of operations to complete.
3591 pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
3592 DynContext::command_encoder_write_timestamp(
3593 &*self.context,
3594 self.id.as_ref().unwrap(),
3595 self.data.as_mut(),
3596 &query_set.id,
3597 query_set.data.as_ref(),
3598 query_index,
3599 )
3600 }
3601}
3602
3603/// [`Features::TIMESTAMP_QUERY`] or [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
3604impl CommandEncoder {
3605 /// Resolve a query set, writing the results into the supplied destination buffer.
3606 ///
3607 /// Queries may be between 8 and 40 bytes each. See [`PipelineStatisticsTypes`] for more information.
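    ///
    /// A sketch of resolving two timestamps into a readback buffer, assuming `query_set` was
    /// created with [`QueryType::Timestamp`] and `resolve_buffer` has `QUERY_RESOLVE` usage
    /// (all bindings are illustrative):
    /// ```rust ignore
    /// encoder.write_timestamp(&query_set, 0);
    /// // ... commands to be measured ...
    /// encoder.write_timestamp(&query_set, 1);
    /// encoder.resolve_query_set(&query_set, 0..2, &resolve_buffer, 0);
    /// ```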
3608 pub fn resolve_query_set(
3609 &mut self,
3610 query_set: &QuerySet,
3611 query_range: Range<u32>,
3612 destination: &Buffer,
3613 destination_offset: BufferAddress,
3614 ) {
3615 DynContext::command_encoder_resolve_query_set(
3616 &*self.context,
3617 self.id.as_ref().unwrap(),
3618 self.data.as_ref(),
3619 &query_set.id,
3620 query_set.data.as_ref(),
3621 query_range.start,
3622 query_range.end - query_range.start,
3623 &destination.id,
3624 destination.data.as_ref(),
3625 destination_offset,
3626 )
3627 }
3628}
3629
3630impl<'a> RenderPass<'a> {
3631 /// Sets the active bind group for a given bind group index. The bind group layout
3632 /// in the active pipeline when any `draw_*()` method is called must match the layout of
3633 /// this bind group.
3634 ///
    /// If the bind group has dynamic offsets, provide them in binding order.
3636 /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
3637 /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
3638 ///
3639 /// Subsequent draw calls’ shader executions will be able to access data in these bind groups.
3640 pub fn set_bind_group(
3641 &mut self,
3642 index: u32,
3643 bind_group: &'a BindGroup,
3644 offsets: &[DynamicOffset],
3645 ) {
3646 DynContext::render_pass_set_bind_group(
3647 &*self.parent.context,
3648 &mut self.id,
3649 self.data.as_mut(),
3650 index,
3651 &bind_group.id,
3652 bind_group.data.as_ref(),
3653 offsets,
3654 )
3655 }
3656
3657 /// Sets the active render pipeline.
3658 ///
3659 /// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
3660 pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
3661 DynContext::render_pass_set_pipeline(
3662 &*self.parent.context,
3663 &mut self.id,
3664 self.data.as_mut(),
3665 &pipeline.id,
3666 pipeline.data.as_ref(),
3667 )
3668 }
3669
3670 /// Sets the blend color as used by some of the blending modes.
3671 ///
3672 /// Subsequent blending tests will test against this value.
3673 /// If this method has not been called, the blend constant defaults to [`Color::TRANSPARENT`]
3674 /// (all components zero).
3675 pub fn set_blend_constant(&mut self, color: Color) {
3676 DynContext::render_pass_set_blend_constant(
3677 &*self.parent.context,
3678 &mut self.id,
3679 self.data.as_mut(),
3680 color,
3681 )
3682 }
3683
3684 /// Sets the active index buffer.
3685 ///
3686 /// Subsequent calls to [`draw_indexed`](RenderPass::draw_indexed) on this [`RenderPass`] will
3687 /// use `buffer` as the source index buffer.
3688 pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
3689 DynContext::render_pass_set_index_buffer(
3690 &*self.parent.context,
3691 &mut self.id,
3692 self.data.as_mut(),
3693 &buffer_slice.buffer.id,
3694 buffer_slice.buffer.data.as_ref(),
3695 index_format,
3696 buffer_slice.offset,
3697 buffer_slice.size,
3698 )
3699 }
3700
3701 /// Assign a vertex buffer to a slot.
3702 ///
3703 /// Subsequent calls to [`draw`] and [`draw_indexed`] on this
3704 /// [`RenderPass`] will use `buffer` as one of the source vertex buffers.
3705 ///
3706 /// The `slot` refers to the index of the matching descriptor in
3707 /// [`VertexState::buffers`].
3708 ///
3709 /// [`draw`]: RenderPass::draw
3710 /// [`draw_indexed`]: RenderPass::draw_indexed
3711 pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
3712 DynContext::render_pass_set_vertex_buffer(
3713 &*self.parent.context,
3714 &mut self.id,
3715 self.data.as_mut(),
3716 slot,
3717 &buffer_slice.buffer.id,
3718 buffer_slice.buffer.data.as_ref(),
3719 buffer_slice.offset,
3720 buffer_slice.size,
3721 )
3722 }
3723
    /// Sets the scissor rectangle used during the rasterization stage, after transformation
    /// into [viewport coordinates](https://www.w3.org/TR/webgpu/#viewport-coordinates).
3726 ///
3727 /// Subsequent draw calls will discard any fragments which fall outside the scissor rectangle.
3728 /// If this method has not been called, the scissor rectangle defaults to the entire bounds of
3729 /// the render targets.
3730 ///
3731 /// The function of the scissor rectangle resembles [`set_viewport()`](Self::set_viewport),
3732 /// but it does not affect the coordinate system, only which fragments are discarded.
3733 pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
3734 DynContext::render_pass_set_scissor_rect(
3735 &*self.parent.context,
3736 &mut self.id,
3737 self.data.as_mut(),
3738 x,
3739 y,
3740 width,
3741 height,
3742 );
3743 }
3744
3745 /// Sets the viewport used during the rasterization stage to linearly map
3746 /// from [normalized device coordinates](https://www.w3.org/TR/webgpu/#ndc) to [viewport coordinates](https://www.w3.org/TR/webgpu/#viewport-coordinates).
3747 ///
3748 /// Subsequent draw calls will only draw within this region.
3749 /// If this method has not been called, the viewport defaults to the entire bounds of the render
3750 /// targets.
3751 pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
3752 DynContext::render_pass_set_viewport(
3753 &*self.parent.context,
3754 &mut self.id,
3755 self.data.as_mut(),
3756 x,
3757 y,
3758 w,
3759 h,
3760 min_depth,
3761 max_depth,
3762 );
3763 }
3764
3765 /// Sets the stencil reference.
3766 ///
3767 /// Subsequent stencil tests will test against this value.
3768 /// If this method has not been called, the stencil reference value defaults to `0`.
3769 pub fn set_stencil_reference(&mut self, reference: u32) {
3770 DynContext::render_pass_set_stencil_reference(
3771 &*self.parent.context,
3772 &mut self.id,
3773 self.data.as_mut(),
3774 reference,
3775 );
3776 }
3777
3778 /// Draws primitives from the active vertex buffer(s).
3779 ///
3780 /// The active vertex buffer(s) can be set with [`RenderPass::set_vertex_buffer`].
3781 /// Does not use an Index Buffer. If you need this see [`RenderPass::draw_indexed`]
3782 ///
    /// Panics if the `vertices` range is outside the bounds of any set vertex buffer.
    ///
    /// `vertices`: The range of vertices to draw.
    /// `instances`: The range of instances to draw. Use `0..1` if instance buffers are not used.
    /// E.g., roughly how it is used internally:
3788 /// ```rust ignore
3789 /// for instance_id in instance_range {
3790 /// for vertex_id in vertex_range {
3791 /// let vertex = vertex[vertex_id];
3792 /// vertex_shader(vertex, vertex_id, instance_id);
3793 /// }
3794 /// }
3795 /// ```
3796 ///
3797 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3798 /// It is not affected by changes to the state that are performed after it is called.
3799 pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
3800 DynContext::render_pass_draw(
3801 &*self.parent.context,
3802 &mut self.id,
3803 self.data.as_mut(),
3804 vertices,
3805 instances,
3806 )
3807 }
3808
    /// Inserts a debug marker.
3810 pub fn insert_debug_marker(&mut self, label: &str) {
3811 DynContext::render_pass_insert_debug_marker(
3812 &*self.parent.context,
3813 &mut self.id,
3814 self.data.as_mut(),
3815 label,
3816 );
3817 }
3818
    /// Start recording commands and group them into a debug marker group.
3820 pub fn push_debug_group(&mut self, label: &str) {
3821 DynContext::render_pass_push_debug_group(
3822 &*self.parent.context,
3823 &mut self.id,
3824 self.data.as_mut(),
3825 label,
3826 );
3827 }
3828
    /// Stops recording commands into the current debug marker group.
3830 pub fn pop_debug_group(&mut self) {
3831 DynContext::render_pass_pop_debug_group(
3832 &*self.parent.context,
3833 &mut self.id,
3834 self.data.as_mut(),
3835 );
3836 }
3837
3838 /// Draws indexed primitives using the active index buffer and the active vertex buffers.
3839 ///
3840 /// The active index buffer can be set with [`RenderPass::set_index_buffer`]
3841 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
3842 ///
    /// Panics if the `indices` range is outside the bounds of any set index buffer.
    ///
    /// `indices`: The range of indices to draw.
    /// `base_vertex`: value added to each index value before indexing into the vertex buffers.
    /// `instances`: The range of instances to draw. Use `0..1` if instance buffers are not used.
    /// E.g., roughly how it is used internally:
3849 /// ```rust ignore
3850 /// for instance_id in instance_range {
3851 /// for index_index in index_range {
3852 /// let vertex_id = index_buffer[index_index];
3853 /// let adjusted_vertex_id = vertex_id + base_vertex;
3854 /// let vertex = vertex[adjusted_vertex_id];
3855 /// vertex_shader(vertex, adjusted_vertex_id, instance_id);
3856 /// }
3857 /// }
3858 /// ```
3859 ///
3860 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3861 /// It is not affected by changes to the state that are performed after it is called.
3862 pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
3863 DynContext::render_pass_draw_indexed(
3864 &*self.parent.context,
3865 &mut self.id,
3866 self.data.as_mut(),
3867 indices,
3868 base_vertex,
3869 instances,
3870 );
3871 }
3872
3873 /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
3874 ///
3875 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
3876 ///
3877 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirect`](crate::util::DrawIndirect).
3878 ///
3879 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3880 /// It is not affected by changes to the state that are performed after it is called.
3881 pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
3882 DynContext::render_pass_draw_indirect(
3883 &*self.parent.context,
3884 &mut self.id,
3885 self.data.as_mut(),
3886 &indirect_buffer.id,
3887 indirect_buffer.data.as_ref(),
3888 indirect_offset,
3889 );
3890 }
3891
3892 /// Draws indexed primitives using the active index buffer and the active vertex buffers,
3893 /// based on the contents of the `indirect_buffer`.
3894 ///
3895 /// The active index buffer can be set with [`RenderPass::set_index_buffer`], while the active
3896 /// vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
3897 ///
3898 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirect`](crate::util::DrawIndexedIndirect).
3899 ///
3900 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3901 /// It is not affected by changes to the state that are performed after it is called.
3902 pub fn draw_indexed_indirect(
3903 &mut self,
3904 indirect_buffer: &'a Buffer,
3905 indirect_offset: BufferAddress,
3906 ) {
3907 DynContext::render_pass_draw_indexed_indirect(
3908 &*self.parent.context,
3909 &mut self.id,
3910 self.data.as_mut(),
3911 &indirect_buffer.id,
3912 indirect_buffer.data.as_ref(),
3913 indirect_offset,
3914 );
3915 }
3916
3917 /// Execute a [render bundle][RenderBundle], which is a set of pre-recorded commands
3918 /// that can be run together.
3919 ///
3920 /// Commands in the bundle do not inherit this render pass's current render state, and after the
3921 /// bundle has executed, the state is **cleared** (reset to defaults, not the previous state).
3922 pub fn execute_bundles<I: IntoIterator<Item = &'a RenderBundle> + 'a>(
3923 &mut self,
3924 render_bundles: I,
3925 ) {
3926 DynContext::render_pass_execute_bundles(
3927 &*self.parent.context,
3928 &mut self.id,
3929 self.data.as_mut(),
3930 Box::new(
3931 render_bundles
3932 .into_iter()
3933 .map(|rb| (&rb.id, rb.data.as_ref())),
3934 ),
3935 )
3936 }
3937}
3938
3939/// [`Features::MULTI_DRAW_INDIRECT`] must be enabled on the device in order to call these functions.
3940impl<'a> RenderPass<'a> {
3941 /// Dispatches multiple draw calls from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
3942 /// `count` draw calls are issued.
3943 ///
3944 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
3945 ///
3946 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirect`](crate::util::DrawIndirect).
3947 /// These draw structures are expected to be tightly packed.
3948 ///
3949 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3950 /// It is not affected by changes to the state that are performed after it is called.
3951 pub fn multi_draw_indirect(
3952 &mut self,
3953 indirect_buffer: &'a Buffer,
3954 indirect_offset: BufferAddress,
3955 count: u32,
3956 ) {
3957 DynContext::render_pass_multi_draw_indirect(
3958 &*self.parent.context,
3959 &mut self.id,
3960 self.data.as_mut(),
3961 &indirect_buffer.id,
3962 indirect_buffer.data.as_ref(),
3963 indirect_offset,
3964 count,
3965 );
3966 }
3967
3968 /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers,
3969 /// based on the contents of the `indirect_buffer`. `count` draw calls are issued.
3970 ///
3971 /// The active index buffer can be set with [`RenderPass::set_index_buffer`], while the active
3972 /// vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
3973 ///
3974 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirect`](crate::util::DrawIndexedIndirect).
3975 /// These draw structures are expected to be tightly packed.
3976 ///
3977 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3978 /// It is not affected by changes to the state that are performed after it is called.
3979 pub fn multi_draw_indexed_indirect(
3980 &mut self,
3981 indirect_buffer: &'a Buffer,
3982 indirect_offset: BufferAddress,
3983 count: u32,
3984 ) {
3985 DynContext::render_pass_multi_draw_indexed_indirect(
3986 &*self.parent.context,
3987 &mut self.id,
3988 self.data.as_mut(),
3989 &indirect_buffer.id,
3990 indirect_buffer.data.as_ref(),
3991 indirect_offset,
3992 count,
3993 );
3994 }
3995}
3996
3997/// [`Features::MULTI_DRAW_INDIRECT_COUNT`] must be enabled on the device in order to call these functions.
3998impl<'a> RenderPass<'a> {
3999 /// Dispatches multiple draw calls from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
4000 /// The count buffer is read to determine how many draws to issue.
4001 ///
    /// The indirect buffer must be long enough to account for `max_count` draws; however, only `count`
    /// draws will be read. If `count` is greater than `max_count`, `max_count` will be used.
4004 ///
4005 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4006 ///
4007 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirect`](crate::util::DrawIndirect).
4008 /// These draw structures are expected to be tightly packed.
4009 ///
4010 /// The structure expected in `count_buffer` is the following:
4011 ///
4012 /// ```rust
4013 /// #[repr(C)]
4014 /// struct DrawIndirectCount {
4015 /// count: u32, // Number of draw calls to issue.
4016 /// }
4017 /// ```
4018 ///
4019 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4020 /// It is not affected by changes to the state that are performed after it is called.
4021 pub fn multi_draw_indirect_count(
4022 &mut self,
4023 indirect_buffer: &'a Buffer,
4024 indirect_offset: BufferAddress,
4025 count_buffer: &'a Buffer,
4026 count_offset: BufferAddress,
4027 max_count: u32,
4028 ) {
4029 DynContext::render_pass_multi_draw_indirect_count(
4030 &*self.parent.context,
4031 &mut self.id,
4032 self.data.as_mut(),
4033 &indirect_buffer.id,
4034 indirect_buffer.data.as_ref(),
4035 indirect_offset,
4036 &count_buffer.id,
4037 count_buffer.data.as_ref(),
4038 count_offset,
4039 max_count,
4040 );
4041 }
4042
4043 /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers,
4044 /// based on the contents of the `indirect_buffer`. The count buffer is read to determine how many draws to issue.
4045 ///
    /// The indirect buffer must be long enough to account for `max_count` draws; however, only `count`
    /// draws will be read. If `count` is greater than `max_count`, `max_count` will be used.
4048 ///
4049 /// The active index buffer can be set with [`RenderPass::set_index_buffer`], while the active
4050 /// vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4051 ///
4052 ///
4053 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirect`](crate::util::DrawIndexedIndirect).
4054 ///
4055 /// These draw structures are expected to be tightly packed.
4056 ///
4057 /// The structure expected in `count_buffer` is the following:
4058 ///
4059 /// ```rust
4060 /// #[repr(C)]
4061 /// struct DrawIndexedIndirectCount {
4062 /// count: u32, // Number of draw calls to issue.
4063 /// }
4064 /// ```
4065 ///
4066 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4067 /// It is not affected by changes to the state that are performed after it is called.
4068 pub fn multi_draw_indexed_indirect_count(
4069 &mut self,
4070 indirect_buffer: &'a Buffer,
4071 indirect_offset: BufferAddress,
4072 count_buffer: &'a Buffer,
4073 count_offset: BufferAddress,
4074 max_count: u32,
4075 ) {
4076 DynContext::render_pass_multi_draw_indexed_indirect_count(
4077 &*self.parent.context,
4078 &mut self.id,
4079 self.data.as_mut(),
4080 &indirect_buffer.id,
4081 indirect_buffer.data.as_ref(),
4082 indirect_offset,
4083 &count_buffer.id,
4084 count_buffer.data.as_ref(),
4085 count_offset,
4086 max_count,
4087 );
4088 }
4089}
4090
4091/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
4092impl<'a> RenderPass<'a> {
4093 /// Set push constant data for subsequent draw calls.
4094 ///
4095 /// Write the bytes in `data` at offset `offset` within push constant
4096 /// storage, all of which are accessible by all the pipeline stages in
4097 /// `stages`, and no others. Both `offset` and the length of `data` must be
4098 /// multiples of [`PUSH_CONSTANT_ALIGNMENT`], which is always 4.
4099 ///
4100 /// For example, if `offset` is `4` and `data` is eight bytes long, this
4101 /// call will write `data` to bytes `4..12` of push constant storage.
4102 ///
4103 /// # Stage matching
4104 ///
4105 /// Every byte in the affected range of push constant storage must be
4106 /// accessible to exactly the same set of pipeline stages, which must match
4107 /// `stages`. If there are two bytes of storage that are accessible by
4108 /// different sets of pipeline stages - say, one is accessible by fragment
4109 /// shaders, and the other is accessible by both fragment shaders and vertex
4110 /// shaders - then no single `set_push_constants` call may affect both of
4111 /// them; to write both, you must make multiple calls, each with the
4112 /// appropriate `stages` value.
4113 ///
4114 /// Which pipeline stages may access a given byte is determined by the
4115 /// pipeline's [`PushConstant`] global variable and (if it is a struct) its
4116 /// members' offsets.
4117 ///
4118 /// For example, suppose you have twelve bytes of push constant storage,
4119 /// where bytes `0..8` are accessed by the vertex shader, and bytes `4..12`
4120 /// are accessed by the fragment shader. This means there are three byte
4121 /// ranges each accessed by a different set of stages:
4122 ///
    /// - Bytes `0..4` are accessed only by the vertex shader.
    ///
    /// - Bytes `4..8` are accessed by both the vertex shader and the fragment shader.
    ///
    /// - Bytes `8..12` are accessed only by the fragment shader.
4128 ///
4129 /// To write all twelve bytes requires three `set_push_constants` calls, one
4130 /// for each range, each passing the matching `stages` mask.
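    ///
    /// For that layout, the three calls could look like the following sketch, where `rpass` is
    /// this [`RenderPass`] and `bytes` is an illustrative `[u8; 12]`:
    /// ```rust ignore
    /// rpass.set_push_constants(ShaderStages::VERTEX, 0, &bytes[0..4]);
    /// rpass.set_push_constants(ShaderStages::VERTEX | ShaderStages::FRAGMENT, 4, &bytes[4..8]);
    /// rpass.set_push_constants(ShaderStages::FRAGMENT, 8, &bytes[8..12]);
    /// ```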
4131 ///
4132 /// [`PushConstant`]: https://docs.rs/naga/latest/naga/enum.StorageClass.html#variant.PushConstant
4133 pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
4134 DynContext::render_pass_set_push_constants(
4135 &*self.parent.context,
4136 &mut self.id,
4137 self.data.as_mut(),
4138 stages,
4139 offset,
4140 data,
4141 );
4142 }
4143}
4144
4145/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
4146impl<'a> RenderPass<'a> {
4147 /// Issue a timestamp command at this point in the queue. The
4148 /// timestamp will be written to the specified query set, at the specified index.
4149 ///
4150 /// Must be multiplied by [`Queue::get_timestamp_period`] to get
4151 /// the value in nanoseconds. Absolute values have no meaning,
4152 /// but timestamps can be subtracted to get the time it takes
4153 /// for a string of operations to complete.
4154 pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
4155 DynContext::render_pass_write_timestamp(
4156 &*self.parent.context,
4157 &mut self.id,
4158 self.data.as_mut(),
4159 &query_set.id,
4160 query_set.data.as_ref(),
4161 query_index,
4162 )
4163 }
4164}
4165
4166impl<'a> RenderPass<'a> {
    /// Start an occlusion query on this render pass. It can be ended with
4168 /// `end_occlusion_query`. Occlusion queries may not be nested.
4169 pub fn begin_occlusion_query(&mut self, query_index: u32) {
4170 DynContext::render_pass_begin_occlusion_query(
4171 &*self.parent.context,
4172 &mut self.id,
4173 self.data.as_mut(),
4174 query_index,
4175 );
4176 }
4177
4178 /// End the occlusion query on this render pass. It can be started with
4179 /// `begin_occlusion_query`. Occlusion queries may not be nested.
4180 pub fn end_occlusion_query(&mut self) {
4181 DynContext::render_pass_end_occlusion_query(
4182 &*self.parent.context,
4183 &mut self.id,
4184 self.data.as_mut(),
4185 );
4186 }
4187}
4188
4189/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
4190impl<'a> RenderPass<'a> {
4191 /// Start a pipeline statistics query on this render pass. It can be ended with
4192 /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4193 pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
4194 DynContext::render_pass_begin_pipeline_statistics_query(
4195 &*self.parent.context,
4196 &mut self.id,
4197 self.data.as_mut(),
4198 &query_set.id,
4199 query_set.data.as_ref(),
4200 query_index,
4201 );
4202 }
4203
4204 /// End the pipeline statistics query on this render pass. It can be started with
4205 /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4206 pub fn end_pipeline_statistics_query(&mut self) {
4207 DynContext::render_pass_end_pipeline_statistics_query(
4208 &*self.parent.context,
4209 &mut self.id,
4210 self.data.as_mut(),
4211 );
4212 }
4213}
4214
4215impl<'a> Drop for RenderPass<'a> {
4216 fn drop(&mut self) {
4217 if !thread::panicking() {
4218 let parent_id = self.parent.id.as_ref().unwrap();
4219 self.parent.context.command_encoder_end_render_pass(
4220 parent_id,
4221 self.parent.data.as_ref(),
4222 &mut self.id,
4223 self.data.as_mut(),
4224 );
4225 }
4226 }
4227}
4228
4229impl<'a> ComputePass<'a> {
4230 /// Sets the active bind group for a given bind group index. The bind group layout
4231 /// in the active pipeline when the `dispatch()` function is called must match the layout of this bind group.
4232 ///
    /// If the bind group has dynamic offsets, provide them in the binding order.
4234 /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
4235 /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
4236 pub fn set_bind_group(
4237 &mut self,
4238 index: u32,
4239 bind_group: &'a BindGroup,
4240 offsets: &[DynamicOffset],
4241 ) {
4242 DynContext::compute_pass_set_bind_group(
4243 &*self.parent.context,
4244 &mut self.id,
4245 self.data.as_mut(),
4246 index,
4247 &bind_group.id,
4248 bind_group.data.as_ref(),
4249 offsets,
4250 );
4251 }
4252
4253 /// Sets the active compute pipeline.
4254 pub fn set_pipeline(&mut self, pipeline: &'a ComputePipeline) {
4255 DynContext::compute_pass_set_pipeline(
4256 &*self.parent.context,
4257 &mut self.id,
4258 self.data.as_mut(),
4259 &pipeline.id,
4260 pipeline.data.as_ref(),
4261 );
4262 }
4263
    /// Inserts a debug marker.
4265 pub fn insert_debug_marker(&mut self, label: &str) {
4266 DynContext::compute_pass_insert_debug_marker(
4267 &*self.parent.context,
4268 &mut self.id,
4269 self.data.as_mut(),
4270 label,
4271 );
4272 }
4273
    /// Start recording commands and group them into a debug marker group.
4275 pub fn push_debug_group(&mut self, label: &str) {
4276 DynContext::compute_pass_push_debug_group(
4277 &*self.parent.context,
4278 &mut self.id,
4279 self.data.as_mut(),
4280 label,
4281 );
4282 }
4283
    /// Stops recording commands into the current debug marker group.
4285 pub fn pop_debug_group(&mut self) {
4286 DynContext::compute_pass_pop_debug_group(
4287 &*self.parent.context,
4288 &mut self.id,
4289 self.data.as_mut(),
4290 );
4291 }
4292
4293 /// Dispatches compute work operations.
4294 ///
4295 /// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension.
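    ///
    /// A rough sketch of covering `num_items` elements with a shader declared as
    /// `@workgroup_size(64)` (the `cpass` and `num_items` bindings and the workgroup size of 64
    /// are illustrative):
    /// ```rust ignore
    /// let workgroup_count = (num_items + 63) / 64; // round up so every item is covered
    /// cpass.dispatch_workgroups(workgroup_count, 1, 1);
    /// ```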
4296 pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
4297 DynContext::compute_pass_dispatch_workgroups(
4298 &*self.parent.context,
4299 &mut self.id,
4300 self.data.as_mut(),
4301 x,
4302 y,
4303 z,
4304 );
4305 }
4306
4307 /// Dispatches compute work operations, based on the contents of the `indirect_buffer`.
4308 ///
4309 /// The structure expected in `indirect_buffer` must conform to [`DispatchIndirect`](crate::util::DispatchIndirect).
4310 pub fn dispatch_workgroups_indirect(
4311 &mut self,
4312 indirect_buffer: &'a Buffer,
4313 indirect_offset: BufferAddress,
4314 ) {
4315 DynContext::compute_pass_dispatch_workgroups_indirect(
4316 &*self.parent.context,
4317 &mut self.id,
4318 self.data.as_mut(),
4319 &indirect_buffer.id,
4320 indirect_buffer.data.as_ref(),
4321 indirect_offset,
4322 );
4323 }
4324}
4325
4326/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
4327impl<'a> ComputePass<'a> {
4328 /// Set push constant data for subsequent dispatch calls.
4329 ///
4330 /// Write the bytes in `data` at offset `offset` within push constant
4331 /// storage. Both `offset` and the length of `data` must be
4332 /// multiples of [`PUSH_CONSTANT_ALIGNMENT`], which is always 4.
4333 ///
4334 /// For example, if `offset` is `4` and `data` is eight bytes long, this
4335 /// call will write `data` to bytes `4..12` of push constant storage.
4336 pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
4337 DynContext::compute_pass_set_push_constants(
4338 &*self.parent.context,
4339 &mut self.id,
4340 self.data.as_mut(),
4341 offset,
4342 data,
4343 );
4344 }
4345}
4346
4347/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
4348impl<'a> ComputePass<'a> {
4349 /// Issue a timestamp command at this point in the queue. The timestamp will be written to the specified query set, at the specified index.
4350 ///
4351 /// Must be multiplied by [`Queue::get_timestamp_period`] to get
4352 /// the value in nanoseconds. Absolute values have no meaning,
4353 /// but timestamps can be subtracted to get the time it takes
4354 /// for a string of operations to complete.
4355 pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
4356 DynContext::compute_pass_write_timestamp(
4357 &*self.parent.context,
4358 &mut self.id,
4359 self.data.as_mut(),
4360 &query_set.id,
4361 query_set.data.as_ref(),
4362 query_index,
4363 )
4364 }
4365}
4366
4367/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
4368impl<'a> ComputePass<'a> {
    /// Start a pipeline statistics query on this compute pass. It can be ended with
4370 /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4371 pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
4372 DynContext::compute_pass_begin_pipeline_statistics_query(
4373 &*self.parent.context,
4374 &mut self.id,
4375 self.data.as_mut(),
4376 &query_set.id,
4377 query_set.data.as_ref(),
4378 query_index,
4379 );
4380 }
4381
    /// End the pipeline statistics query on this compute pass. It can be started with
4383 /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4384 pub fn end_pipeline_statistics_query(&mut self) {
4385 DynContext::compute_pass_end_pipeline_statistics_query(
4386 &*self.parent.context,
4387 &mut self.id,
4388 self.data.as_mut(),
4389 );
4390 }
4391}
4392
4393impl<'a> Drop for ComputePass<'a> {
4394 fn drop(&mut self) {
4395 if !thread::panicking() {
4396 let parent_id = self.parent.id.as_ref().unwrap();
4397 self.parent.context.command_encoder_end_compute_pass(
4398 parent_id,
4399 self.parent.data.as_ref(),
4400 &mut self.id,
4401 self.data.as_mut(),
4402 );
4403 }
4404 }
4405}
4406
4407impl<'a> RenderBundleEncoder<'a> {
4408 /// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes.
4409 pub fn finish(self, desc: &RenderBundleDescriptor) -> RenderBundle {
4410 let (id, data) =
4411 DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc);
4412 RenderBundle {
4413 context: Arc::clone(&self.context),
4414 id,
4415 data,
4416 }
4417 }
4418
4419 /// Sets the active bind group for a given bind group index. The bind group layout
4420 /// in the active pipeline when any `draw()` function is called must match the layout of this bind group.
4421 ///
    /// If the bind group has dynamic offsets, provide them in the binding order.
4423 pub fn set_bind_group(
4424 &mut self,
4425 index: u32,
4426 bind_group: &'a BindGroup,
4427 offsets: &[DynamicOffset],
4428 ) {
4429 DynContext::render_bundle_encoder_set_bind_group(
4430 &*self.parent.context,
4431 &mut self.id,
4432 self.data.as_mut(),
4433 index,
4434 &bind_group.id,
4435 bind_group.data.as_ref(),
4436 offsets,
4437 )
4438 }
4439
4440 /// Sets the active render pipeline.
4441 ///
4442 /// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
4443 pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
4444 DynContext::render_bundle_encoder_set_pipeline(
4445 &*self.parent.context,
4446 &mut self.id,
4447 self.data.as_mut(),
4448 &pipeline.id,
4449 pipeline.data.as_ref(),
4450 )
4451 }
4452
4453 /// Sets the active index buffer.
4454 ///
4455 /// Subsequent calls to [`draw_indexed`](RenderBundleEncoder::draw_indexed) on this [`RenderBundleEncoder`] will
4456 /// use `buffer` as the source index buffer.
4457 pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
4458 DynContext::render_bundle_encoder_set_index_buffer(
4459 &*self.parent.context,
4460 &mut self.id,
4461 self.data.as_mut(),
4462 &buffer_slice.buffer.id,
4463 buffer_slice.buffer.data.as_ref(),
4464 index_format,
4465 buffer_slice.offset,
4466 buffer_slice.size,
4467 )
4468 }
4469
4470 /// Assign a vertex buffer to a slot.
4471 ///
4472 /// Subsequent calls to [`draw`] and [`draw_indexed`] on this
4473 /// [`RenderBundleEncoder`] will use `buffer` as one of the source vertex buffers.
4474 ///
4475 /// The `slot` refers to the index of the matching descriptor in
4476 /// [`VertexState::buffers`].
4477 ///
4478 /// [`draw`]: RenderBundleEncoder::draw
4479 /// [`draw_indexed`]: RenderBundleEncoder::draw_indexed
4480 pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
4481 DynContext::render_bundle_encoder_set_vertex_buffer(
4482 &*self.parent.context,
4483 &mut self.id,
4484 self.data.as_mut(),
4485 slot,
4486 &buffer_slice.buffer.id,
4487 buffer_slice.buffer.data.as_ref(),
4488 buffer_slice.offset,
4489 buffer_slice.size,
4490 )
4491 }
4492
4493 /// Draws primitives from the active vertex buffer(s).
4494 ///
4495 /// The active vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4496 /// Does not use an Index Buffer. If you need this see [`RenderBundleEncoder::draw_indexed`]
4497 ///
    /// Panics if the `vertices` range is outside the bounds of any set vertex buffer.
    ///
    /// `vertices`: The range of vertices to draw.
    /// `instances`: The range of instances to draw. Use `0..1` if instance buffers are not used.
    /// E.g., roughly how it is used internally:
4503 /// ```rust ignore
4504 /// for instance_id in instance_range {
4505 /// for vertex_id in vertex_range {
4506 /// let vertex = vertex[vertex_id];
4507 /// vertex_shader(vertex, vertex_id, instance_id);
4508 /// }
4509 /// }
4510 /// ```
4511 pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
4512 DynContext::render_bundle_encoder_draw(
4513 &*self.parent.context,
4514 &mut self.id,
4515 self.data.as_mut(),
4516 vertices,
4517 instances,
4518 )
4519 }
4520
4521 /// Draws indexed primitives using the active index buffer and the active vertex buffer(s).
4522 ///
4523 /// The active index buffer can be set with [`RenderBundleEncoder::set_index_buffer`].
4524 /// The active vertex buffer(s) can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4525 ///
    /// Panics if the `indices` range is outside the bounds of any set index buffer.
    ///
    /// `indices`: The range of indices to draw.
    /// `base_vertex`: value added to each index value before indexing into the vertex buffers.
    /// `instances`: The range of instances to draw. Use `0..1` if instance buffers are not used.
    /// E.g., roughly how it is used internally:
4532 /// ```rust ignore
4533 /// for instance_id in instance_range {
4534 /// for index_index in index_range {
4535 /// let vertex_id = index_buffer[index_index];
4536 /// let adjusted_vertex_id = vertex_id + base_vertex;
4537 /// let vertex = vertex[adjusted_vertex_id];
4538 /// vertex_shader(vertex, adjusted_vertex_id, instance_id);
4539 /// }
4540 /// }
4541 /// ```
4542 pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
4543 DynContext::render_bundle_encoder_draw_indexed(
4544 &*self.parent.context,
4545 &mut self.id,
4546 self.data.as_mut(),
4547 indices,
4548 base_vertex,
4549 instances,
4550 );
4551 }
4552
4553 /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
4554 ///
4555 /// The active vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4556 ///
4557 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirect`](crate::util::DrawIndirect).
4558 pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
4559 DynContext::render_bundle_encoder_draw_indirect(
4560 &*self.parent.context,
4561 &mut self.id,
4562 self.data.as_mut(),
4563 &indirect_buffer.id,
4564 indirect_buffer.data.as_ref(),
4565 indirect_offset,
4566 );
4567 }
4568
4569 /// Draws indexed primitives using the active index buffer and the active vertex buffers,
4570 /// based on the contents of the `indirect_buffer`.
4571 ///
4572 /// The active index buffer can be set with [`RenderBundleEncoder::set_index_buffer`], while the active
4573 /// vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4574 ///
4575 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirect`](crate::util::DrawIndexedIndirect).
4576 pub fn draw_indexed_indirect(
4577 &mut self,
4578 indirect_buffer: &'a Buffer,
4579 indirect_offset: BufferAddress,
4580 ) {
4581 DynContext::render_bundle_encoder_draw_indexed_indirect(
4582 &*self.parent.context,
4583 &mut self.id,
4584 self.data.as_mut(),
4585 &indirect_buffer.id,
4586 indirect_buffer.data.as_ref(),
4587 indirect_offset,
4588 );
4589 }
4590}
4591
4592/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
4593impl<'a> RenderBundleEncoder<'a> {
4594 /// Set push constant data.
4595 ///
4596 /// Offset is measured in bytes, but must be a multiple of [`PUSH_CONSTANT_ALIGNMENT`].
4597 ///
4598 /// Data size must be a multiple of 4 and must have an alignment of 4.
4599 /// For example, with an offset of 4 and an array of `[u8; 8]`, that will write to the range
4600 /// of 4..12.
4601 ///
4602 /// For each byte in the range of push constant data written, the union of the stages of all push constant
4603 /// ranges that covers that byte must be exactly `stages`. There's no good way of explaining this simply,
4604 /// so here are some examples:
4605 ///
4606 /// ```text
4607 /// For the given ranges:
4608 /// - 0..4 Vertex
4609 /// - 4..8 Fragment
4610 /// ```
4611 ///
4612 /// You would need to upload this in two set_push_constants calls. First for the `Vertex` range, second for the `Fragment` range.
4613 ///
4614 /// ```text
4615 /// For the given ranges:
4616 /// - 0..8 Vertex
4617 /// - 4..12 Fragment
4618 /// ```
4619 ///
4620 /// You would need to upload this in three set_push_constants calls. First for the `Vertex` only range 0..4, second
4621 /// for the `Vertex | Fragment` range 4..8, third for the `Fragment` range 8..12.
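    ///
    /// A sketch of that second case (the `bundle_encoder` and `bytes: [u8; 12]` bindings are
    /// illustrative):
    /// ```rust ignore
    /// bundle_encoder.set_push_constants(ShaderStages::VERTEX, 0, &bytes[0..4]);
    /// bundle_encoder.set_push_constants(ShaderStages::VERTEX | ShaderStages::FRAGMENT, 4, &bytes[4..8]);
    /// bundle_encoder.set_push_constants(ShaderStages::FRAGMENT, 8, &bytes[8..12]);
    /// ```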
4622 pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
4623 DynContext::render_bundle_encoder_set_push_constants(
4624 &*self.parent.context,
4625 &mut self.id,
4626 self.data.as_mut(),
4627 stages,
4628 offset,
4629 data,
4630 );
4631 }
4632}
4633
/// A write-only view into a staging buffer.
///
/// Reading from this buffer won't yield the contents of the buffer from the
/// GPU and is likely to be slow. Because of this, although [`AsMut`] is
/// implemented for this type, [`AsRef`] is not.
4639pub struct QueueWriteBufferView<'a> {
4640 queue: &'a Queue,
4641 buffer: &'a Buffer,
4642 offset: BufferAddress,
4643 inner: Box<dyn context::QueueWriteBuffer>,
4644}
4645#[cfg(any(
4646 not(target_arch = "wasm32"),
4647 all(
4648 feature = "fragile-send-sync-non-atomic-wasm",
4649 not(target_feature = "atomics")
4650 )
4651))]
4652static_assertions::assert_impl_all!(QueueWriteBufferView: Send, Sync);
4653
4654impl Deref for QueueWriteBufferView<'_> {
4655 type Target = [u8];
4656
4657 fn deref(&self) -> &Self::Target {
4658 log::warn!("Reading from a QueueWriteBufferView won't yield the contents of the buffer and may be slow.");
4659 self.inner.slice()
4660 }
4661}
4662
4663impl DerefMut for QueueWriteBufferView<'_> {
4664 fn deref_mut(&mut self) -> &mut Self::Target {
4665 self.inner.slice_mut()
4666 }
4667}
4668
4669impl<'a> AsMut<[u8]> for QueueWriteBufferView<'a> {
4670 fn as_mut(&mut self) -> &mut [u8] {
4671 self.inner.slice_mut()
4672 }
4673}
4674
4675impl<'a> Drop for QueueWriteBufferView<'a> {
4676 fn drop(&mut self) {
4677 DynContext::queue_write_staging_buffer(
4678 &*self.queue.context,
4679 &self.queue.id,
4680 self.queue.data.as_ref(),
4681 &self.buffer.id,
4682 self.buffer.data.as_ref(),
4683 self.offset,
4684 &*self.inner,
4685 );
4686 }
4687}
4688
4689impl Queue {
4690 /// Schedule a data write into `buffer` starting at `offset`.
4691 ///
4692 /// This method is intended to have low performance costs.
4693 /// As such, the write is not immediately submitted, and instead enqueued
4694 /// internally to happen at the start of the next `submit()` call.
4695 ///
4696 /// This method fails if `data` overruns the size of `buffer` starting at `offset`.
4697 pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
4698 DynContext::queue_write_buffer(
4699 &*self.context,
4700 &self.id,
4701 self.data.as_ref(),
4702 &buffer.id,
4703 buffer.data.as_ref(),
4704 offset,
4705 data,
4706 )
4707 }
4708
4709 /// Schedule a data write into `buffer` starting at `offset` via the returned
4710 /// [`QueueWriteBufferView`].
4711 ///
4712 /// Reading from this buffer is slow and will not yield the actual contents of the buffer.
4713 ///
4714 /// This method is intended to have low performance costs.
4715 /// As such, the write is not immediately submitted, and instead enqueued
4716 /// internally to happen at the start of the next `submit()` call.
4717 ///
4718 /// This method fails if `size` is greater than the size of `buffer` starting at `offset`.
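    ///
    /// A minimal sketch, assuming `buffer` has `COPY_DST` usage and is at least 16 bytes long,
    /// and `payload` is a 16-byte `&[u8]` (names are illustrative):
    /// ```rust ignore
    /// if let Some(mut view) = queue.write_buffer_with(&buffer, 0, BufferSize::new(16).unwrap()) {
    ///     view.copy_from_slice(payload);
    /// } // the staged write is enqueued when the view is dropped
    /// ```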
4719 #[must_use]
4720 pub fn write_buffer_with<'a>(
4721 &'a self,
4722 buffer: &'a Buffer,
4723 offset: BufferAddress,
4724 size: BufferSize,
4725 ) -> Option<QueueWriteBufferView<'a>> {
4726 profiling::scope!("Queue::write_buffer_with");
4727 DynContext::queue_validate_write_buffer(
4728 &*self.context,
4729 &self.id,
4730 self.data.as_ref(),
4731 &buffer.id,
4732 buffer.data.as_ref(),
4733 offset,
4734 size,
4735 )?;
4736 let staging_buffer = DynContext::queue_create_staging_buffer(
4737 &*self.context,
4738 &self.id,
4739 self.data.as_ref(),
4740 size,
4741 )?;
4742 Some(QueueWriteBufferView {
4743 queue: self,
4744 buffer,
4745 offset,
4746 inner: staging_buffer,
4747 })
4748 }
4749
4750 /// Schedule a write of some data into a texture.
4751 ///
4752 /// * `data` contains the texels to be written, which must be in
4753 /// [the same format as the texture](TextureFormat).
4754 /// * `data_layout` describes the memory layout of `data`, which does not necessarily
4755 /// have to have tightly packed rows.
4756 /// * `texture` specifies the texture to write into, and the location within the
4757 /// texture (coordinate offset, mip level) that will be overwritten.
4758 /// * `size` is the size, in texels, of the region to be written.
4759 ///
4760 /// This method is intended to have low performance costs.
4761 /// As such, the write is not immediately submitted, and instead enqueued
4762 /// internally to happen at the start of the next `submit()` call.
4763 /// However, `data` will be immediately copied into staging memory; so the caller may
4764 /// discard it any time after this call completes.
4765 ///
4766 /// This method fails if `size` overruns the size of `texture`, or if `data` is too short.
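    ///
    /// A sketch of uploading a tightly packed 256x256 `Rgba8UnormSrgb` image (the `texture` and
    /// `pixels` bindings are illustrative):
    /// ```rust ignore
    /// queue.write_texture(
    ///     texture.as_image_copy(),
    ///     &pixels,
    ///     ImageDataLayout {
    ///         offset: 0,
    ///         bytes_per_row: Some(4 * 256), // 4 bytes per RGBA8 texel
    ///         rows_per_image: None,         // only needed for 3D / array-layer copies
    ///     },
    ///     Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
    /// );
    /// ```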
4767 pub fn write_texture(
4768 &self,
4769 texture: ImageCopyTexture,
4770 data: &[u8],
4771 data_layout: ImageDataLayout,
4772 size: Extent3d,
4773 ) {
4774 DynContext::queue_write_texture(
4775 &*self.context,
4776 &self.id,
4777 self.data.as_ref(),
4778 texture,
4779 data,
4780 data_layout,
4781 size,
4782 )
4783 }
4784
4785 /// Schedule a copy of data from `image` into `texture`.
4786 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
4787 pub fn copy_external_image_to_texture(
4788 &self,
4789 source: &wgt::ImageCopyExternalImage,
4790 dest: ImageCopyTextureTagged,
4791 size: Extent3d,
4792 ) {
4793 DynContext::queue_copy_external_image_to_texture(
4794 &*self.context,
4795 &self.id,
4796 self.data.as_ref(),
4797 source,
4798 dest,
4799 size,
4800 )
4801 }
4802
4803 /// Submits a series of finished command buffers for execution.
4804 pub fn submit<I: IntoIterator<Item = CommandBuffer>>(
4805 &self,
4806 command_buffers: I,
4807 ) -> SubmissionIndex {
4808 let (raw, data) = DynContext::queue_submit(
4809 &*self.context,
4810 &self.id,
4811 self.data.as_ref(),
4812 Box::new(
4813 command_buffers
4814 .into_iter()
4815 .map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap())),
4816 ),
4817 );
4818
4819 SubmissionIndex(raw, data)
4820 }
4821
    /// Gets the number of nanoseconds each tick of a timestamp query represents.
    ///
    /// Returns zero if timestamp queries are unsupported.
    ///
    /// Timestamp values are represented in nanoseconds on WebGPU (see `<https://gpuweb.github.io/gpuweb/#timestamp>`),
    /// so this is always 1.0 on the web, but on wgpu-core a manual conversion is required.
4828 pub fn get_timestamp_period(&self) -> f32 {
4829 DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref())
4830 }
4831
4832 /// Registers a callback when the previous call to submit finishes running on the gpu. This callback
4833 /// being called implies that all mapped buffer callbacks which were registered before this call will
4834 /// have been called.
4835 ///
4836 /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
4837 /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
4838 ///
4839 /// The callback will be called on the thread that first calls the above functions after the gpu work
4840 /// has completed. There are no restrictions on the code you can run in the callback, however on native the
4841 /// call to the function will not complete until the callback returns, so prefer keeping callbacks short
4842 /// and used to set flags, send messages, etc.
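    ///
    /// A sketch of blocking until previously submitted work has finished, assuming `device`,
    /// `queue`, and `encoder` are in scope, a blocking `Maintain::Wait` poll is available, and
    /// blocking this thread is acceptable:
    /// ```rust ignore
    /// queue.submit(Some(encoder.finish()));
    /// let (tx, rx) = std::sync::mpsc::channel();
    /// queue.on_submitted_work_done(move || tx.send(()).unwrap());
    /// device.poll(Maintain::Wait); // or integrate polling into your event loop
    /// rx.recv().unwrap();
    /// ```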
4843 pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
4844 DynContext::queue_on_submitted_work_done(
4845 &*self.context,
4846 &self.id,
4847 self.data.as_ref(),
4848 Box::new(callback),
4849 )
4850 }
4851}
4852
4853impl SurfaceTexture {
4854 /// Schedule this texture to be presented on the owning surface.
4855 ///
4856 /// Needs to be called after any work on the texture is scheduled via [`Queue::submit`].
4857 pub fn present(mut self) {
4858 self.presented = true;
4859 DynContext::surface_present(
4860 &*self.texture.context,
4861 &self.texture.id,
4862 // This call to as_ref is essential because we want the DynContext implementation to see the inner
4863 // value of the Box (T::SurfaceOutputDetail), not the Box itself.
4864 self.detail.as_ref(),
4865 );
4866 }
4867}
4868
4869impl Drop for SurfaceTexture {
4870 fn drop(&mut self) {
4871 if !self.presented && !thread::panicking() {
4872 DynContext::surface_texture_discard(
4873 &*self.texture.context,
4874 &self.texture.id,
4875 // This call to as_ref is essential because we want the DynContext implementation to see the inner
4876 // value of the Box (T::SurfaceOutputDetail), not the Box itself.
4877 self.detail.as_ref(),
4878 );
4879 }
4880 }
4881}
4882
4883impl Surface {
4884 /// Returns the capabilities of the surface when used with the given adapter.
4885 ///
    /// Returns specified values (see [`SurfaceCapabilities`]) if the surface is incompatible with the adapter.
4887 pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities {
4888 DynContext::surface_get_capabilities(
4889 &*self.context,
4890 &self.id,
4891 self.data.as_ref(),
4892 &adapter.id,
4893 adapter.data.as_ref(),
4894 )
4895 }
4896
4897 /// Return a default `SurfaceConfiguration` from width and height to use for the [`Surface`] with this adapter.
4898 ///
    /// Returns `None` if the surface isn't supported by this adapter.
4900 pub fn get_default_config(
4901 &self,
4902 adapter: &Adapter,
4903 width: u32,
4904 height: u32,
4905 ) -> Option<SurfaceConfiguration> {
4906 let caps = self.get_capabilities(adapter);
4907 Some(SurfaceConfiguration {
4908 usage: wgt::TextureUsages::RENDER_ATTACHMENT,
4909 format: *caps.formats.get(0)?,
4910 width,
4911 height,
4912 present_mode: *caps.present_modes.get(0)?,
4913 alpha_mode: wgt::CompositeAlphaMode::Auto,
4914 view_formats: vec![],
4915 })
4916 }
4917
4918 /// Initializes [`Surface`] for presentation.
4919 ///
4920 /// # Panics
4921 ///
    /// - An old [`SurfaceTexture`] is still alive referencing an old surface.
4923 /// - Texture format requested is unsupported on the surface.
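    ///
    /// A typical setup sketch, assuming `surface`, `adapter`, and `device` already exist and the
    /// window's inner size is `width` x `height` (all names are illustrative):
    /// ```rust ignore
    /// let config = surface
    ///     .get_default_config(&adapter, width, height)
    ///     .expect("surface is not supported by this adapter");
    /// surface.configure(&device, &config);
    /// ```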
4924 pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) {
4925 DynContext::surface_configure(
4926 &*self.context,
4927 &self.id,
4928 self.data.as_ref(),
4929 &device.id,
4930 device.data.as_ref(),
4931 config,
4932 );
4933
4934 let mut conf = self.config.lock();
4935 *conf = Some(config.clone());
4936 }
4937
    /// Returns the next texture to be presented by the swapchain for drawing.
    ///
    /// In order to present the [`SurfaceTexture`] returned by this method,
    /// first submit some work rendering to this texture via [`Queue::submit`],
    /// then call [`SurfaceTexture::present`].
    ///
    /// If a [`SurfaceTexture`] referencing this surface is still alive when the swapchain is
    /// recreated, recreating the swapchain will panic.
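    ///
    /// # Examples
    ///
    /// A sketch of one frame of an acquire/render/present loop, assuming `surface`, `device`
    /// and `queue` already exist and the surface has been configured; the actual render pass
    /// recording is elided:
    ///
    /// ```no_run
    /// fn draw_frame(
    ///     surface: &wgpu::Surface,
    ///     device: &wgpu::Device,
    ///     queue: &wgpu::Queue,
    /// ) -> Result<(), wgpu::SurfaceError> {
    ///     let frame = surface.get_current_texture()?;
    ///     // Views of the acquired texture are what render passes attach to.
    ///     let _view = frame
    ///         .texture
    ///         .create_view(&wgpu::TextureViewDescriptor::default());
    ///
    ///     let mut encoder =
    ///         device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    ///     // Record render/compute passes targeting `_view` here ...
    ///
    ///     // Work must be submitted before the texture is presented.
    ///     queue.submit(Some(encoder.finish()));
    ///     frame.present();
    ///     Ok(())
    /// }
    /// ```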
    pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> {
        let (texture_id, texture_data, status, detail) =
            DynContext::surface_get_current_texture(&*self.context, &self.id, self.data.as_ref());

        let suboptimal = match status {
            SurfaceStatus::Good => false,
            SurfaceStatus::Suboptimal => true,
            SurfaceStatus::Timeout => return Err(SurfaceError::Timeout),
            SurfaceStatus::Outdated => return Err(SurfaceError::Outdated),
            SurfaceStatus::Lost => return Err(SurfaceError::Lost),
        };

        let guard = self.config.lock();
        let config = guard
            .as_ref()
            .expect("This surface has not been configured yet.");

        let descriptor = TextureDescriptor {
            label: None,
            size: Extent3d {
                width: config.width,
                height: config.height,
                depth_or_array_layers: 1,
            },
            format: config.format,
            usage: config.usage,
            mip_level_count: 1,
            sample_count: 1,
            dimension: TextureDimension::D2,
            view_formats: &[],
        };

        texture_id
            .zip(texture_data)
            .map(|(id, data)| SurfaceTexture {
                texture: Texture {
                    context: Arc::clone(&self.context),
                    id,
                    data,
                    owned: false,
                    descriptor,
                },
                suboptimal,
                presented: false,
                detail,
            })
            .ok_or(SurfaceError::Lost)
    }

    /// Returns the inner hal Surface using a callback. The hal surface will be `None` if the
    /// backend type argument does not match this wgpu Surface.
    ///
    /// # Safety
    ///
    /// - The raw handle obtained from the hal Surface must not be manually destroyed.
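    ///
    /// # Examples
    ///
    /// A hedged sketch (native only); the Vulkan backend is chosen purely for illustration,
    /// and the callback receives `None` if the surface was not created on that backend:
    ///
    /// ```ignore
    /// fn inspect_vulkan_surface(surface: &mut wgpu::Surface) {
    ///     unsafe {
    ///         surface.as_hal_mut::<wgpu::hal::api::Vulkan, _, _>(|raw_surface| {
    ///             if let Some(_raw_surface) = raw_surface {
    ///                 // The raw surface may be inspected here, but its handle must not be
    ///                 // destroyed manually.
    ///             }
    ///         });
    ///     }
    /// }
    /// ```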
    #[cfg(any(
        not(target_arch = "wasm32"),
        target_os = "emscripten",
        feature = "webgl"
    ))]
    pub unsafe fn as_hal_mut<
        A: wgc::hal_api::HalApi,
        F: FnOnce(Option<&mut A::Surface>) -> R,
        R,
    >(
        &mut self,
        hal_surface_callback: F,
    ) -> R {
        unsafe {
            self.context
                .as_any()
                .downcast_ref::<crate::backend::Context>()
                .unwrap()
                .surface_as_hal_mut::<A, F, R>(
                    self.data.downcast_ref().unwrap(),
                    hal_surface_callback,
                )
        }
    }
}

/// Opaque globally-unique identifier
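///
/// # Examples
///
/// A small sketch (requires the `expose-ids` feature); because `Id` is `Copy`, `Eq` and
/// `Hash`, it can serve as a stable map key for resources, here a hypothetical label cache:
///
/// ```no_run
/// use std::collections::HashMap;
///
/// fn remember_label(
///     texture: &wgpu::Texture,
///     label: &str,
///     labels: &mut HashMap<wgpu::Id<wgpu::Texture>, String>,
/// ) {
///     labels.insert(texture.global_id(), label.to_string());
/// }
/// ```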
#[cfg(feature = "expose-ids")]
#[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
#[repr(transparent)]
pub struct Id<T>(::core::num::NonZeroU64, std::marker::PhantomData<*mut T>);

// SAFETY: `Id` is a bare `NonZeroU64`; the type parameter is a marker used purely to avoid
// confusing `Id`s returned for different types, so `Id` can safely implement Send and Sync.
#[cfg(feature = "expose-ids")]
unsafe impl<T> Send for Id<T> {}

// SAFETY: See the implementation for `Send`.
#[cfg(feature = "expose-ids")]
unsafe impl<T> Sync for Id<T> {}

#[cfg(feature = "expose-ids")]
impl<T> Clone for Id<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[cfg(feature = "expose-ids")]
impl<T> Copy for Id<T> {}

#[cfg(feature = "expose-ids")]
impl<T> fmt::Debug for Id<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Id").field(&self.0).finish()
    }
}

#[cfg(feature = "expose-ids")]
impl<T> PartialEq for Id<T> {
    fn eq(&self, other: &Id<T>) -> bool {
        self.0 == other.0
    }
}

#[cfg(feature = "expose-ids")]
impl<T> Eq for Id<T> {}

#[cfg(feature = "expose-ids")]
impl<T> std::hash::Hash for Id<T> {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}

#[cfg(feature = "expose-ids")]
impl Adapter {
    /// Returns a globally-unique identifier for this `Adapter`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Adapter`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Adapter> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl Device {
    /// Returns a globally-unique identifier for this `Device`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Device`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Device> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl Queue {
    /// Returns a globally-unique identifier for this `Queue`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Queue`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Queue> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl ShaderModule {
    /// Returns a globally-unique identifier for this `ShaderModule`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `ShaderModule`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<ShaderModule> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl BindGroupLayout {
    /// Returns a globally-unique identifier for this `BindGroupLayout`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `BindGroupLayout`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<BindGroupLayout> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl BindGroup {
    /// Returns a globally-unique identifier for this `BindGroup`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `BindGroup`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<BindGroup> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl TextureView {
    /// Returns a globally-unique identifier for this `TextureView`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `TextureView`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<TextureView> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl Sampler {
    /// Returns a globally-unique identifier for this `Sampler`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Sampler`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Sampler> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl Buffer {
    /// Returns a globally-unique identifier for this `Buffer`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Buffer`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Buffer> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl Texture {
    /// Returns a globally-unique identifier for this `Texture`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Texture`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Texture> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl QuerySet {
    /// Returns a globally-unique identifier for this `QuerySet`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `QuerySet`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<QuerySet> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl PipelineLayout {
    /// Returns a globally-unique identifier for this `PipelineLayout`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `PipelineLayout`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<PipelineLayout> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl RenderPipeline {
    /// Returns a globally-unique identifier for this `RenderPipeline`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `RenderPipeline`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<RenderPipeline> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl ComputePipeline {
    /// Returns a globally-unique identifier for this `ComputePipeline`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `ComputePipeline`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<ComputePipeline> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl RenderBundle {
    /// Returns a globally-unique identifier for this `RenderBundle`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `RenderBundle`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<RenderBundle> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

#[cfg(feature = "expose-ids")]
impl Surface {
    /// Returns a globally-unique identifier for this `Surface`.
    ///
    /// Calling this method multiple times on the same object will always return the same value.
    /// The returned value is guaranteed to be unique among all `Surface`s created from the same
    /// `Instance`.
    #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))]
    pub fn global_id(&self) -> Id<Surface> {
        Id(self.id.global_id(), std::marker::PhantomData)
    }
}

/// Type for the callback of an uncaptured error handler.
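///
/// # Examples
///
/// A sketch of a handler that satisfies this trait; it only logs the error, and registering
/// it (for example via the device's uncaptured-error hook) is assumed to happen elsewhere:
///
/// ```no_run
/// fn log_uncaptured_error(error: wgpu::Error) {
///     match error {
///         wgpu::Error::OutOfMemory { .. } => eprintln!("wgpu ran out of memory"),
///         wgpu::Error::Validation { description, .. } => {
///             eprintln!("wgpu validation error: {description}")
///         }
///     }
/// }
/// // Plain functions like `log_uncaptured_error` are `Fn(Error) + Send + 'static`,
/// // so they implement `UncapturedErrorHandler` automatically.
/// ```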
pub trait UncapturedErrorHandler: Fn(Error) + Send + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + 'static {}

/// Error type
#[derive(Debug)]
pub enum Error {
    /// Out of memory error
    OutOfMemory {
        /// Lower level source of the error.
        #[cfg(any(
            not(target_arch = "wasm32"),
            all(
                feature = "fragile-send-sync-non-atomic-wasm",
                not(target_feature = "atomics")
            )
        ))]
        source: Box<dyn error::Error + Send + 'static>,
        /// Lower level source of the error.
        #[cfg(not(any(
            not(target_arch = "wasm32"),
            all(
                feature = "fragile-send-sync-non-atomic-wasm",
                not(target_feature = "atomics")
            )
        )))]
        source: Box<dyn error::Error + 'static>,
    },
    /// Validation error, signifying a bug in code or data
    Validation {
        /// Lower level source of the error.
        #[cfg(any(
            not(target_arch = "wasm32"),
            all(
                feature = "fragile-send-sync-non-atomic-wasm",
                not(target_feature = "atomics")
            )
        ))]
        source: Box<dyn error::Error + Send + 'static>,
        /// Lower level source of the error.
        #[cfg(not(any(
            not(target_arch = "wasm32"),
            all(
                feature = "fragile-send-sync-non-atomic-wasm",
                not(target_feature = "atomics")
            )
        )))]
        source: Box<dyn error::Error + 'static>,
        /// Description of the validation error.
        description: String,
    },
}
#[cfg(any(
    not(target_arch = "wasm32"),
    all(
        feature = "fragile-send-sync-non-atomic-wasm",
        not(target_feature = "atomics")
    )
))]
static_assertions::assert_impl_all!(Error: Send);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
        }
    }
}

use send_sync::*;

mod send_sync {
    use std::any::Any;
    use std::fmt;

    use wgt::{WasmNotSend, WasmNotSync};

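    // A `dyn AnyWasmNotSendSync` reference is not coerced to `dyn Any` automatically, so the
    // blanket impl below exposes `upcast_any_ref` to perform that conversion explicitly; this
    // is what makes `downcast_ref` on the trait object possible.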
    pub trait AnyWasmNotSendSync: Any + WasmNotSend + WasmNotSync {
        fn upcast_any_ref(&self) -> &dyn Any;
    }
    impl<T: Any + WasmNotSend + WasmNotSync> AnyWasmNotSendSync for T {
        #[inline]
        fn upcast_any_ref(&self) -> &dyn Any {
            self
        }
    }

    impl dyn AnyWasmNotSendSync + 'static {
        #[inline]
        pub fn downcast_ref<T: 'static>(&self) -> Option<&T> {
            self.upcast_any_ref().downcast_ref::<T>()
        }
    }

    impl fmt::Debug for dyn AnyWasmNotSendSync {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("Any").finish_non_exhaustive()
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::BufferSize;

    #[test]
    fn range_to_offset_size_works() {
        assert_eq!(crate::range_to_offset_size(0..2), (0, BufferSize::new(2)));
        assert_eq!(crate::range_to_offset_size(2..5), (2, BufferSize::new(3)));
        assert_eq!(crate::range_to_offset_size(..), (0, None));
        assert_eq!(crate::range_to_offset_size(21..), (21, None));
        assert_eq!(crate::range_to_offset_size(0..), (0, None));
        assert_eq!(crate::range_to_offset_size(..21), (0, BufferSize::new(21)));
    }

    #[test]
    #[should_panic]
    fn range_to_offset_size_panics_for_empty_range() {
        crate::range_to_offset_size(123..123);
    }

    #[test]
    #[should_panic]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        crate::range_to_offset_size(..0);
    }
}