1#[cfg(dx12)]
2pub(super) mod dxgi;
3
4#[cfg(all(native, feature = "renderdoc"))]
5pub(super) mod renderdoc;
6
/// Known GPU vendor IDs (and a few Intel device-ID prefixes), grouped one
/// submodule per vendor.
pub mod db {
    pub mod amd {
        /// PCI vendor ID for AMD/ATI.
        pub const VENDOR: u32 = 0x1002;
    }
    pub mod apple {
        /// PCI vendor ID for Apple.
        pub const VENDOR: u32 = 0x106B;
    }
    pub mod arm {
        /// PCI vendor ID for Arm (Mali GPUs).
        pub const VENDOR: u32 = 0x13B5;
    }
    pub mod broadcom {
        /// PCI vendor ID for Broadcom (VideoCore GPUs).
        pub const VENDOR: u32 = 0x14E4;
    }
    pub mod imgtec {
        /// PCI vendor ID for Imagination Technologies (PowerVR GPUs).
        pub const VENDOR: u32 = 0x1010;
    }
    pub mod intel {
        /// PCI vendor ID for Intel.
        pub const VENDOR: u32 = 0x8086;
        // NOTE(review): the two constants below look like device-ID prefixes
        // ("MASK" suggests they are compared after masking the low bits of the
        // device id) — confirm the exact comparison with the call sites.
        pub const DEVICE_KABY_LAKE_MASK: u32 = 0x5900;
        pub const DEVICE_SKY_LAKE_MASK: u32 = 0x1900;
    }
    pub mod mesa {
        /// Khronos-assigned vendor ID for Mesa software drivers.
        /// This exceeds 16 bits, so it is not a PCI vendor ID.
        pub const VENDOR: u32 = 0x10005;
    }
    pub mod nvidia {
        /// PCI vendor ID for NVIDIA.
        pub const VENDOR: u32 = 0x10DE;
    }
    pub mod qualcomm {
        /// PCI vendor ID for Qualcomm (Adreno GPUs).
        pub const VENDOR: u32 = 0x5143;
    }
}
51
/// Largest binding size that still fits in a signed 32-bit integer
/// (`i32::MAX`, i.e. `2^31 - 1`).
pub const MAX_I32_BINDING_SIZE: u32 = i32::MAX as u32;
57
58pub use wgpu_naga_bridge::map_naga_stage;
59
60impl crate::CopyExtent {
61 pub fn map_extent_to_copy_size(extent: &wgt::Extent3d, dim: wgt::TextureDimension) -> Self {
62 Self {
63 width: extent.width,
64 height: extent.height,
65 depth: match dim {
66 wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => 1,
67 wgt::TextureDimension::D3 => extent.depth_or_array_layers,
68 },
69 }
70 }
71
72 pub fn min(&self, other: &Self) -> Self {
73 Self {
74 width: self.width.min(other.width),
75 height: self.height.min(other.height),
76 depth: self.depth.min(other.depth),
77 }
78 }
79
80 pub fn at_mip_level(&self, level: u32) -> Self {
84 Self {
85 width: (self.width >> level).max(1),
86 height: (self.height >> level).max(1),
87 depth: (self.depth >> level).max(1),
88 }
89 }
90}
91
92impl crate::TextureCopyBase {
93 pub fn max_copy_size(&self, full_size: &crate::CopyExtent) -> crate::CopyExtent {
94 let mip = full_size.at_mip_level(self.mip_level);
95 crate::CopyExtent {
96 width: mip.width - self.origin.x,
97 height: mip.height - self.origin.y,
98 depth: mip.depth - self.origin.z,
99 }
100 }
101}
102
103impl crate::BufferTextureCopy {
104 pub fn clamp_size_to_virtual(&mut self, full_size: &crate::CopyExtent) {
105 let max_size = self.texture_base.max_copy_size(full_size);
106 self.size = self.size.min(&max_size);
107 }
108}
109
110impl crate::TextureCopy {
111 pub fn clamp_size_to_virtual(
112 &mut self,
113 full_src_size: &crate::CopyExtent,
114 full_dst_size: &crate::CopyExtent,
115 ) {
116 let max_src_size = self.src_base.max_copy_size(full_src_size);
117 let max_dst_size = self.dst_base.max_copy_size(full_dst_size);
118 self.size = self.size.min(&max_src_size).min(&max_dst_size);
119 }
120}
121
/// Adjusts the limits a backend reports so they respect the crate's internal
/// maximums and are mutually consistent with one another.
///
/// Returns the adjusted copy of `limits`. The steps below are
/// order-dependent: per-stage resource caps must be applied before the
/// dynamic-buffer caps that derive from them.
#[cfg_attr(not(any_backend), allow(dead_code))]
pub(crate) fn adjust_raw_limits(mut limits: wgt::Limits) -> wgt::Limits {
    // Cap counts to the crate-level maximums (fixed bookkeeping capacities).
    limits.max_bind_groups = limits.max_bind_groups.min(crate::MAX_BIND_GROUPS as u32);
    limits.max_vertex_buffers = limits
        .max_vertex_buffers
        .min(crate::MAX_VERTEX_BUFFERS as u32);
    limits.max_color_attachments = limits
        .max_color_attachments
        .min(crate::MAX_COLOR_ATTACHMENTS as u32);

    // NOTE(review): presumably vertex + fragment, the worst case for how many
    // stages share one bind group's bindings — confirm this intent.
    const MAX_SHADER_STAGES_PER_PIPELINE: u32 = 2;
    // Per-stage budget: each stage gets an equal share of a bind group's
    // binding count.
    let max_per_stage_resources =
        limits.max_bindings_per_bind_group / MAX_SHADER_STAGES_PER_PIPELINE;

    // Shrink the per-stage resource-type limits so their sum fits the budget
    // (smaller limits keep their value; leftovers go to the larger ones).
    cap_limits_to_be_under_the_sum_limit(
        [
            &mut limits.max_sampled_textures_per_shader_stage,
            &mut limits.max_uniform_buffers_per_shader_stage,
            &mut limits.max_storage_textures_per_shader_stage,
            &mut limits.max_storage_buffers_per_shader_stage,
            &mut limits.max_samplers_per_shader_stage,
            &mut limits.max_acceleration_structures_per_shader_stage,
        ],
        max_per_stage_resources,
    );

    // Dynamic buffers count against the per-stage buffer limits, so they can
    // never exceed them. (Must run after the per-stage caps above.)
    limits.max_dynamic_uniform_buffers_per_pipeline_layout = limits
        .max_dynamic_uniform_buffers_per_pipeline_layout
        .min(limits.max_uniform_buffers_per_shader_stage);
    limits.max_dynamic_storage_buffers_per_pipeline_layout = limits
        .max_dynamic_storage_buffers_per_pipeline_layout
        .min(limits.max_storage_buffers_per_shader_stage);

    // Raise the offset-alignment floors to 32 bytes.
    // NOTE(review): 32 appears to be the minimum alignment the rest of the
    // crate assumes — confirm against the code that relies on it.
    limits.min_uniform_buffer_offset_alignment = limits.min_uniform_buffer_offset_alignment.max(32);
    limits.min_storage_buffer_offset_alignment = limits.min_storage_buffer_offset_alignment.max(32);

    // A single binding can never be larger than the largest creatable buffer.
    limits.max_uniform_buffer_binding_size = limits
        .max_uniform_buffer_binding_size
        .min(limits.max_buffer_size);
    limits.max_storage_buffer_binding_size = limits
        .max_storage_buffer_binding_size
        .min(limits.max_buffer_size);

    // Round down to the required alignments. The `& !(align - 1)` masks
    // assume the alignment constants are powers of two.
    limits.max_storage_buffer_binding_size &= !(u64::from(wgt::STORAGE_BINDING_SIZE_ALIGNMENT) - 1);
    limits.max_vertex_buffer_array_stride &= !(wgt::VERTEX_ALIGNMENT as u32 - 1);

    // Make the compute workgroup limits mutually consistent: no single axis
    // may exceed the total invocation limit, and the invocation limit may not
    // exceed the product of the axis limits. Snapshot the originals first so
    // the four updates don't feed into each other.
    let x = limits.max_compute_workgroup_size_x;
    let y = limits.max_compute_workgroup_size_y;
    let z = limits.max_compute_workgroup_size_z;
    let m = limits.max_compute_invocations_per_workgroup;
    limits.max_compute_workgroup_size_x = x.min(m);
    limits.max_compute_workgroup_size_y = y.min(m);
    limits.max_compute_workgroup_size_z = z.min(m);
    limits.max_compute_invocations_per_workgroup = m.min(x.saturating_mul(y).saturating_mul(z));

    limits
}
192
/// Caps the values in `limits` so that their sum does not exceed `sum_limit`.
///
/// Works greedily from the smallest value to the largest: each value is
/// clamped to an equal share of the budget that remains, and whatever a
/// small value leaves unused is redistributed to the larger ones.
pub fn cap_limits_to_be_under_the_sum_limit<const N: usize>(
    mut limits: [&mut u32; N],
    sum_limit: u32,
) {
    // `Ord` on `&mut u32` compares the pointed-to values, so this orders the
    // references ascending without moving the underlying elements.
    limits.sort();

    let mut remaining = sum_limit;
    for (idx, slot) in limits.into_iter().enumerate() {
        let slots_left = (N - idx) as u32;
        // `*slot <= fair_share <= remaining`, so the subtraction can't
        // underflow.
        let fair_share = remaining / slots_left;
        *slot = (*slot).min(fair_share);
        remaining -= *slot;
    }
}
210
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cap_limits_to_be_under_the_sum_limit() {
        /// Runs the capper on `input` and checks it lands on `expected`.
        #[track_caller]
        fn check<const N: usize>(mut input: [u32; N], limit: u32, expected: [u32; N]) {
            cap_limits_to_be_under_the_sum_limit(input.each_mut(), limit);
            assert_eq!(input, expected);
        }

        // Budget split evenly across equal (or fully coverable) inputs.
        check([3, 3, 3], 3, [1, 1, 1]);
        check([3, 2, 1], 3, [1, 1, 1]);
        // Budget large enough: inputs are untouched.
        check([1, 2, 3], 6, [1, 2, 3]);
        check([1, 2, 3], 3, [1, 1, 1]);
        // Small values keep their full amount; leftovers flow to larger ones.
        check([1, 8, 100], 6, [1, 2, 3]);
        check([2, 80, 80], 6, [2, 2, 2]);
        check([2, 80, 80], 12, [2, 5, 5]);
    }
}