use super::conv;
use crate::auxil::map_naga_stage;
use glow::HasContext;
use std::{
    convert::TryInto,
    ptr,
    sync::{Arc, Mutex},
};

use arrayvec::ArrayVec;
#[cfg(not(target_arch = "wasm32"))]
use std::mem;
use std::sync::atomic::Ordering;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::Api>,
);
type NameBindingMap = rustc_hash::FxHashMap<String, (super::BindingRegister, u8)>;

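/// Context for translating one shader stage to GLSL.
///
/// `consume_reflection` folds naga's reflection output back into the
/// name-to-binding and texture-to-sampler tables that program linking
/// uses to assign slots.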
struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    multiview: Option<std::num::NonZeroU32>,
}

impl CompilationContext<'_> {
    fn consume_reflection(
        self,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::debug!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }
    }
}

impl super::Device {
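    /// Wraps an existing GL texture object into a [`super::Texture`].
    ///
    /// # Safety
    ///
    /// A sketch of the expected contract: `name` must be a live texture
    /// object belonging to this device's GL context, and its storage must
    /// match `desc`; `drop_guard` controls whether we take ownership of it.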
    #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
    pub unsafe fn texture_from_raw(
        &self,
        name: std::num::NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_guard: Option<crate::DropGuard>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

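    /// Wraps an existing GL renderbuffer object into a [`super::Texture`].
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::texture_from_raw`], with `name` naming a
    /// renderbuffer instead of a texture.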
    #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: std::num::NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_guard: Option<crate::DropGuard>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

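    /// Compiles one GLSL stage and returns the raw shader object, logging
    /// the info log as a warning when compilation succeeds with messages.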
    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(not(target_arch = "wasm32"))]
        if gl.supports_debug() {
            //TODO: remove the transmutes used for `object_label` names
            let name = unsafe { mem::transmute(raw) };
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::info!("\tCompiled shader {:?}", raw);

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {}", msg);
            }
            Ok(raw)
        } else {
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

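    /// Translates the naga IR for one stage into GLSL, feeds the resulting
    /// reflection info to `context`, and hands the source to
    /// [`Self::compile_shader`].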
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::Api>,
        context: CompilationContext,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_string(),
            multiview: context.multiview,
        };

        let shader = &stage.module.naga;
        let entry_point_index = shader
            .module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        let version = gl.version();
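        // `ReadZeroSkipWrite` relies on the GLSL texture-size queries; we
        // assume those are only dependable on desktop GL here, so embedded
        // profiles fall back to unchecked image loads.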
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (1, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

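        // The remaining checks are either already guaranteed by GLSL or not
        // implemented yet.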
        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            image_store: BoundsCheckPolicy::Unchecked,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let mut writer = glsl::Writer::new(
            &mut output,
            &shader.module,
            &shader.info,
            &context.layout.naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{}", output);

        context.consume_reflection(
            &shader.module,
            shader.info.get_entry_point(entry_point_index),
            reflection_info,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

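    /// Fetches the linked program for this stage combination from
    /// `shared.program_cache`, creating and inserting it on a miss. The
    /// cache key includes the per-group binding-to-slot tables, since the
    /// same shaders with a different layout need a different program.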
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, 3>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<std::num::NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // Holding the lock here also keeps a pipeline being destroyed from
        // deleting the program out from under us while we reuse it.
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

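    /// Compiles, attaches and links all stages into a new GL program, then
    /// resolves bindings and push-constant uniforms for it.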
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, 3>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<std::num::NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: super::PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(not(target_arch = "wasm32"))]
        if let Some(label) = label {
            if gl.supports_debug() {
                let name = unsafe { mem::transmute(program) };
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = arrayvec::ArrayVec::<_, 3>::new();

        for (naga_stage, stage) in shaders {
            has_stages |= map_naga_stage(naga_stage);
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                multiview,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context)?;
            shaders_to_delete.push(shader);
        }

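        // Some implementations (WebGL in particular) won't link a program
        // without a fragment stage, so pair a lone vertex shader with an
        // empty one.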
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::info!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::info!("\tLinked program {:?}", program);

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {}", msg);
        }

        if !private_caps.contains(super::PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            unsafe { gl.use_program(Some(program)) };
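            // Without explicit binding layouts in the shader, rebind
            // everything by name post-link. Storage blocks can't be remapped
            // this way, hence the hard error below.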
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {:?} from program {:?}", name, program);
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!(
                            "Unable to re-map shader storage block {} to {}",
                            name,
                            index
                        );
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms: [super::UniformDesc; super::MAX_PUSH_CONSTANTS] =
            [None; super::MAX_PUSH_CONSTANTS].map(|_: Option<()>| Default::default());
        let count = unsafe { gl.get_active_uniforms(program) };
        let mut offset = 0;

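        // Pack the active non-opaque uniforms into the push-constant table;
        // `offset` advances in bytes, so `offset / 4` is the 4-byte slot a
        // uniform starts at.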
        for uniform in 0..count {
            let glow::ActiveUniform { utype, name, .. } =
                unsafe { gl.get_active_uniform(program, uniform) }.unwrap();

            if conv::is_opaque_type(utype) {
                continue;
            }

            if let Some(location) = unsafe { gl.get_uniform_location(program, &name) } {
                if uniforms[offset / 4].location.is_some() {
                    panic!("Offset already occupied")
                }

                let uniform_size = conv::uniform_byte_size(utype);

                uniforms[offset / 4] = super::UniformDesc {
                    location: Some(location),
                    size: uniform_size,
                    utype,
                };

                offset += uniform_size as usize;
            }
        }

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            uniforms,
        }))
    }
}

impl crate::Device<super::Api> for super::Device {
    unsafe fn exit(self, queue: super::Queue) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_vertex_array(self.main_vao) };
        unsafe { gl.delete_framebuffer(queue.draw_fbo) };
        unsafe { gl.delete_framebuffer(queue.copy_fbo) };
        unsafe { gl.delete_buffer(queue.zero_buffer) };
    }

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(crate::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(super::PrivateCapabilities::BUFFER_ALLOCATION);

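        // With emulated mapping, a MAP_WRITE buffer is just a host-side
        // allocation; the GL buffer object is skipped entirely.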
        if emulate_map && desc.usage.intersects(crate::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(Mutex::new(vec![0; desc.size as usize]))),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(crate::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(crate::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(super::PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            if desc.usage.intersects(crate::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(crate::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
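                // Stick with DYNAMIC_DRAW even for device-local buffers:
                // they still get written internally, and some drivers take
                // the STATIC_DRAW hint literally enough to misbehave here.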
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(crate::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        #[cfg(not(target_arch = "wasm32"))]
        if let Some(label) = desc.label {
            if gl.supports_debug() {
                let name = unsafe { mem::transmute(raw) };
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(crate::BufferUses::MAP_READ) {
            Some(Arc::new(Mutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
        })
    }
    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = buffer.data.as_ref().unwrap().lock().unwrap();
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
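                // Emulated MAP_READ: snapshot the buffer into the shadow
                // allocation and hand out a pointer to that copy instead of
                // a real GL mapping.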
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = map_read_allocation.lock().unwrap();
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
            }
        }
        Ok(())
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
            for range in ranges {
                unsafe {
                    gl.flush_mapped_buffer_range(
                        buffer.target,
                        range.start as i32,
                        (range.end - range.start) as i32,
                    )
                };
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = crate::TextureUses::COLOR_TARGET
            | crate::TextureUses::DEPTH_STENCIL_WRITE
            | crate::TextureUses::DEPTH_STENCIL_READ;
        let format_desc = self.shared.describe_texture_format(desc.format);

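        // Render-target-only 2D textures can live in a renderbuffer, which
        // is cheaper than a full texture object.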
        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(not(target_arch = "wasm32"))]
            if let Some(label) = desc.label {
                if gl.supports_debug() {
                    let name = unsafe { mem::transmute(raw) };
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
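            // Note: the filtering mode has to be adjusted before the storage
            // is defined; integer and non-filterable formats aren't complete
            // with the default LINEAR filters.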
            match desc.format.sample_type(None) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    gl.tex_storage_3d(
                        target,
                        desc.mip_level_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        desc.size.depth_or_array_layers as i32,
                    )
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    gl.tex_storage_2d(
                        target,
                        desc.mip_level_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(not(target_arch = "wasm32"))]
            if let Some(label) = desc.label {
                if gl.supports_debug() {
                    let name = unsafe { mem::transmute(raw) };
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }
    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
                super::TextureInner::ExternalFramebuffer { .. } => {}
            }
        }

        drop(texture.drop_guard);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        Ok(super::TextureView {
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }
    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(not(target_arch = "wasm32"))]
        if let Some(label) = desc.label {
            if gl.supports_debug() {
                let name = unsafe { mem::transmute(raw) };
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        Ok(super::Sampler { raw })
    }
    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Api>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
        })
    }
    unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }
    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::Api>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(super::PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
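        // Always write `gl_PointSize`: drivers ignore it for non-point
        // topologies, and point rendering misbehaves on some GLES drivers
        // without it.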
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

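        // Flatten all bind groups into linear per-class registers: every
        // entry takes the next free slot of its class (sampler, texture,
        // image, uniform or storage buffer) in declaration order.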
        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .last()
                    .map_or(0, |b| b.binding as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            });
        }

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }
    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<super::Api>,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

        for (entry, layout) in desc.entries.iter().zip(desc.layout.entries.iter()) {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    if view.mip_levels.start != 0 || view.array_layers.start != 0 {
                        log::error!(
                            "Unable to create a sampled texture binding for non-zero mipmap level or array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend."
                        )
                    }
                    let (raw, target) = view.inner.as_native();
                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
            };
            contents.push(binding);
        }

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }
    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {}

    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::ShaderInput,
    ) -> Result<super::ShaderModule, crate::ShaderError> {
        Ok(super::ShaderModule {
            naga: match shader {
                crate::ShaderInput::SpirV(_) => {
                    panic!("`Features::SPIRV_SHADER_PASSTHROUGH` is not enabled")
                }
                crate::ShaderInput::Naga(naga) => naga,
            },
            label: desc.label.map(|str| str.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }
    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {}

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<super::Api>,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, &desc.vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner =
            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;

        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in desc.vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
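            // Note: if the targets differ and `INDEPENDENT_BLEND` isn't
            // available, this pipeline won't bind correctly.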
            targets.into_boxed_slice()
        };

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }
    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        let mut program_cache = self.shared.program_cache.lock();
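        // Two remaining strong references mean `pipeline.inner` plus the
        // cache entry itself, so nobody else uses this program and it can
        // be evicted and deleted.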
        if Arc::strong_count(&pipeline.inner) == 2 {
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_program(pipeline.inner.program) };
        }
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<super::Api>,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        Ok(super::ComputePipeline { inner })
    }
    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        let mut program_cache = self.shared.program_cache.lock();
        // Same rule as `destroy_render_pipeline`: two strong references mean
        // only `pipeline.inner` and the cache entry remain.
        if Arc::strong_count(&pipeline.inner) == 2 {
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_program(pipeline.inner.program) };
        }
    }

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();
        let mut temp_string = String::new();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for i in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
            #[cfg(not(target_arch = "wasm32"))]
            if gl.supports_debug() {
                use std::fmt::Write;

                match desc.ty {
                    wgt::QueryType::Timestamp => unsafe {
                        gl.query_counter(query, glow::TIMESTAMP)
                    },
                    _ => (),
                }

                if let Some(label) = desc.label {
                    temp_string.clear();
                    let _ = write!(temp_string, "{label}[{i}]");
                    let name = unsafe { mem::transmute(query) };
                    unsafe { gl.object_label(glow::QUERY, name, Some(&temp_string)) };
                }
            }
            queries.push(query);
        }

        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }
    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
    }
    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        Ok(super::Fence {
            last_completed: 0,
            pending: Vec::new(),
        })
    }
    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        for (_, sync) in fence.pending {
            unsafe { gl.delete_sync(sync) };
        }
    }
    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout_ms: u32,
    ) -> Result<bool, crate::DeviceError> {
        if fence.last_completed < wait_value {
            let gl = &self.shared.context.lock();
            let timeout_ns = if cfg!(target_arch = "wasm32") {
                0
            } else {
                (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
            };
            let &(_, sync) = fence
                .pending
                .iter()
                .find(|&&(value, _)| value >= wait_value)
                .unwrap();
            match unsafe {
                gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32)
            } {
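                // Firefox has been seen returning WAIT_FAILED spuriously;
                // treat it as "not signaled yet" on the web rather than as
                // device loss.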
                #[cfg(target_arch = "wasm32")]
                glow::WAIT_FAILED => {
                    log::warn!("wait failed!");
                    Ok(false)
                }
                glow::TIMEOUT_EXPIRED => Ok(false),
                glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true),
                _ => Err(crate::DeviceError::Lost),
            }
        } else {
            Ok(true)
        }
    }

    unsafe fn start_capture(&self) -> bool {
        #[cfg(all(not(target_arch = "wasm32"), feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_capture(&self) {
        #[cfg(all(not(target_arch = "wasm32"), feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
}

#[cfg(all(
    target_arch = "wasm32",
    feature = "fragile-send-sync-non-atomic-wasm",
    not(target_feature = "atomics")
))]
unsafe impl Sync for super::Device {}
#[cfg(all(
    target_arch = "wasm32",
    feature = "fragile-send-sync-non-atomic-wasm",
    not(target_feature = "atomics")
))]
unsafe impl Send for super::Device {}