use glam::Vec4Swizzles;
use rustc_hash::FxHashMap;
use std::borrow::Cow;

use wgpu::util::align_to;

use crate::defines::{
    FragmentBlendUniforms, FragmentBlendWithFogUniforms, FragmentCombineUniforms,
    FragmentFrameUniforms, PipelineConfig, PipelineId, ShaderEntry, TextureData, VertexUniforms,
    VertexWithFogUniforms,
};
use crate::wgpu_program::ShaderVersion;
use fast3d::output::{
    gfx::{BlendFactor, BlendOperation, BlendState, CompareFunction, Face},
    models::{OutputSampler, OutputStencil, OutputTexture},
    ShaderConfig, ShaderId,
};
use fast3d::output::{IntermediateDrawCall, RenderData};
use fast3d_gbi::defines::WrapMode;

use super::wgpu_program::WgpuProgram;

const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

type BindGroupConfig = (ShaderId, wgpu::BufferAddress, wgpu::BufferAddress);
type BindGroupBufferConfig = (wgpu::BufferAddress, wgpu::BufferAddress);
type BindGroupConfigOutput = (
    Vec<u64>,
    Vec<BindGroupConfig>,
    Vec<BindGroupConfig>,
    Vec<BindGroupBufferConfig>,
    Vec<Option<BindGroupBufferConfig>>,
);

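/// A draw call that has been fully resolved for wgpu: it references a cached
/// shader and pipeline, up to two cached textures, and a slice of the shared
/// vertex buffer, together with the viewport and scissor state captured from
/// the RCP output.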
pub struct WgpuDrawCall {
    pub shader_id: ShaderId,
    pub pipeline_id: PipelineId,
    pub textures: [Option<usize>; 2],

    pub vertex_buffer_offset: wgpu::BufferAddress,
    pub vertex_count: usize,

    pub viewport: glam::Vec4,
    pub scissor: [u32; 4],
}

impl WgpuDrawCall {
    fn new(
        shader_id: ShaderId,
        pipeline_id: PipelineId,
        vertex_buffer_offset: wgpu::BufferAddress,
        vertex_count: usize,
        viewport: glam::Vec4,
        scissor: [u32; 4],
    ) -> Self {
        Self {
            shader_id,
            pipeline_id,
            textures: [None; 2],

            vertex_buffer_offset,
            vertex_count,

            viewport,
            scissor,
        }
    }
}

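/// Translates fast3d's intermediate `RenderData` into wgpu resources (shaders,
/// pipelines, textures, uniform buffers and bind groups) and replays the
/// resulting draw calls into a render pass.
///
/// A minimal usage sketch, assuming an already-initialized `wgpu::Device`,
/// `wgpu::Queue`, surface format, command encoder and a `RenderData` produced
/// by fast3d (the surrounding names are illustrative, not part of this module):
///
/// ```ignore
/// let mut renderer = WgpuRenderer::new(&device, [width, height]);
/// renderer.process_rcp_output(&device, &queue, surface_format, &mut render_data);
/// {
///     let mut rpass = encoder.begin_render_pass(&render_pass_descriptor);
///     renderer.draw(&mut rpass);
/// }
/// renderer.update_frame_count();
/// ```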
pub struct WgpuRenderer<'a> {
    frame_count: i32,
    current_height: i32,
    screen_size: [u32; 2],

    texture_cache: Vec<TextureData>,
    shader_cache: FxHashMap<ShaderId, ShaderEntry<'a>>,
    pipeline_cache: FxHashMap<PipelineId, wgpu::RenderPipeline>,

    vertex_buffer: wgpu::Buffer,
    vertex_uniform_buffer: wgpu::Buffer,
    blend_uniform_buffer: wgpu::Buffer,
    combine_uniform_buffer: wgpu::Buffer,
    frame_uniform_buffer: wgpu::Buffer,
    vertex_uniform_bind_groups: Vec<wgpu::BindGroup>,
    fragment_uniform_bind_groups: Vec<wgpu::BindGroup>,

    texture_bind_group_layout: wgpu::BindGroupLayout,
    texture_bind_groups: FxHashMap<usize, wgpu::BindGroup>,

    draw_calls: Vec<WgpuDrawCall>,

    last_pipeline_id: Option<PipelineId>,
}

impl<'a> WgpuRenderer<'a> {
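    /// Creates the renderer with a shared texture bind group layout and a set
    /// of fixed-capacity buffers (vertex data plus per-draw-call uniform
    /// storage) that are refilled on each call to `process_rcp_output`.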
    pub fn new(device: &wgpu::Device, screen_size: [u32; 2]) -> Self {
        let texture_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: None,
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        let vertex_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Vertex Buffer"),
            size: 600000,
            usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let vertex_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Vertex Uniform Buffer"),
            size: 400000,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let blend_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Blend Uniform Buffer"),
            size: 400000,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let combine_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Combine Uniform Buffer"),
            size: 400000,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let frame_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Frame Uniform Buffer"),
            size: 100000,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        Self {
            frame_count: 0,
            current_height: 0,
            screen_size,

            texture_cache: Vec::new(),
            shader_cache: FxHashMap::default(),
            pipeline_cache: FxHashMap::default(),

            vertex_buffer,
            vertex_uniform_buffer,
            blend_uniform_buffer,
            combine_uniform_buffer,
            frame_uniform_buffer,
            vertex_uniform_bind_groups: Vec::new(),
            fragment_uniform_bind_groups: Vec::new(),

            texture_bind_group_layout,
            texture_bind_groups: FxHashMap::default(),

            draw_calls: Vec::new(),

            last_pipeline_id: None,
        }
    }

    fn clear_state(&mut self) {
        self.draw_calls.clear();
        self.last_pipeline_id = None;
        self.vertex_uniform_bind_groups.clear();
        self.fragment_uniform_bind_groups.clear();
    }

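    /// Turns the frame's `RenderData` into GPU-ready draw calls: compiles and
    /// caches any missing shaders, packs vertex and uniform data into the
    /// shared buffers, builds the per-draw-call bind groups, ensures a render
    /// pipeline exists for each shader/blend/cull/depth combination, and
    /// uploads any textures not yet on the device. All but the last entry of
    /// `render_data.draw_calls` are processed.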
    pub fn process_rcp_output(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        surface_format: wgpu::TextureFormat,
        render_data: &mut RenderData,
    ) {
        self.clear_state();

        let usable_draw_calls = &render_data.draw_calls[0..render_data.draw_calls.len() - 1];

        usable_draw_calls.iter().for_each(|draw_call| {
            self.prepare_shader(device, &draw_call.shader_id, &draw_call.shader_config);
        });

        let (
            vertex_buffer_offsets,
            vertex_uniform_buffer_configs,
            blend_uniform_buffer_configs,
            combine_uniform_buffer_configs,
            frame_uniform_buffer_configs,
        ) = self.configure_buffers(queue, usable_draw_calls);

        self.configure_uniform_bind_groups(
            device,
            &vertex_uniform_buffer_configs,
            &blend_uniform_buffer_configs,
            &combine_uniform_buffer_configs,
            &frame_uniform_buffer_configs,
        );

        for (index, draw_call) in usable_draw_calls.iter().enumerate() {
            assert!(!draw_call.vbo.vbo.is_empty());

            let pipeline_config = PipelineConfig {
                shader: draw_call.shader_id,
                blend_state: draw_call.blend_state,
                cull_mode: draw_call.cull_mode,
                depth_stencil: draw_call.stencil,
            };
            let pipeline_id = PipelineId(pipeline_config);

            self.configure_pipeline(
                device,
                surface_format,
                &draw_call.shader_id,
                pipeline_id,
                draw_call.blend_state,
                draw_call.cull_mode,
                draw_call.stencil,
            );

            let mut wgpu_draw_call = WgpuDrawCall::new(
                draw_call.shader_id,
                pipeline_id,
                vertex_buffer_offsets[index],
                draw_call.vbo.num_tris * 3,
                draw_call.viewport,
                draw_call.scissor,
            );

            for (index, tex_cache_id) in draw_call.texture_indices.iter().enumerate() {
                if let Some(tex_cache_id) = tex_cache_id {
                    let sampler = draw_call.samplers[index];
                    assert!(sampler.is_some());

                    let texture = render_data.texture_cache.get_mut(*tex_cache_id).unwrap();
                    let sampler = sampler.unwrap();

                    self.configure_textures(
                        device,
                        queue,
                        index,
                        texture,
                        &sampler,
                        &mut wgpu_draw_call,
                    );
                }
            }

            self.draw_calls.push(wgpu_draw_call);
        }
    }

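    /// Replays the prepared draw calls into `rpass`: binds the cached pipeline
    /// (only when it changes), the per-draw-call uniform bind groups, any
    /// texture bind groups (groups 2 and 3), and the matching vertex buffer
    /// slice, then issues a non-indexed draw. The viewport and scissor Y
    /// coordinates are flipped against the screen height because the incoming
    /// rectangles are measured from the bottom of the screen while wgpu's
    /// origin is the top-left corner.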
    pub fn draw<'r>(&'r mut self, rpass: &mut wgpu::RenderPass<'r>) {
        for (index, draw_call) in self.draw_calls.iter().enumerate() {
            if self.last_pipeline_id != Some(draw_call.pipeline_id) {
                let pipeline = self.pipeline_cache.get(&draw_call.pipeline_id).unwrap();
                rpass.set_pipeline(pipeline);
                self.last_pipeline_id = Some(draw_call.pipeline_id);
            }

            let vertex_uniform_bind_group = self.vertex_uniform_bind_groups.get(index).unwrap();
            rpass.set_bind_group(0, vertex_uniform_bind_group, &[]);
            let fragment_uniform_bind_group =
                self.fragment_uniform_bind_groups.get(index).unwrap();
            rpass.set_bind_group(1, fragment_uniform_bind_group, &[]);

            for i in 0..2 {
                if let Some(texture_id) = draw_call.textures[i] {
                    let texture_bind_group = self
                        .texture_bind_groups
                        .get(&texture_id)
                        .expect("Texture bind group not found");

                    rpass.set_bind_group(2 + i as u32, texture_bind_group, &[]);
                }
            }

            if index < self.draw_calls.len() - 1 {
                let next_draw_call = &self.draw_calls[index + 1];
                rpass.set_vertex_buffer(
                    0,
                    self.vertex_buffer
                        .slice(draw_call.vertex_buffer_offset..next_draw_call.vertex_buffer_offset),
                );
            } else {
                rpass.set_vertex_buffer(
                    0,
                    self.vertex_buffer.slice(draw_call.vertex_buffer_offset..),
                );
            }

            rpass.set_viewport(
                draw_call.viewport.x,
                self.screen_size[1] as f32 - draw_call.viewport.y - draw_call.viewport.w,
                draw_call.viewport.z,
                draw_call.viewport.w,
                0.0,
                1.0,
            );

            rpass.set_scissor_rect(
                draw_call.scissor[0],
                self.screen_size[1] - draw_call.scissor[1] - draw_call.scissor[3],
                draw_call.scissor[2],
                draw_call.scissor[3],
            );

            rpass.draw(0..draw_call.vertex_count as u32, 0..1);
        }
    }

    pub fn resize(&mut self, screen_size: [u32; 2]) {
        self.screen_size = screen_size;
    }

    pub fn update_frame_count(&mut self) {
        self.frame_count += 1;
    }

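    /// Uploads the texture for the given tile if it has not been seen before:
    /// creates an `Rgba8Unorm` texture, writes the pixel data, builds a sampler
    /// from the clamp/filter flags, and caches the resulting bind group under
    /// the texture-cache index. That index is also stored back on the
    /// `OutputTexture` as `device_id`, so later draw calls reuse the cached
    /// resources instead of re-uploading.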
    fn configure_textures(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        tile: usize,
        texture: &mut OutputTexture,
        sampler: &OutputSampler,
        output_draw_call: &mut WgpuDrawCall,
    ) {
        if let Some(texture_id) = texture.device_id {
            output_draw_call.textures[tile] = Some(texture_id as usize);
            return;
        }

        let texture_extent = wgpu::Extent3d {
            width: texture.width,
            height: texture.height,
            depth_or_array_layers: 1,
        };

        let device_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: None,
            size: texture_extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8Unorm,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });

        let bytes_per_pixel = 4;
        let bytes_per_row = bytes_per_pixel * texture.width;

        queue.write_texture(
            device_texture.as_image_copy(),
            &texture.data,
            wgpu::ImageDataLayout {
                offset: 0,
                bytes_per_row: Some(bytes_per_row),
                rows_per_image: None,
            },
            texture_extent,
        );

        let texture_view = device_texture.create_view(&wgpu::TextureViewDescriptor::default());

        output_draw_call.textures[tile] = Some(self.texture_cache.len());
        texture.device_id = Some(self.texture_cache.len() as u32);

        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("Texture Sampler"),
            address_mode_u: clamp_to_wgpu(sampler.clamp_s),
            address_mode_v: clamp_to_wgpu(sampler.clamp_t),
            address_mode_w: wgpu::AddressMode::Repeat,
            mag_filter: if sampler.linear_filter {
                wgpu::FilterMode::Linear
            } else {
                wgpu::FilterMode::Nearest
            },
            min_filter: if sampler.linear_filter {
                wgpu::FilterMode::Linear
            } else {
                wgpu::FilterMode::Nearest
            },
            ..Default::default()
        });

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Texture Bind Group"),
            layout: &self.texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&texture_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
        });

        self.texture_bind_groups
            .insert(self.texture_cache.len(), bind_group);
        self.texture_cache
            .push(TextureData::new(texture_view, sampler));
    }

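    /// Compiles the generated GLSL vertex and fragment programs for
    /// `shader_id` through wgpu's naga GLSL front-end and caches the resulting
    /// `ShaderEntry` by shader id. Shaders that are already cached are skipped.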
    fn prepare_shader(
        &mut self,
        device: &wgpu::Device,
        shader_id: &ShaderId,
        shader_config: &ShaderConfig,
    ) {
        if self.shader_cache.contains_key(shader_id) {
            return;
        }

        let mut program = WgpuProgram::new(shader_config);
        program.init();
        program.preprocess(&ShaderVersion::GLSL440);

        program.compiled_vertex_program =
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: None,
                source: wgpu::ShaderSource::Glsl {
                    shader: Cow::Borrowed(&program.preprocessed_vertex),
                    stage: naga::ShaderStage::Vertex,
                    defines: program.defines.clone(),
                },
            }));

        program.compiled_fragment_program =
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: None,
                source: wgpu::ShaderSource::Glsl {
                    shader: Cow::Borrowed(&program.preprocessed_frag),
                    stage: naga::ShaderStage::Fragment,
                    defines: program.defines.clone(),
                },
            }));

        self.shader_cache
            .insert(*shader_id, ShaderEntry::new(program, device));
    }

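    /// Packs vertex data and every per-draw-call uniform block into CPU-side
    /// staging vectors, recording each block's offset and size, and then
    /// uploads each vector with a single `queue.write_buffer` call. Every
    /// uniform block is padded out to a 256-byte boundary so the recorded
    /// offsets satisfy wgpu's default uniform-buffer offset alignment of 256
    /// bytes. A minimal sketch of the padding arithmetic used below:
    ///
    /// ```ignore
    /// // align_to rounds the struct size up to the next multiple of 256,
    /// // e.g. a 144-byte uniform block occupies 256 bytes in the buffer.
    /// let uniform_size = align_to(
    ///     std::mem::size_of::<VertexUniforms>() as wgpu::BufferAddress,
    ///     256,
    /// );
    /// ```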
    fn configure_buffers(
        &mut self,
        queue: &wgpu::Queue,
        draw_calls: &[IntermediateDrawCall],
    ) -> BindGroupConfigOutput {
        let mut current_vbo_offset = 0;
        let mut vertex_buffer_content: Vec<u8> = Vec::new();
        let mut vertex_buffer_offsets: Vec<u64> = Vec::new();

        let mut current_vertex_uniform_offset = 0;
        let mut vertex_uniform_buffer_content: Vec<u8> = Vec::new();
        let mut vertex_uniform_buffer_configs: Vec<BindGroupConfig> = Vec::new();

        let mut current_blend_uniform_offset = 0;
        let mut blend_uniform_buffer_content: Vec<u8> = Vec::new();
        let mut blend_uniform_buffer_configs: Vec<BindGroupConfig> = Vec::new();

        let mut current_combine_uniform_offset = 0;
        let mut combine_uniform_buffer_content: Vec<u8> = Vec::new();
        let mut combine_uniform_buffer_configs: Vec<BindGroupBufferConfig> = Vec::new();

        let mut current_frame_uniform_offset = 0;
        let mut frame_uniform_buffer_content: Vec<u8> = Vec::new();
        let mut frame_uniform_buffer_configs: Vec<Option<BindGroupBufferConfig>> = Vec::new();

        for draw_call in draw_calls {
            let shader_entry = self.shader_cache.get(&draw_call.shader_id).unwrap();

            vertex_buffer_content.extend_from_slice(&draw_call.vbo.vbo);
            vertex_buffer_offsets.push(current_vbo_offset);
            current_vbo_offset += draw_call.vbo.vbo.len() as u64;

            {
                if shader_entry.program.uses_fog() {
                    let uniform = VertexWithFogUniforms::new(
                        [self.screen_size[0] as f32, self.screen_size[1] as f32],
                        draw_call.projection_matrix.to_cols_array_2d(),
                        draw_call.fog.multiplier as f32,
                        draw_call.fog.offset as f32,
                    );

                    let uniform_size = align_to(
                        std::mem::size_of::<VertexWithFogUniforms>() as wgpu::BufferAddress,
                        256,
                    );

                    vertex_uniform_buffer_content.extend_from_slice(bytemuck::bytes_of(&uniform));
                    vertex_uniform_buffer_content.extend_from_slice(&vec![
                        0;
                        uniform_size as usize
                            - std::mem::size_of::<VertexWithFogUniforms>()
                    ]);

                    vertex_uniform_buffer_configs.push((
                        draw_call.shader_id,
                        current_vertex_uniform_offset,
                        uniform_size,
                    ));
                    current_vertex_uniform_offset += uniform_size;
                } else {
                    let uniform = VertexUniforms::new(
                        [self.screen_size[0] as f32, self.screen_size[1] as f32],
                        draw_call.projection_matrix.to_cols_array_2d(),
                    );

                    let uniform_size = align_to(
                        std::mem::size_of::<VertexUniforms>() as wgpu::BufferAddress,
                        256,
                    );

                    vertex_uniform_buffer_content.extend_from_slice(bytemuck::bytes_of(&uniform));
                    vertex_uniform_buffer_content.extend_from_slice(&vec![
                        0;
                        uniform_size as usize - std::mem::size_of::<VertexUniforms>()
                    ]);

                    vertex_uniform_buffer_configs.push((
                        draw_call.shader_id,
                        current_vertex_uniform_offset,
                        uniform_size,
                    ));
                    current_vertex_uniform_offset += uniform_size;
                }
            }

            {
                if shader_entry.program.uses_fog() {
                    let uniform = FragmentBlendWithFogUniforms::new(
                        draw_call.uniforms.blend.blend_color.to_array(),
                        draw_call.uniforms.blend.fog_color.xyz().to_array(),
                    );

                    let uniform_size = align_to(
                        std::mem::size_of::<FragmentBlendWithFogUniforms>() as wgpu::BufferAddress,
                        256,
                    );

                    blend_uniform_buffer_content.extend_from_slice(bytemuck::bytes_of(&uniform));
                    blend_uniform_buffer_content.extend_from_slice(&vec![
                        0;
                        uniform_size as usize
                            - std::mem::size_of::<FragmentBlendWithFogUniforms>()
                    ]);

                    blend_uniform_buffer_configs.push((
                        draw_call.shader_id,
                        current_blend_uniform_offset,
                        uniform_size,
                    ));
                    current_blend_uniform_offset += uniform_size;
                } else {
                    let uniform = FragmentBlendUniforms {
                        blend_color: draw_call.uniforms.blend.blend_color.to_array(),
                    };

                    let uniform_size = align_to(
                        std::mem::size_of::<FragmentBlendUniforms>() as wgpu::BufferAddress,
                        256,
                    );

                    blend_uniform_buffer_content.extend_from_slice(bytemuck::bytes_of(&uniform));
                    blend_uniform_buffer_content.extend_from_slice(&vec![
                        0;
                        uniform_size as usize - std::mem::size_of::<FragmentBlendUniforms>()
                    ]);

                    blend_uniform_buffer_configs.push((
                        draw_call.shader_id,
                        current_blend_uniform_offset,
                        uniform_size,
                    ));
                    current_blend_uniform_offset += uniform_size;
                }
            }

            {
                let uniform = FragmentCombineUniforms::new(
                    draw_call.uniforms.combine.prim_color.to_array(),
                    draw_call.uniforms.combine.env_color.to_array(),
                    draw_call.uniforms.combine.key_center.to_array(),
                    draw_call.uniforms.combine.key_scale.to_array(),
                    draw_call.uniforms.combine.prim_lod.x,
                    draw_call.uniforms.combine.convert_k4,
                    draw_call.uniforms.combine.convert_k5,
                );

                let uniform_size = align_to(
                    std::mem::size_of::<FragmentCombineUniforms>() as wgpu::BufferAddress,
                    256,
                );

                combine_uniform_buffer_content.extend_from_slice(bytemuck::bytes_of(&uniform));
                combine_uniform_buffer_content.extend_from_slice(&vec![
                    0;
                    uniform_size as usize - std::mem::size_of::<FragmentCombineUniforms>()
                ]);

                combine_uniform_buffer_configs
                    .push((current_combine_uniform_offset, uniform_size));
                current_combine_uniform_offset += uniform_size;
            }

            {
                if shader_entry.program.uses_alpha()
                    && shader_entry.program.uses_alpha_compare_dither()
                {
                    let uniform = FragmentFrameUniforms {
                        count: self.frame_count as u32,
                        height: self.current_height as u32,
                    };

                    let uniform_size = align_to(
                        std::mem::size_of::<FragmentFrameUniforms>() as wgpu::BufferAddress,
                        256,
                    );

                    frame_uniform_buffer_content.extend_from_slice(bytemuck::bytes_of(&uniform));
                    frame_uniform_buffer_content.extend_from_slice(&vec![
                        0;
                        uniform_size as usize - std::mem::size_of::<FragmentFrameUniforms>()
                    ]);

                    frame_uniform_buffer_configs
                        .push(Some((current_frame_uniform_offset, uniform_size)));
                    current_frame_uniform_offset += uniform_size;
                } else {
                    frame_uniform_buffer_configs.push(None);
                }
            }
        }

        queue.write_buffer(&self.vertex_buffer, 0, &vertex_buffer_content);
        queue.write_buffer(
            &self.vertex_uniform_buffer,
            0,
            &vertex_uniform_buffer_content,
        );
        queue.write_buffer(&self.blend_uniform_buffer, 0, &blend_uniform_buffer_content);
        queue.write_buffer(
            &self.combine_uniform_buffer,
            0,
            &combine_uniform_buffer_content,
        );
        queue.write_buffer(&self.frame_uniform_buffer, 0, &frame_uniform_buffer_content);

        (
            vertex_buffer_offsets,
            vertex_uniform_buffer_configs,
            blend_uniform_buffer_configs,
            combine_uniform_buffer_configs,
            frame_uniform_buffer_configs,
        )
    }

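    /// Builds one vertex-uniform bind group (group 0) and one fragment-uniform
    /// bind group (group 1) per draw call, pointing each binding at the offset
    /// and size recorded by `configure_buffers`. The frame-uniforms binding is
    /// only added for draw calls whose shader actually needs it.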
    fn configure_uniform_bind_groups(
        &mut self,
        device: &wgpu::Device,
        vertex_uniform_buffer_configs: &[BindGroupConfig],
        blend_uniform_buffer_configs: &[BindGroupConfig],
        combine_uniform_buffer_configs: &[BindGroupBufferConfig],
        frame_uniform_buffer_configs: &[Option<BindGroupBufferConfig>],
    ) {
        for (shader_id, offset, size) in vertex_uniform_buffer_configs {
            let shader_entry = self.shader_cache.get(shader_id).unwrap();

            let vertex_uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("Vertex Uniform Bind Group"),
                layout: &shader_entry.vertex_uniform_bind_group_layout,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &self.vertex_uniform_buffer,
                        offset: *offset,
                        size: wgpu::BufferSize::new(*size),
                    }),
                }],
            });

            self.vertex_uniform_bind_groups
                .push(vertex_uniform_bind_group);
        }

        for (
            ((shader_id, blend_offset, blend_size), (combine_offset, combine_size)),
            frame_option,
        ) in blend_uniform_buffer_configs
            .iter()
            .zip(combine_uniform_buffer_configs.iter())
            .zip(frame_uniform_buffer_configs.iter())
        {
            let shader_entry = self.shader_cache.get(shader_id).unwrap();

            let mut bind_group_entries = vec![
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &self.blend_uniform_buffer,
                        offset: *blend_offset,
                        size: wgpu::BufferSize::new(*blend_size),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &self.combine_uniform_buffer,
                        offset: *combine_offset,
                        size: wgpu::BufferSize::new(*combine_size),
                    }),
                },
            ];

            if let Some((frame_offset, frame_size)) = frame_option {
                bind_group_entries.push(wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &self.frame_uniform_buffer,
                        offset: *frame_offset,
                        size: wgpu::BufferSize::new(*frame_size),
                    }),
                });
            }

            let fragment_uniform_bind_group =
                device.create_bind_group(&wgpu::BindGroupDescriptor {
                    label: Some("Fragment Uniform Bind Group"),
                    layout: &shader_entry.fragment_uniform_bind_group_layout,
                    entries: &bind_group_entries,
                });

            self.fragment_uniform_bind_groups
                .push(fragment_uniform_bind_group);
        }
    }

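    /// Creates and caches a render pipeline for the given `PipelineId`, which
    /// encodes the shader plus blend, cull and depth/stencil state. Texture
    /// bind group layouts are appended only for the texture slots the shader
    /// samples, and the polygon-offset flag is mapped to a negative depth-bias
    /// slope scale.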
    #[allow(clippy::too_many_arguments)]
    fn configure_pipeline(
        &mut self,
        device: &wgpu::Device,
        surface_texture_format: wgpu::TextureFormat,
        shader_id: &ShaderId,
        pipeline_id: PipelineId,
        blend_state: Option<BlendState>,
        cull_mode: Option<Face>,
        depth_stencil: Option<OutputStencil>,
    ) {
        let program = self.shader_cache.get(shader_id).unwrap();

        if self.pipeline_cache.contains_key(&pipeline_id) {
            return;
        }

        let mut bind_group_layout_entries = vec![
            &program.vertex_uniform_bind_group_layout,
            &program.fragment_uniform_bind_group_layout,
        ];

        if program.program.uses_texture_0() {
            bind_group_layout_entries.push(&self.texture_bind_group_layout);
        }

        if program.program.uses_texture_1() {
            bind_group_layout_entries.push(&self.texture_bind_group_layout);
        }

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Pipeline Layout"),
            bind_group_layouts: &bind_group_layout_entries,
            push_constant_ranges: &[],
        });

        let color_target_states = wgpu::ColorTargetState {
            format: surface_texture_format,
            blend: blend_state_to_wgpu(blend_state),
            write_mask: wgpu::ColorWrites::ALL,
        };

        let depth_stencil = depth_stencil.map(|ds| wgpu::DepthStencilState {
            format: DEPTH_FORMAT,
            depth_write_enabled: ds.depth_write_enabled,
            depth_compare: compare_function_to_wgpu(ds.depth_compare),
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState {
                constant: 0,
                slope_scale: if ds.polygon_offset { -2.0 } else { 0.0 },
                clamp: 0.0,
            },
        });

        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: program.program.compiled_vertex_program.as_ref().unwrap(),
                entry_point: "main",
                buffers: &[program.vertex_buf_layout.clone()],
            },
            fragment: Some(wgpu::FragmentState {
                module: program.program.compiled_fragment_program.as_ref().unwrap(),
                entry_point: "main",
                targets: &[Some(color_target_states)],
            }),
            primitive: wgpu::PrimitiveState {
                cull_mode: face_to_wgpu(cull_mode),
                ..Default::default()
            },
            depth_stencil,
            multisample: wgpu::MultisampleState::default(),
            multiview: None,
        });

        self.pipeline_cache.insert(pipeline_id, pipeline);
    }
}

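// Conversion helpers mapping fast3d's platform-agnostic wrap, cull, compare and
// blend enums onto their wgpu equivalents.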
fn clamp_to_wgpu(clamp: WrapMode) -> wgpu::AddressMode {
    if clamp == WrapMode::Clamp {
        return wgpu::AddressMode::ClampToEdge;
    } else if clamp == WrapMode::MirrorRepeat {
        return wgpu::AddressMode::MirrorRepeat;
    }

    wgpu::AddressMode::Repeat
}

fn face_to_wgpu(face: Option<Face>) -> Option<wgpu::Face> {
    face.map(|face| match face {
        Face::Front => wgpu::Face::Front,
        Face::Back => wgpu::Face::Back,
    })
}

fn compare_function_to_wgpu(func: CompareFunction) -> wgpu::CompareFunction {
    match func {
        CompareFunction::Never => wgpu::CompareFunction::Never,
        CompareFunction::Less => wgpu::CompareFunction::Less,
        CompareFunction::Equal => wgpu::CompareFunction::Equal,
        CompareFunction::LessEqual => wgpu::CompareFunction::LessEqual,
        CompareFunction::Greater => wgpu::CompareFunction::Greater,
        CompareFunction::NotEqual => wgpu::CompareFunction::NotEqual,
        CompareFunction::GreaterEqual => wgpu::CompareFunction::GreaterEqual,
        CompareFunction::Always => wgpu::CompareFunction::Always,
    }
}

fn blend_state_to_wgpu(state: Option<BlendState>) -> Option<wgpu::BlendState> {
    state.map(|state| wgpu::BlendState {
        color: wgpu::BlendComponent {
            src_factor: blend_factor_to_wgpu(state.color.src_factor),
            dst_factor: blend_factor_to_wgpu(state.color.dst_factor),
            operation: blend_op_to_wgpu(state.color.operation),
        },
        alpha: wgpu::BlendComponent {
            src_factor: blend_factor_to_wgpu(state.alpha.src_factor),
            dst_factor: blend_factor_to_wgpu(state.alpha.dst_factor),
            operation: blend_op_to_wgpu(state.alpha.operation),
        },
    })
}

fn blend_factor_to_wgpu(factor: BlendFactor) -> wgpu::BlendFactor {
    match factor {
        BlendFactor::Zero => wgpu::BlendFactor::Zero,
        BlendFactor::One => wgpu::BlendFactor::One,
        BlendFactor::Src => wgpu::BlendFactor::Src,
        BlendFactor::OneMinusSrc => wgpu::BlendFactor::OneMinusSrc,
        BlendFactor::SrcAlpha => wgpu::BlendFactor::SrcAlpha,
        BlendFactor::OneMinusSrcAlpha => wgpu::BlendFactor::OneMinusSrcAlpha,
        BlendFactor::Dst => wgpu::BlendFactor::Dst,
        BlendFactor::OneMinusDst => wgpu::BlendFactor::OneMinusDst,
        BlendFactor::DstAlpha => wgpu::BlendFactor::DstAlpha,
        BlendFactor::OneMinusDstAlpha => wgpu::BlendFactor::OneMinusDstAlpha,
        BlendFactor::SrcAlphaSaturated => wgpu::BlendFactor::SrcAlphaSaturated,
        BlendFactor::Constant => wgpu::BlendFactor::Constant,
        BlendFactor::OneMinusConstant => wgpu::BlendFactor::OneMinusConstant,
    }
}

fn blend_op_to_wgpu(op: BlendOperation) -> wgpu::BlendOperation {
    match op {
        BlendOperation::Add => wgpu::BlendOperation::Add,
        BlendOperation::Subtract => wgpu::BlendOperation::Subtract,
        BlendOperation::ReverseSubtract => wgpu::BlendOperation::ReverseSubtract,
        BlendOperation::Min => wgpu::BlendOperation::Min,
        BlendOperation::Max => wgpu::BlendOperation::Max,
    }
}
971}