1use crate::context::GraphicsContext;
2use crate::types::{GpuTexture, TypedBuffer, UniformBuffer};
3use std::sync::Arc;
4
/// Convenience wrapper around a shared [`GraphicsContext`] exposing helpers
/// for creating GPU resources: shaders, buffers, textures, samplers, bind
/// groups, and render/compute pipelines.
pub struct Renderer {
    // Shared handle to the device/queue pair; cloned cheaply via `Arc`.
    context: Arc<GraphicsContext>,
}
12
13impl Renderer {
14 pub fn new(context: Arc<GraphicsContext>) -> Self {
16 Self { context }
17 }
18
19 pub fn context(&self) -> &GraphicsContext {
21 &self.context
22 }
23
24 pub fn device(&self) -> &wgpu::Device {
26 &self.context.device
27 }
28
29 pub fn queue(&self) -> &wgpu::Queue {
31 &self.context.queue
32 }
33
34 pub fn create_shader(&self, label: Option<&str>, source: &str) -> wgpu::ShaderModule {
36 self.context
37 .device
38 .create_shader_module(wgpu::ShaderModuleDescriptor {
39 label,
40 source: wgpu::ShaderSource::Wgsl(source.into()),
41 })
42 }
43
44 pub fn create_vertex_buffer<T: bytemuck::Pod>(
46 &self,
47 label: Option<&str>,
48 data: &[T],
49 ) -> wgpu::Buffer {
50 let buffer = self.context.device.create_buffer(&wgpu::BufferDescriptor {
51 label,
52 size: std::mem::size_of_val(data) as u64,
53 usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
54 mapped_at_creation: false,
55 });
56
57 self.context
58 .queue
59 .write_buffer(&buffer, 0, bytemuck::cast_slice(data));
60
61 buffer
62 }
63
64 pub fn create_index_buffer<T: bytemuck::Pod>(
66 &self,
67 label: Option<&str>,
68 data: &[T],
69 ) -> wgpu::Buffer {
70 let buffer = self.context.device.create_buffer(&wgpu::BufferDescriptor {
71 label,
72 size: std::mem::size_of_val(data) as u64,
73 usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
74 mapped_at_creation: false,
75 });
76
77 self.context
78 .queue
79 .write_buffer(&buffer, 0, bytemuck::cast_slice(data));
80
81 buffer
82 }
83
84 pub fn create_uniform_buffer<T: bytemuck::Pod>(
86 &self,
87 label: Option<&str>,
88 data: &T,
89 ) -> wgpu::Buffer {
90 let buffer = self.context.device.create_buffer(&wgpu::BufferDescriptor {
91 label,
92 size: std::mem::size_of::<T>() as u64,
93 usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
94 mapped_at_creation: false,
95 });
96
97 self.context.queue.write_buffer(
98 &buffer,
99 0,
100 bytemuck::cast_slice(std::slice::from_ref(data)),
101 );
102
103 buffer
104 }
105
106 pub fn update_uniform_buffer<T: bytemuck::Pod>(&self, buffer: &wgpu::Buffer, data: &T) {
108 self.context.queue.write_buffer(
109 buffer,
110 0,
111 bytemuck::cast_slice(std::slice::from_ref(data)),
112 );
113 }
114
115 pub fn create_storage_buffer(
124 &self,
125 label: Option<&str>,
126 size: u64,
127 read_only: bool,
128 ) -> wgpu::Buffer {
129 let usage = if read_only {
130 wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST
131 } else {
132 wgpu::BufferUsages::STORAGE
133 | wgpu::BufferUsages::COPY_DST
134 | wgpu::BufferUsages::COPY_SRC
135 };
136
137 self.context.device.create_buffer(&wgpu::BufferDescriptor {
138 label,
139 size,
140 usage,
141 mapped_at_creation: false,
142 })
143 }
144
145 pub fn create_storage_buffer_init<T: bytemuck::Pod>(
154 &self,
155 label: Option<&str>,
156 data: &[T],
157 read_only: bool,
158 ) -> wgpu::Buffer {
159 let usage = if read_only {
160 wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST
161 } else {
162 wgpu::BufferUsages::STORAGE
163 | wgpu::BufferUsages::COPY_DST
164 | wgpu::BufferUsages::COPY_SRC
165 };
166
167 let buffer = self.context.device.create_buffer(&wgpu::BufferDescriptor {
168 label,
169 size: std::mem::size_of_val(data) as u64,
170 usage,
171 mapped_at_creation: false,
172 });
173
174 self.context
175 .queue
176 .write_buffer(&buffer, 0, bytemuck::cast_slice(data));
177
178 buffer
179 }
180
181 pub fn update_storage_buffer<T: bytemuck::Pod>(
189 &self,
190 buffer: &wgpu::Buffer,
191 offset: u64,
192 data: &[T],
193 ) {
194 self.context
195 .queue
196 .write_buffer(buffer, offset, bytemuck::cast_slice(data));
197 }
198
199 pub fn create_texture(&self, descriptor: &wgpu::TextureDescriptor) -> wgpu::Texture {
201 self.context.device.create_texture(descriptor)
202 }
203
204 pub fn create_texture_2d(
206 &self,
207 label: Option<&str>,
208 width: u32,
209 height: u32,
210 format: wgpu::TextureFormat,
211 usage: wgpu::TextureUsages,
212 data: &[u8],
213 ) -> wgpu::Texture {
214 let size = wgpu::Extent3d {
215 width,
216 height,
217 depth_or_array_layers: 1,
218 };
219
220 let texture = self
221 .context
222 .device
223 .create_texture(&wgpu::TextureDescriptor {
224 label,
225 size,
226 mip_level_count: 1,
227 sample_count: 1,
228 dimension: wgpu::TextureDimension::D2,
229 format,
230 usage: usage | wgpu::TextureUsages::COPY_DST,
231 view_formats: &[],
232 });
233
234 let bytes_per_pixel = format.block_copy_size(None).unwrap();
235
236 self.context.queue.write_texture(
237 wgpu::TexelCopyTextureInfo {
238 texture: &texture,
239 mip_level: 0,
240 origin: wgpu::Origin3d::ZERO,
241 aspect: wgpu::TextureAspect::All,
242 },
243 data,
244 wgpu::TexelCopyBufferLayout {
245 offset: 0,
246 bytes_per_row: Some(width * bytes_per_pixel),
247 rows_per_image: Some(height),
248 },
249 size,
250 );
251
252 texture
253 }
254
255 pub fn create_sampler(&self, descriptor: &wgpu::SamplerDescriptor) -> wgpu::Sampler {
257 self.context.device.create_sampler(descriptor)
258 }
259
260 pub fn create_linear_sampler(&self, label: Option<&str>) -> wgpu::Sampler {
262 self.context
263 .device
264 .create_sampler(&wgpu::SamplerDescriptor {
265 label,
266 address_mode_u: wgpu::AddressMode::ClampToEdge,
267 address_mode_v: wgpu::AddressMode::ClampToEdge,
268 address_mode_w: wgpu::AddressMode::ClampToEdge,
269 mag_filter: wgpu::FilterMode::Linear,
270 min_filter: wgpu::FilterMode::Linear,
271 mipmap_filter: wgpu::FilterMode::Nearest,
272 ..Default::default()
273 })
274 }
275
276 pub fn create_nearest_sampler(&self, label: Option<&str>) -> wgpu::Sampler {
278 self.context
279 .device
280 .create_sampler(&wgpu::SamplerDescriptor {
281 label,
282 address_mode_u: wgpu::AddressMode::ClampToEdge,
283 address_mode_v: wgpu::AddressMode::ClampToEdge,
284 address_mode_w: wgpu::AddressMode::ClampToEdge,
285 mag_filter: wgpu::FilterMode::Nearest,
286 min_filter: wgpu::FilterMode::Nearest,
287 mipmap_filter: wgpu::FilterMode::Nearest,
288 ..Default::default()
289 })
290 }
291
292 pub fn create_bind_group_layout(
294 &self,
295 label: Option<&str>,
296 entries: &[wgpu::BindGroupLayoutEntry],
297 ) -> wgpu::BindGroupLayout {
298 self.context
299 .device
300 .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label, entries })
301 }
302
303 pub fn create_bind_group(
305 &self,
306 label: Option<&str>,
307 layout: &wgpu::BindGroupLayout,
308 entries: &[wgpu::BindGroupEntry],
309 ) -> wgpu::BindGroup {
310 self.context
311 .device
312 .create_bind_group(&wgpu::BindGroupDescriptor {
313 label,
314 layout,
315 entries,
316 })
317 }
318
319 pub fn create_pipeline_layout(
321 &self,
322 label: Option<&str>,
323 bind_group_layouts: &[&wgpu::BindGroupLayout],
324 push_constant_ranges: &[wgpu::PushConstantRange],
325 ) -> wgpu::PipelineLayout {
326 self.context
327 .device
328 .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
329 label,
330 bind_group_layouts,
331 push_constant_ranges,
332 })
333 }
334
335 pub fn create_render_pipeline(
337 &self,
338 descriptor: &wgpu::RenderPipelineDescriptor,
339 ) -> wgpu::RenderPipeline {
340 self.context.device.create_render_pipeline(descriptor)
341 }
342
343 pub fn create_compute_pipeline(
345 &self,
346 descriptor: &wgpu::ComputePipelineDescriptor,
347 ) -> wgpu::ComputePipeline {
348 self.context.device.create_compute_pipeline(descriptor)
349 }
350
351 pub fn create_command_encoder(&self, label: Option<&str>) -> wgpu::CommandEncoder {
353 self.context
354 .device
355 .create_command_encoder(&wgpu::CommandEncoderDescriptor { label })
356 }
357
358 pub fn submit<I>(&self, command_buffers: I)
360 where
361 I: IntoIterator<Item = wgpu::CommandBuffer>,
362 {
363 self.context.queue.submit(command_buffers);
364 }
365
366 pub fn create_typed_vertex_buffer<T: bytemuck::Pod>(
374 &self,
375 label: Option<&str>,
376 data: &[T],
377 ) -> TypedBuffer<T> {
378 TypedBuffer::new(
379 &self.context.device,
380 label,
381 data,
382 wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
383 )
384 }
385
386 pub fn create_typed_index_buffer<T: bytemuck::Pod>(
390 &self,
391 label: Option<&str>,
392 data: &[T],
393 ) -> TypedBuffer<T> {
394 TypedBuffer::new(
395 &self.context.device,
396 label,
397 data,
398 wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
399 )
400 }
401
402 pub fn create_typed_uniform<T: bytemuck::Pod>(
406 &self,
407 label: Option<&str>,
408 data: &T,
409 ) -> UniformBuffer<T> {
410 UniformBuffer::new_uniform(&self.context.device, label, data)
411 }
412
413 pub fn create_gpu_texture_2d(
417 &self,
418 label: Option<&str>,
419 width: u32,
420 height: u32,
421 format: wgpu::TextureFormat,
422 usage: wgpu::TextureUsages,
423 ) -> GpuTexture {
424 GpuTexture::new_2d(&self.context.device, label, width, height, format, usage)
425 }
426
427 pub fn create_gpu_texture_from_data(
431 &self,
432 label: Option<&str>,
433 width: u32,
434 height: u32,
435 format: wgpu::TextureFormat,
436 data: &[u8],
437 ) -> GpuTexture {
438 GpuTexture::from_data(
439 &self.context.device,
440 &self.context.queue,
441 label,
442 width,
443 height,
444 format,
445 data,
446 )
447 }
448}
449
/// Fluent builder for a [`wgpu::RenderPipeline`].
///
/// Defaults (set in `new`): `vs_main`/`fs_main` entry points, triangle-list
/// topology with CCW front faces and back-face culling, no depth/stencil,
/// single-sample. A shader module and pipeline layout must be supplied
/// before `build`.
pub struct RenderPipelineBuilder<'a> {
    renderer: &'a Renderer,
    // Optional debug label forwarded to the pipeline descriptor.
    label: Option<&'a str>,
    // Required: shader module containing both entry points.
    shader: Option<&'a wgpu::ShaderModule>,
    vertex_entry: &'a str,
    fragment_entry: &'a str,
    // Required: pipeline layout.
    layout: Option<&'a wgpu::PipelineLayout>,
    vertex_buffers: Vec<wgpu::VertexBufferLayout<'a>>,
    color_targets: Vec<Option<wgpu::ColorTargetState>>,
    primitive: wgpu::PrimitiveState,
    depth_stencil: Option<wgpu::DepthStencilState>,
    multisample: wgpu::MultisampleState,
}
464
465impl<'a> RenderPipelineBuilder<'a> {
466 pub fn new(renderer: &'a Renderer) -> Self {
467 Self {
468 renderer,
469 label: None,
470 shader: None,
471 vertex_entry: "vs_main",
472 fragment_entry: "fs_main",
473 layout: None,
474 vertex_buffers: Vec::new(),
475 color_targets: Vec::new(),
476 primitive: wgpu::PrimitiveState {
477 topology: wgpu::PrimitiveTopology::TriangleList,
478 strip_index_format: None,
479 front_face: wgpu::FrontFace::Ccw,
480 cull_mode: Some(wgpu::Face::Back),
481 polygon_mode: wgpu::PolygonMode::Fill,
482 unclipped_depth: false,
483 conservative: false,
484 },
485 depth_stencil: None,
486 multisample: wgpu::MultisampleState {
487 count: 1,
488 mask: !0,
489 alpha_to_coverage_enabled: false,
490 },
491 }
492 }
493
494 pub fn label(mut self, label: &'a str) -> Self {
495 self.label = Some(label);
496 self
497 }
498
499 pub fn shader(mut self, shader: &'a wgpu::ShaderModule) -> Self {
500 self.shader = Some(shader);
501 self
502 }
503
504 pub fn vertex_entry(mut self, entry: &'a str) -> Self {
505 self.vertex_entry = entry;
506 self
507 }
508
509 pub fn fragment_entry(mut self, entry: &'a str) -> Self {
510 self.fragment_entry = entry;
511 self
512 }
513
514 pub fn layout(mut self, layout: &'a wgpu::PipelineLayout) -> Self {
515 self.layout = Some(layout);
516 self
517 }
518
519 pub fn vertex_buffer(mut self, layout: wgpu::VertexBufferLayout<'a>) -> Self {
520 self.vertex_buffers.push(layout);
521 self
522 }
523
524 pub fn color_target(mut self, target: wgpu::ColorTargetState) -> Self {
525 self.color_targets.push(Some(target));
526 self
527 }
528
529 pub fn primitive(mut self, primitive: wgpu::PrimitiveState) -> Self {
530 self.primitive = primitive;
531 self
532 }
533
534 pub fn depth_stencil(mut self, depth_stencil: wgpu::DepthStencilState) -> Self {
535 self.depth_stencil = Some(depth_stencil);
536 self
537 }
538
539 pub fn multisample(mut self, multisample: wgpu::MultisampleState) -> Self {
540 self.multisample = multisample;
541 self
542 }
543
544 pub fn build(self) -> wgpu::RenderPipeline {
545 let shader = self.shader.expect("Shader module is required");
546 let layout = self.layout.expect("Pipeline layout is required");
547
548 self.renderer
549 .create_render_pipeline(&wgpu::RenderPipelineDescriptor {
550 label: self.label,
551 layout: Some(layout),
552 vertex: wgpu::VertexState {
553 module: shader,
554 entry_point: Some(self.vertex_entry),
555 buffers: &self.vertex_buffers,
556 compilation_options: wgpu::PipelineCompilationOptions::default(),
557 },
558 fragment: Some(wgpu::FragmentState {
559 module: shader,
560 entry_point: Some(self.fragment_entry),
561 targets: &self.color_targets,
562 compilation_options: wgpu::PipelineCompilationOptions::default(),
563 }),
564 primitive: self.primitive,
565 depth_stencil: self.depth_stencil,
566 multisample: self.multisample,
567 multiview: None,
568 cache: None,
569 })
570 }
571}
572
/// Fluent builder for a [`wgpu::ComputePipeline`].
///
/// The entry point defaults to `"main"` (set in `new`); a shader module and
/// pipeline layout must be supplied before `build`.
pub struct ComputePipelineBuilder<'a> {
    renderer: &'a Renderer,
    // Optional debug label forwarded to the pipeline descriptor.
    label: Option<&'a str>,
    // Required: shader module containing the compute entry point.
    shader: Option<&'a wgpu::ShaderModule>,
    entry: &'a str,
    // Required: pipeline layout.
    layout: Option<&'a wgpu::PipelineLayout>,
}
592
593impl<'a> ComputePipelineBuilder<'a> {
594 pub fn new(renderer: &'a Renderer) -> Self {
596 Self {
597 renderer,
598 label: None,
599 shader: None,
600 entry: "main",
601 layout: None,
602 }
603 }
604
605 pub fn label(mut self, label: &'a str) -> Self {
607 self.label = Some(label);
608 self
609 }
610
611 pub fn shader(mut self, shader: &'a wgpu::ShaderModule) -> Self {
613 self.shader = Some(shader);
614 self
615 }
616
617 pub fn entry(mut self, entry: &'a str) -> Self {
621 self.entry = entry;
622 self
623 }
624
625 pub fn layout(mut self, layout: &'a wgpu::PipelineLayout) -> Self {
627 self.layout = Some(layout);
628 self
629 }
630
631 pub fn build(self) -> wgpu::ComputePipeline {
637 let shader = self.shader.expect("Shader module is required");
638 let layout = self.layout.expect("Pipeline layout is required");
639
640 self.renderer
641 .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
642 label: self.label,
643 layout: Some(layout),
644 module: shader,
645 entry_point: Some(self.entry),
646 compilation_options: wgpu::PipelineCompilationOptions::default(),
647 cache: None,
648 })
649 }
650}