1use std::error::Error;
2
3use crate::math::{PhysicalSizeI32, Point, ScaleFactor};
4
/// Identifier used to look up a user-defined primitive within its custom pipeline.
5pub type PrimitiveID = u32;
6
7#[derive(Debug, Clone, Copy, PartialEq)]
/// A user-defined primitive queued for rendering by a [`CustomPipeline`].
8pub struct CustomPrimitive {
    /// Identifier of the primitive within its pipeline.
 9    pub id: PrimitiveID,
    /// Position offset applied when rendering the primitive.
10    pub offset: Point,
    /// Index selecting which registered custom pipeline renders this primitive.
11    pub pipeline_index: u8,
12}
13
/// A user-implemented wgpu render pipeline for drawing custom primitives.
14pub trait CustomPipeline {
    /// Returns `true` when [`CustomPipeline::prepare`] should be called
    /// before the next render pass.
15    fn needs_preparing(&self) -> bool;
16
    /// Upload/update GPU resources for the queued primitives.
    ///
    /// Called before rendering when [`CustomPipeline::needs_preparing`]
    /// returns `true`. Errors are boxed so implementations may use any
    /// error type.
17    fn prepare(
18        &mut self,
19        device: &wgpu::Device,
20        queue: &wgpu::Queue,
21        screen_size: PhysicalSizeI32,
22        scale_factor: ScaleFactor,
23        primitives: &[QueuedCustomPrimitive],
24    ) -> Result<(), Box<dyn Error>>;
25
    /// Record draw commands for the queued primitives into `render_pass`.
    ///
    /// The `'pass` lifetime ties borrowed GPU resources in `self` to the
    /// lifetime of the render pass that records them.
26    fn render_primitives<'pass>(
27        &'pass self,
28        primitives: &[QueuedCustomPrimitive],
29        render_pass: &mut wgpu::RenderPass<'pass>,
30    ) -> Result<(), Box<dyn Error>>;
31}
32
33#[derive(Debug, Clone, Copy, PartialEq)]
/// A primitive as handed to a [`CustomPipeline`]: its id plus render offset.
/// Unlike [`CustomPrimitive`], the pipeline index is dropped because the
/// primitive has already been routed to its pipeline.
34pub struct QueuedCustomPrimitive {
    /// Identifier of the primitive within its pipeline.
35    pub id: PrimitiveID,
    /// Position offset applied when rendering the primitive.
36    pub offset: Point,
37}
38
39#[repr(C)]
42#[derive(Debug, Clone, Copy, bytemuck::Zeroable, bytemuck::Pod)]
/// Per-frame constant uniforms shared by default pipelines.
///
/// `#[repr(C)]` plus `bytemuck::Pod` lets this be written directly into a
/// wgpu uniform buffer via `bytemuck::bytes_of` (see `prepare_buffer`).
/// Total size is 16 bytes (2 + 1 + 1 `f32`s); the explicit padding keeps
/// the layout a multiple of 16 — presumably to satisfy WGSL uniform-buffer
/// alignment rules (NOTE(review): confirm against the shader side).
43pub struct DefaultConstantUniforms {
    // Multipliers converting physical screen coordinates to clip space.
44    pub screen_to_clip_scale: [f32; 2],
    // DPI scale factor, forwarded to shaders.
45    pub scale_factor: f32,
    // Explicit padding only; always written as 0.0.
46    pub _padding: f32,
47}
48
49impl DefaultConstantUniforms {
    /// Build the uniform values for the given screen size and scale factor.
    pub fn new(screen_size: PhysicalSizeI32, scale_factor: ScaleFactor) -> Self {
        let screen_to_clip_scale =
            crate::math::screen_to_clip_scale(screen_size, scale_factor);

        Self {
            screen_to_clip_scale,
            scale_factor: scale_factor.0,
            _padding: 0.0,
        }
    }

    /// Create the bind group layout, uniform buffer, and bind group used by
    /// default pipelines to access these constants at binding 0.
    pub fn layout_buffer_and_bind_group(
        device: &wgpu::Device,
    ) -> (wgpu::BindGroupLayout, wgpu::Buffer, wgpu::BindGroup) {
        let uniforms_size = std::mem::size_of::<Self>() as wgpu::BufferAddress;

        // The buffer is uninitialized here; `prepare_buffer` fills it each frame.
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("rootvg-core constants buffer"),
            size: uniforms_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("rootvg-core constants layout"),
            entries: &[Self::entry(0)],
        });

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("rootvg-core constants bind group"),
            layout: &layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: buffer.as_entire_binding(),
            }],
        });

        (layout, buffer, bind_group)
    }

    /// Recompute the uniforms and upload them to `buffer` via the queue.
    pub fn prepare_buffer(
        buffer: &wgpu::Buffer,
        screen_size: PhysicalSizeI32,
        scale_factor: ScaleFactor,
        queue: &wgpu::Queue,
    ) {
        queue.write_buffer(
            buffer,
            0,
            bytemuck::bytes_of(&Self::new(screen_size, scale_factor)),
        );
    }

    /// Bind group layout entry describing this uniform buffer at `binding`,
    /// visible to the vertex stage only.
    pub fn entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
        let uniforms_size = std::mem::size_of::<Self>() as wgpu::BufferAddress;

        wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Uniform,
                has_dynamic_offset: false,
                min_binding_size: wgpu::BufferSize::new(uniforms_size),
            },
            count: None,
        }
    }
}