use crate::{
    buffer::Buffer,
    compute_pass::ComputePass,
    context::Context,
    dispatch::Dispatch,
    draw_call::DrawCall,
    render_pass::{ColorAttachment, DepthStencilAttachment, RenderPass},
};

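/// A single unit of GPU work recorded by a [`CommandEncoder`], replayed in
/// order when the encoder is submitted.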
#[derive(Debug)]
pub(crate) enum Pass {
    Render {
        label: Option<String>,
        color_attachments: Vec<ColorAttachment>,
        depth_stencil_attachment: Option<DepthStencilAttachment>,
        multisample: Option<wgpu::MultisampleState>,
        draw_calls: Vec<DrawCall>,
    },
    Compute(Option<String>, Vec<Dispatch>),
    ClearBuffer(Buffer, u64, Option<u64>),
    CopyBufferToBuffer {
        source: Buffer,
        source_offset: usize,
        destination: Buffer,
        destination_offset: usize,
        size: usize,
    },
}

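/// Records GPU work (render passes, compute passes, buffer clears and copies)
/// and submits all of it to the queue when it is dropped.
///
/// A minimal usage sketch, assuming a [`Context`] and a [`Buffer`] have
/// already been created elsewhere; their setup is out of scope here:
///
/// ```ignore
/// let mut encoder = CommandEncoder::new(Some("frame"), &context);
/// encoder.clear_buffer(&buffer, 0, None);
/// // Dropping the encoder at the end of scope submits the recorded work.
/// ```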
pub struct CommandEncoder {
    label: Option<String>,
    context: Context,
    pub(crate) passes: Vec<Pass>,
}

impl CommandEncoder {
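    /// Creates an empty encoder; nothing reaches the GPU until the encoder is
    /// dropped and its recorded passes are submitted.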
    pub fn new(label: Option<&str>, context: &Context) -> Self {
        Self {
            label: label.map(|s| s.to_string()),
            context: context.clone(),
            passes: vec![],
        }
    }

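    /// Begins recording a compute pass on this encoder.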
    pub fn compute_pass(&mut self, label: Option<&str>) -> ComputePass {
        ComputePass::new(label, self)
    }

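    /// Begins recording a render pass on this encoder with the given
    /// attachments and optional multisample state.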
    pub fn render_pass(
        &mut self,
        label: Option<&str>,
        color_attachments: Vec<ColorAttachment>,
        depth_stencil_attachment: Option<DepthStencilAttachment>,
        multisample: Option<wgpu::MultisampleState>,
    ) -> RenderPass {
        RenderPass::new(
            label,
            color_attachments,
            depth_stencil_attachment,
            multisample,
            self,
        )
    }

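    /// Records a buffer clear starting at `offset` bytes; a `size` of `None`
    /// clears to the end of the buffer.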
    pub fn clear_buffer(&mut self, buffer: &Buffer, offset: u64, size: Option<u64>) {
        self.passes
            .push(Pass::ClearBuffer(buffer.clone(), offset, size));
    }

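    /// Records a copy of `size` bytes from `source` to `destination` at the
    /// given byte offsets.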
    pub fn copy_buffer_to_buffer(
        &mut self,
        source: &Buffer,
        source_offset: usize,
        destination: &Buffer,
        destination_offset: usize,
        size: usize,
    ) {
        self.passes.push(Pass::CopyBufferToBuffer {
            source: source.clone(),
            source_offset,
            destination: destination.clone(),
            destination_offset,
            size,
        });
    }

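    /// Replays every recorded pass into a `wgpu::CommandEncoder`, submits the
    /// resulting command buffer to the queue, and ages the resource caches.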
    fn submit(&mut self) {
        let mut encoder =
            self.context
                .device()
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: self.label.as_deref(),
                });

        for p in &self.passes {
            match p {
                Pass::Render {
                    label,
                    color_attachments,
                    depth_stencil_attachment,
                    multisample,
                    draw_calls,
                } => Self::record_render_pass(
                    label,
                    color_attachments,
                    depth_stencil_attachment,
                    multisample,
                    draw_calls,
                    &mut encoder,
                    &self.context,
                ),
                Pass::Compute(label, dispatches) => {
                    Self::record_compute_pass(label, dispatches, &mut encoder, &self.context)
                }
                Pass::ClearBuffer(buffer, offset, size) => {
                    encoder.clear_buffer(buffer.buffer(), *offset, *size)
                }
                Pass::CopyBufferToBuffer {
                    source,
                    source_offset,
                    destination,
                    destination_offset,
                    size,
                } => encoder.copy_buffer_to_buffer(
                    source.buffer(),
                    *source_offset as u64,
                    destination.buffer(),
                    *destination_offset as u64,
                    *size as u64,
                ),
            }
        }

        self.context.queue().submit(Some(encoder.finish()));

        self.context.caches().age();
    }

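    /// Builds (or fetches from cache) the bind groups and pipelines for every
    /// dispatch up front, then records them into a single compute pass.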
    fn record_compute_pass(
        label: &Option<String>,
        dispatches: &[Dispatch],
        encoder: &mut wgpu::CommandEncoder,
        context: &Context,
    ) {
        let bind_groups = dispatches
            .iter()
            .map(|dispatch| {
                dispatch
                    .bind_groups
                    .iter()
                    .map(|bind_group| bind_group.get_or_build(context))
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();

        let pipelines = dispatches
            .iter()
            .map(|dispatch| {
                dispatch
                    .pipeline
                    .get_or_build(context, &dispatch.bind_groups)
            })
            .collect::<Vec<_>>();

        let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: label.as_deref(),
            timestamp_writes: None,
        });

        for (i, dispatch) in dispatches.iter().enumerate() {
            for j in 0..dispatch.bind_groups.len() {
                compute_pass.set_bind_group(
                    j as u32,
                    &bind_groups[i][j],
                    &dispatch.bind_group_offsets[j],
                );
            }

            compute_pass.set_pipeline(&pipelines[i]);

            let (x, y, z) = dispatch.extent;
            compute_pass.dispatch_workgroups(x, y, z);
        }
    }

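    /// Builds (or fetches from cache) the bind groups and pipelines for every
    /// draw call, resolves the attachment views, then records the draw calls
    /// into a single render pass.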
    fn record_render_pass(
        label: &Option<String>,
        color_attachments: &[ColorAttachment],
        depth_stencil_attachment: &Option<DepthStencilAttachment>,
        multisample: &Option<wgpu::MultisampleState>,
        draw_calls: &[DrawCall],
        encoder: &mut wgpu::CommandEncoder,
        context: &Context,
    ) {
        let bind_groups = draw_calls
            .iter()
            .map(|draw_call| {
                draw_call
                    .bind_groups
                    .iter()
                    .map(|bind_group| bind_group.get_or_build(context))
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();

        let color_formats = color_attachments
            .iter()
            .map(|c| c.target.format)
            .collect::<Vec<_>>();

        let pipelines = draw_calls
            .iter()
            .map(|draw_call| {
                draw_call.pipeline.get_or_build(
                    &color_formats,
                    depth_stencil_attachment.as_ref().map(|d| d.target.format),
                    multisample,
                    &draw_call.rasteriser_state,
                    &draw_call.bind_groups,
                    context,
                )
            })
            .collect::<Vec<_>>();

        let resolve_targets = color_attachments
            .iter()
            .map(|c| c.resolve_target.as_ref().map(|r| r.view.clone()))
            .collect::<Vec<_>>();

        let color_attachments = color_attachments
            .iter()
            .enumerate()
            .map(|(i, c)| {
                Some(wgpu::RenderPassColorAttachment {
                    view: &c.target.view,
                    resolve_target: resolve_targets[i].as_ref(),
                    ops: c.ops,
                })
            })
            .collect::<Vec<_>>();

        let depth_view = depth_stencil_attachment
            .as_ref()
            .map(|d| d.target.view.clone());

        let desc = wgpu::RenderPassDescriptor {
            label: label.as_deref(),
            color_attachments: &color_attachments,
            depth_stencil_attachment: depth_stencil_attachment.as_ref().map(|d| {
                wgpu::RenderPassDepthStencilAttachment {
                    view: depth_view.as_ref().unwrap(),
                    depth_ops: d.depth_ops,
                    stencil_ops: d.stencil_ops,
                }
            }),
            timestamp_writes: None,
            occlusion_query_set: None,
        };
        let mut render_pass = encoder.begin_render_pass(&desc);

        for (index, draw_call) in draw_calls.iter().enumerate() {
            for j in 0..draw_call.bind_groups.len() {
                render_pass.set_bind_group(
                    j as u32,
                    &bind_groups[index][j],
                    &draw_call.bind_group_offsets[j],
                );
            }

            render_pass.set_pipeline(&pipelines[index]);

            for (idx, buffer_slice) in draw_call.vertices.iter().enumerate() {
                render_pass.set_vertex_buffer(idx as u32, buffer_slice.get());
            }

            if let Some(buffer_slice) = &draw_call.indices {
                render_pass.set_index_buffer(buffer_slice.get(), wgpu::IndexFormat::Uint16);

                render_pass.draw_indexed(
                    draw_call.element_range.start as u32..draw_call.element_range.end as u32,
                    0,
                    draw_call.instance_range.start as u32..draw_call.instance_range.end as u32,
                );
            } else {
                render_pass.draw(
                    draw_call.element_range.start as u32..draw_call.element_range.end as u32,
                    draw_call.instance_range.start as u32..draw_call.instance_range.end as u32,
                );
            }
        }
    }
}

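// Submission happens on drop, so letting the encoder go out of scope is
// enough to flush the recorded work to the GPU.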
impl Drop for CommandEncoder {
    fn drop(&mut self) {
        self.submit();
    }
}