// NOTE: `Ordering` is used unconditionally (the `store` calls in the
// `begin_*` methods and the checks in `Drop` are not cfg-gated), so its
// import must not be gated behind debug/validation cfgs — a plain release
// build without `enable-release-validation` would otherwise fail to compile.
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

use wgpu::util::TextureBlitter;

use crate::utils::ArcRef;

use super::{
    buffer::Buffer,
    texture::{BlendState, Texture},
    GPUInner, SwapchainError,
};

pub(crate) mod renderpass;
pub(crate) mod computepass;
pub(crate) mod drawing;
pub(crate) mod utils;

use computepass::{ComputePass, ComputePassBuildError};
use renderpass::{RenderPass, RenderPassBuildError, RenderpassBuilder};
use utils::BindGroupType;
29
/// An attachment that can be bound to a render pass.
pub enum PassAttachment {
    /// A color texture target together with the blend state applied when
    /// rendering into it.
    Texture(Texture, BlendState),
}
33
/// Pairs a shader bind point (`group`, `binding`) with the resource to
/// attach there when building a pass's bind groups.
#[derive(Clone, Debug)]
pub(crate) struct BindGroupAttachment {
    /// Bind group index (`@group(n)` in WGSL).
    pub group: u32,
    /// Binding index within the group (`@binding(n)` in WGSL).
    pub binding: u32,
    /// The concrete resource to bind at this slot.
    pub attachment: BindGroupType,
}
40
/// A texture input for a pass: an optional texture plus the slots where its
/// view and sampler should be bound.
pub struct TextureInput<'a> {
    /// The texture to bind; `None` means no texture for this input.
    pub texture: Option<&'a Texture>,
    /// Binding slot for the texture view.
    // NOTE(review): presumably an index into a bind group — confirm against
    // the drawing/renderpass code that consumes this.
    pub binding_texture: usize,
    /// Binding slot for the texture's sampler.
    pub binding_sampler: usize,
}
46
/// Errors produced when constructing a [`CommandBuffer`].
///
/// Currently no constructor path actually fails; `None` is a placeholder so
/// the `Result` signatures stay stable if real failure modes are added.
#[derive(Clone, Debug)]
pub enum CommandBufferBuildError {
    /// Placeholder variant — no failure occurred.
    None
}
51
/// A recording context wrapping a `wgpu::CommandEncoder`, tracking whether a
/// render/compute pass is currently open and which swapchain texture (if
/// any) the recorded work targets.
#[derive(Clone, Debug)]
pub struct CommandBuffer {
    /// Shared handle to the GPU device/queue state.
    pub(crate) inner: ArcRef<GPUInner>,

    /// The underlying encoder; `None` once `end()` has consumed and
    /// submitted it.
    pub(crate) command: Option<ArcRef<wgpu::CommandEncoder>>,
    /// Set while a render pass begun from this buffer is active; shared with
    /// the pass so it can clear the flag when it ends.
    pub(crate) on_renderpass: Arc<AtomicBool>,
    /// Set while a compute pass begun from this buffer is active; shared
    /// with the pass.
    pub(crate) on_compute: Arc<AtomicBool>,

    /// Surface texture to present; acquired lazily by `begin_renderpass` /
    /// `get_surface_texture` when not supplied up front.
    pub(crate) swapchain: SurfaceTexture,
}
62
63impl CommandBuffer {
64 pub(crate) fn new(inner: ArcRef<GPUInner>) -> Result<Self, CommandBufferBuildError> {
65 let inner_ref = inner.borrow();
66 let command =
67 inner_ref
68 .device()
69 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
70 label: Some("Command Encoder"),
71 });
72
73 drop(inner_ref);
74
75 Ok(Self {
76 inner,
77 command: Some(ArcRef::new(command)),
78 on_renderpass: Arc::new(AtomicBool::new(false)),
79 on_compute: Arc::new(AtomicBool::new(false)),
80
81 swapchain: SurfaceTexture::new(),
82 })
83 }
84
85 pub(crate) fn new_with_surface(
86 inner: ArcRef<GPUInner>,
87 surface: SurfaceTexture,
88 ) -> Result<Self, CommandBufferBuildError> {
89 let inner_ref = inner.borrow();
90 let command =
91 inner_ref
92 .device()
93 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
94 label: Some("Command Encoder"),
95 });
96
97 drop(inner_ref);
98
99 Ok(Self {
100 inner,
101 command: Some(ArcRef::new(command)),
102 on_renderpass: Arc::new(AtomicBool::new(false)),
103 on_compute: Arc::new(AtomicBool::new(false)),
104
105 swapchain: surface,
106 })
107 }
108
109 pub fn renderpass_builder(&mut self) -> RenderpassBuilder {
117 let gpu_arc_ref = ArcRef::clone(&self.inner);
118 let cmd_arc_ref = ArcRef::clone(self.command.as_ref().unwrap());
119 let atomic_pass = Arc::clone(&self.on_renderpass);
120
121 self.on_renderpass.store(true, Ordering::Relaxed);
122
123 RenderpassBuilder::new(gpu_arc_ref, cmd_arc_ref, atomic_pass)
124 }
125
126 pub fn begin_renderpass(&mut self) -> Result<RenderPass, RenderPassBuildError> {
133 #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
134 if self.on_renderpass.load(Ordering::Relaxed) || self.on_compute.load(Ordering::Relaxed) {
135 panic!("CMD already in a render pass or compute pass");
136 }
137
138 if !self.swapchain.is_valid() {
139 let inner_ref = self.inner.borrow();
140
141 let swapchain = inner_ref.get_swapchain();
142
143 match swapchain {
144 Ok(swapchain) => {
145 self.swapchain.set_texture(swapchain);
146 }
147 Err(SwapchainError::Suboptimal(swapchain)) => {
148 self.swapchain.set_texture(swapchain);
149 }
150 Err(err) => {
151 crate::log!("Swapchain error: {}", err);
152 return Err(RenderPassBuildError::SwapchainError(format!(
153 "Failed to create swapchain: {}",
154 err
155 )));
156 }
157 }
158 }
159
160 self.on_renderpass.store(true, Ordering::Relaxed);
161
162 let gpu_arc_ref = ArcRef::clone(&self.inner);
163 let cmd_arc_ref = ArcRef::clone(self.command.as_ref().unwrap());
164 let atomic_pass = Arc::clone(&self.on_renderpass);
165
166 RenderpassBuilder::new(gpu_arc_ref, cmd_arc_ref, atomic_pass)
167 .add_surface_color_attachment(&self.swapchain, None)
168 .build()
169 }
170
171 pub fn begin_depth_texture(
176 &mut self,
177 texture: &Texture,
178 ) -> Result<RenderPass, RenderPassBuildError> {
179 #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
180 if self.on_renderpass.load(Ordering::Relaxed) || self.on_compute.load(Ordering::Relaxed) {
181 panic!("CMD already in a render pass or compute pass");
182 }
183
184 self.on_renderpass.store(true, Ordering::Relaxed);
185
186 let gpu_arc_ref = ArcRef::clone(&self.inner);
187 let cmd_arc_ref = ArcRef::clone(self.command.as_ref().unwrap());
188 let atomic_pass = Arc::clone(&self.on_renderpass);
189
190 RenderpassBuilder::new(gpu_arc_ref, cmd_arc_ref, atomic_pass)
191 .set_depth_attachment(texture)
192 .build()
193 }
194
195 pub fn begin_texture<'a>(
202 &'a mut self,
203 texture: &'a Texture,
204 ) -> Result<RenderPass, RenderPassBuildError> {
205 #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
206 if self.on_renderpass.load(Ordering::Relaxed) || self.on_compute.load(Ordering::Relaxed) {
207 panic!("CMD already in a render pass or compute pass");
208 }
209
210 self.on_renderpass.store(false, Ordering::Relaxed);
211
212 let gpu_arc_ref = ArcRef::clone(&self.inner);
213 let cmd_arc_ref = ArcRef::clone(self.command.as_ref().unwrap());
214 let atomic_pass = Arc::clone(&self.on_renderpass);
215
216 RenderpassBuilder::new(gpu_arc_ref, cmd_arc_ref, atomic_pass)
217 .add_color_attachment(texture, None)
218 .build()
219 }
220
221 pub fn begin_computepass(&mut self) -> Result<ComputePass, ComputePassBuildError> {
223 #[cfg(any(debug_assertions, feature = "enable-release-validation"))]
224 if self.on_renderpass.load(Ordering::Relaxed) || self.on_compute.load(Ordering::Relaxed) {
225 panic!("CMD already in a render pass or compute pass");
226 }
227
228 self.on_renderpass.store(false, Ordering::Relaxed);
229
230 let gpu_arc_ref = ArcRef::clone(&self.inner);
231 let cmd_ref = ArcRef::clone(self.command.as_ref().unwrap());
232 let atomic_pass = Arc::clone(&self.on_compute);
233
234 ComputePass::new(gpu_arc_ref, cmd_ref, atomic_pass)
235 }
236
237 pub fn write_buffer(&mut self, src: &Buffer, dst: &Buffer) {
242 dst.write_cmd(src, self);
243 }
244
245 pub fn write_buffer_raw<T: bytemuck::Pod>(&mut self, data: &[T], dst: &Buffer) {
250 dst.write_raw_cmd(data, self);
251 }
252
253 pub fn blit_texture(&mut self, src: &Texture, dst: &Texture) {
258 let gpu_inner = self.inner.borrow();
259 let mut cmd = self.command.as_ref().unwrap().borrow_mut();
260
261 let blitter = {
262 let dst_format = dst.inner.borrow().format;
263
264 TextureBlitter::new(gpu_inner.device(), dst_format.into())
265 };
266
267 let src_view = &src.inner.borrow().wgpu_view;
268
269 let dst_view = &dst.inner.borrow().wgpu_view;
270
271 blitter.copy(&gpu_inner.device(), &mut cmd, src_view, dst_view);
272 }
273
274 pub fn copy_texture(&mut self, src: &Texture, dst: &Texture) {
281 let mut cmd = self.command.as_ref().unwrap().borrow_mut();
282
283 let src_inner = src.inner.borrow();
285 let dst_inner = dst.inner.borrow();
286
287 if src_inner.format != dst_inner.format {
288 panic!("Source and destination textures must have the same format");
289 }
290
291 if src_inner.size != dst_inner.size {
292 panic!("Source and destination textures must have the same size");
293 }
294
295 if src_inner.wgpu_texture.mip_level_count() != 1 {
296 panic!("Source texture must have only one mip level");
297 }
298
299 if dst_inner.wgpu_texture.mip_level_count() != 1 {
300 panic!("Destination texture must have only one mip level");
301 }
302
303 let src_tex = &src_inner.wgpu_texture;
304 let dst_tex = &dst_inner.wgpu_texture;
305
306 cmd.copy_texture_to_texture(
307 wgpu::TexelCopyTextureInfoBase {
308 texture: src_tex,
309 mip_level: 0,
310 origin: wgpu::Origin3d::ZERO,
311 aspect: wgpu::TextureAspect::All,
312 },
313 wgpu::TexelCopyTextureInfoBase {
314 texture: dst_tex,
315 mip_level: 0,
316 origin: wgpu::Origin3d::ZERO,
317 aspect: wgpu::TextureAspect::All,
318 },
319 wgpu::Extent3d {
320 width: src_inner.size.x as u32,
321 height: src_inner.size.y as u32,
322 depth_or_array_layers: 1,
323 },
324 );
325 }
326
327 pub fn end(&mut self, present: bool) {
328 let inner_ref = self.inner.borrow();
329
330 if self.command.is_none() {
331 return;
332 }
333
334 let cmd = ArcRef::try_unwrap(self.command.take().unwrap()).unwrap_or_else(|_| {
335 panic!("Command buffer dropped while still in use");
336 });
337
338 inner_ref.queue().submit(std::iter::once(cmd.finish()));
339
340 if present {
341 self.swapchain.present();
342 }
343 }
344
345 pub fn get_surface_texture(&mut self) -> Result<SurfaceTexture, SurfaceTextureError> {
349 if !self.swapchain.is_valid() {
350 let inner_ref = self.inner.borrow();
351
352 let swapchain = inner_ref.get_swapchain();
353
354 match swapchain {
355 Ok(swapchain) => {
356 self.swapchain.set_texture(swapchain);
357 }
358 Err(SwapchainError::Suboptimal(swapchain)) => {
359 self.swapchain.set_texture(swapchain);
360 }
361 Err(err) => {
362 match err {
363 SwapchainError::NotAvailable => {
364 return Err(SurfaceTextureError::NotAvailable);
365 }
366 SwapchainError::ConfigNeeded => {
367 return Err(SurfaceTextureError::ConfigNeeded);
368 }
369 SwapchainError::DeviceLost => {
370 return Err(SurfaceTextureError::DeviceLost);
371 }
372 _ => {
373 crate::log!("Swapchain error: {}", err);
374 return Err(SurfaceTextureError::NotAvailable);
375 }
376 }
377 }
378 }
379 }
380
381 Ok(self.swapchain.clone())
382 }
383}
384
385impl Drop for CommandBuffer {
386 fn drop(&mut self) {
387 if std::thread::panicking() {
388 crate::dbg_log!("Dropping command buffer while panicking");
389 return;
390 }
391
392 if self.on_renderpass.load(Ordering::Relaxed) || self.on_compute.load(Ordering::Relaxed) {
393 panic!("Command buffer dropped while still in a render pass or compute pass");
394 }
395
396 self.end(true);
397 }
398}
399
/// Errors returned by [`CommandBuffer::get_surface_texture`] when a
/// swapchain texture could not be acquired.
#[derive(Clone, Debug)]
pub enum SurfaceTextureError {
    /// The swapchain has no texture available right now.
    NotAvailable,
    /// The surface must be (re)configured before a texture can be acquired.
    ConfigNeeded,
    /// The underlying device was lost.
    DeviceLost,
}
406
/// Shared state behind [`SurfaceTexture`].
#[derive(Clone, Debug)]
pub(crate) struct SurfaceTextureInner {
    /// The acquired swapchain texture; `None` before acquisition or after
    /// `present()` has consumed it.
    pub texture: Option<wgpu::SurfaceTexture>,
    /// Whether the surface reported the acquired texture as suboptimal.
    pub suboptimal: bool,
    /// Set once `present()` has handed the texture to the surface.
    pub presented: bool,
}
413
/// Cloneable handle to an (optionally acquired) swapchain texture; clones
/// share the same underlying [`SurfaceTextureInner`] state.
#[derive(Clone, Debug)]
pub struct SurfaceTexture {
    /// Shared state; all clones observe acquisition and presentation.
    pub(crate) inner: ArcRef<SurfaceTextureInner>,
}
422
423impl SurfaceTexture {
424 pub(crate) fn new() -> SurfaceTexture {
425 SurfaceTexture {
426 inner: ArcRef::new(SurfaceTextureInner {
427 texture: None,
428 suboptimal: false,
429 presented: false,
430 }),
431 }
432 }
433
434 pub(crate) fn set_texture(&mut self, texture: wgpu::SurfaceTexture) {
435 let mut inner = self.inner.borrow_mut();
436 inner.suboptimal = texture.suboptimal;
437 inner.texture = Some(texture);
438 inner.presented = false;
439 }
440
441 pub fn get_view(&self) -> wgpu::TextureView {
442 let inner = self.inner.borrow();
443 inner.texture.as_ref().map_or_else(
444 || {
445 panic!("SurfaceTexture has no texture");
446 },
447 |texture| {
448 texture.texture.create_view(&wgpu::TextureViewDescriptor {
449 label: Some("Surface Texture View"),
450 ..Default::default()
451 })
452 },
453 )
454 }
455
456 pub fn get_size(&self) -> wgpu::Extent3d {
457 let inner = self.inner.borrow();
458 inner.texture.as_ref().map_or_else(
459 || {
460 panic!("SurfaceTexture has no texture");
461 },
462 |texture| texture.texture.size(),
463 )
464 }
465
466 pub fn get_format(&self) -> wgpu::TextureFormat {
467 let inner = self.inner.borrow();
468 inner.texture.as_ref().map_or_else(
469 || {
470 panic!("SurfaceTexture has no texture");
471 },
472 |texture| texture.texture.format(),
473 )
474 }
475
476 pub fn is_valid(&self) -> bool {
477 let inner = self.inner.borrow();
478 inner.texture.is_some()
479 }
480
481 pub fn is_suboptimal(&self) -> bool {
482 let inner = self.inner.borrow();
483 inner.suboptimal
484 }
485
486 pub fn present(&mut self) {
487 let mut inner = self.inner.borrow_mut();
488 if let Some(texture) = inner.texture.take() {
489 texture.present();
490 inner.presented = true;
491 }
492 }
493}