1use std::{
2 fmt,
3 mem::{forget, size_of_val},
4 ops::Range,
5};
6
7use bytemuck::{cast_slice, Pod};
8use scoped_arena::Scope;
9
10use crate::{
11 accel::AccelerationStructureBuildGeometryInfo,
12 access::Access,
13 arith_ge, arith_le,
14 buffer::{Buffer, BufferMemoryBarrier},
15 descriptor::{DescriptorSet, UpdatedPipelineDescriptors},
16 framebuffer::{Framebuffer, FramebufferError},
17 image::{Image, ImageBlit, ImageMemoryBarrier, Layout, SubresourceLayers},
18 minimal_extent,
19 pipeline::{
20 ComputePipeline, DynamicGraphicsPipeline, GraphicsPipeline, PipelineInputLayout,
21 PipelineLayout, RayTracingPipeline, ShaderBindingTable, Viewport,
22 },
23 queue::QueueCapabilityFlags,
24 render_pass::{ClearValue, RenderPass, RenderPassInstance},
25 sampler::Filter,
26 shader::ShaderStageFlags,
27 stage::PipelineStages,
28 BufferInfo, BufferUsage, Device, Extent3, Format, IndexType, Offset3, OutOfMemory,
29 PipelinePushConstants, Rect, RenderingColorInfo, RenderingDepthStencilAttachmentInfo,
30 RenderingInfo,
31};
32
33pub use crate::backend::CommandBuffer;
34
/// Global memory barrier: a source/destination access-mask pair for a
/// pipeline barrier that is not tied to a particular buffer or image.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct MemoryBarrier {
    /// Accesses that must be made available before the barrier.
    pub src: Access,

    /// Accesses that wait until the barrier completes.
    pub dst: Access,
}
44
/// One region of a buffer-to-buffer copy: `size` bytes from
/// `src_offset` in the source to `dst_offset` in the destination.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "serde-1", derive(serde::Serialize, serde::Deserialize))]
pub struct BufferCopy {
    /// Byte offset into the source buffer.
    pub src_offset: u64,
    /// Byte offset into the destination buffer.
    pub dst_offset: u64,
    /// Number of bytes to copy.
    pub size: u64,
}
52
/// One region of an image-to-image copy between matching subresources.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "serde-1", derive(serde::Serialize, serde::Deserialize))]
pub struct ImageCopy {
    /// Mip level and array layers read from the source image.
    pub src_subresource: SubresourceLayers,
    /// Texel offset into the source subresource.
    pub src_offset: Offset3,
    /// Mip level and array layers written in the destination image.
    pub dst_subresource: SubresourceLayers,
    /// Texel offset into the destination subresource.
    pub dst_offset: Offset3,
    /// Size in texels of the region copied.
    pub extent: Extent3,
}
62
/// One region of a buffer-to-image (or image-to-buffer) copy.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "serde-1", derive(serde::Serialize, serde::Deserialize))]
pub struct BufferImageCopy {
    /// Byte offset where the image data begins in the buffer.
    pub buffer_offset: u64,
    /// Row pitch in texels; 0 means tightly packed to `image_extent`.
    pub buffer_row_length: u32,
    /// Image height in texels in buffer memory; 0 means tightly packed.
    pub buffer_image_height: u32,
    /// Mip level and array layers of the image touched by the copy.
    pub image_subresource: SubresourceLayers,
    /// Texel offset into the image subresource.
    pub image_offset: Offset3,
    /// Size in texels of the image region.
    pub image_extent: Extent3,
}
73
/// Commands recorded by encoders before being written into a
/// [`CommandBuffer`]. Borrowed payloads live in the encoder's arena scope.
#[derive(Debug)]
pub(crate) enum Command<'a> {
    /// Begin a render pass instance on `framebuffer`; `clears` supplies
    /// clear values for attachments with a clearing load op.
    BeginRenderPass {
        framebuffer: &'a Framebuffer,
        clears: &'a [ClearValue],
    },
    /// End the current render pass instance.
    EndRenderPass,

    /// Bind a graphics pipeline for subsequent draws.
    BindGraphicsPipeline {
        pipeline: &'a GraphicsPipeline,
    },

    /// Bind a compute pipeline for subsequent dispatches.
    BindComputePipeline {
        pipeline: &'a ComputePipeline,
    },

    /// Bind a ray-tracing pipeline for subsequent trace commands.
    BindRayTracingPipeline {
        pipeline: &'a RayTracingPipeline,
    },

    /// Bind descriptor sets to the graphics bind point starting at `first_set`.
    BindGraphicsDescriptorSets {
        layout: &'a PipelineLayout,
        first_set: u32,
        sets: &'a [&'a DescriptorSet],
        dynamic_offsets: &'a [u32],
    },

    /// Bind descriptor sets to the compute bind point starting at `first_set`.
    BindComputeDescriptorSets {
        layout: &'a PipelineLayout,
        first_set: u32,
        sets: &'a [&'a DescriptorSet],
        dynamic_offsets: &'a [u32],
    },

    /// Bind descriptor sets to the ray-tracing bind point starting at `first_set`.
    BindRayTracingDescriptorSets {
        layout: &'a PipelineLayout,
        first_set: u32,
        sets: &'a [&'a DescriptorSet],
        dynamic_offsets: &'a [u32],
    },

    /// Set the dynamic viewport state.
    SetViewport {
        viewport: Viewport,
    },

    /// Set the dynamic scissor state.
    SetScissor {
        scissor: Rect,
    },

    /// Non-indexed draw over `vertices`, repeated for each of `instances`.
    Draw {
        vertices: Range<u32>,
        instances: Range<u32>,
    },

    /// Indexed draw over `indices` with `vertex_offset` added to each index.
    DrawIndexed {
        indices: Range<u32>,
        vertex_offset: i32,
        instances: Range<u32>,
    },

    /// Inline update of a buffer region with `data`.
    UpdateBuffer {
        buffer: &'a Buffer,
        offset: u64,
        data: &'a [u8],
    },

    /// Bind vertex buffers (buffer, byte offset) starting at binding `first`.
    BindVertexBuffers {
        first: u32,
        buffers: &'a [(&'a Buffer, u64)],
    },

    /// Bind the index buffer used by indexed draws.
    BindIndexBuffer {
        buffer: &'a Buffer,
        offset: u64,
        index_type: IndexType,
    },

    /// Build or update acceleration structures described by `infos`.
    BuildAccelerationStructure {
        infos: &'a [AccelerationStructureBuildGeometryInfo<'a>],
    },

    /// Launch rays over `extent` using `shader_binding_table`.
    TraceRays {
        shader_binding_table: &'a ShaderBindingTable,
        extent: Extent3,
    },

    /// Copy regions between two buffers.
    CopyBuffer {
        src_buffer: &'a Buffer,
        dst_buffer: &'a Buffer,
        regions: &'a [BufferCopy],
    },

    /// Copy regions between two images in the given layouts.
    CopyImage {
        src_image: &'a Image,
        src_layout: Layout,
        dst_image: &'a Image,
        dst_layout: Layout,
        regions: &'a [ImageCopy],
    },

    /// Copy regions from a buffer into an image.
    CopyBufferImage {
        src_buffer: &'a Buffer,
        dst_image: &'a Image,
        dst_layout: Layout,
        regions: &'a [BufferImageCopy],
    },

    /// Blit (scaled copy) regions between images using `filter`.
    BlitImage {
        src_image: &'a Image,
        src_layout: Layout,
        dst_image: &'a Image,
        dst_layout: Layout,
        regions: &'a [ImageBlit],
        filter: Filter,
    },

    /// Pipeline barrier between `src` and `dst` stages, with optional
    /// image, buffer, and global memory barriers.
    PipelineBarrier {
        src: PipelineStages,
        dst: PipelineStages,
        images: &'a [ImageMemoryBarrier<'a>],
        buffers: &'a [BufferMemoryBarrier<'a>],
        memory: Option<MemoryBarrier>,
    },

    /// Write push-constant bytes visible to `stages` at `offset`.
    PushConstants {
        layout: &'a PipelineLayout,
        stages: ShaderStageFlags,
        offset: u32,
        data: &'a [u8],
    },

    /// Dispatch a compute grid of `x * y * z` workgroups.
    Dispatch {
        x: u32,
        y: u32,
        z: u32,
    },

    /// Begin dynamic rendering (render pass-less) as described by `info`.
    BeginRendering {
        info: RenderingInfo<'a>,
    },

    /// End the current dynamic rendering scope.
    EndRendering,
}
217
/// State shared by all encoder types: the raw command buffer being
/// recorded, the capabilities of the destination queue (used to validate
/// commands), and the arena scope used for transient command payloads.
#[allow(missing_debug_implementations)]
pub struct EncoderCommon<'a> {
    // Capabilities of the queue this command buffer will be submitted to;
    // checked by `assert!`s before recording capability-specific commands.
    capabilities: QueueCapabilityFlags,
    // Arena that outlives the encoder; command payloads are allocated here.
    scope: &'a Scope<'a>,
    command_buffer: CommandBuffer,
}
227
228impl<'a> EncoderCommon<'a> {
229 pub fn scope(&self) -> &'a Scope<'a> {
230 self.scope
231 }
232
233 pub fn set_viewport(&mut self, viewport: Viewport) {
234 assert!(self.capabilities.supports_graphics());
235
236 self.command_buffer
237 .write(self.scope, Command::SetViewport { viewport });
238 }
239
240 pub fn set_scissor(&mut self, scissor: Rect) {
241 assert!(self.capabilities.supports_graphics());
242
243 self.command_buffer
244 .write(self.scope, Command::SetScissor { scissor })
245 }
246
247 pub fn bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline) {
248 assert!(self.capabilities.supports_graphics());
249
250 self.command_buffer
251 .write(self.scope, Command::BindGraphicsPipeline { pipeline })
252 }
253
254 pub fn bind_compute_pipeline(&mut self, pipeline: &ComputePipeline) {
255 assert!(self.capabilities.supports_compute());
256 self.command_buffer
257 .write(self.scope, Command::BindComputePipeline { pipeline })
258 }
259
260 pub fn bind_ray_tracing_pipeline(&mut self, pipeline: &RayTracingPipeline) {
261 assert!(self.capabilities.supports_compute());
262
263 self.command_buffer
264 .write(self.scope, Command::BindRayTracingPipeline { pipeline })
265 }
266
267 pub fn bind_vertex_buffers(&mut self, first: u32, buffers: &[(&Buffer, u64)]) {
268 assert!(self.capabilities.supports_graphics());
269
270 self.command_buffer
271 .write(self.scope, Command::BindVertexBuffers { first, buffers })
272 }
273
274 pub fn bind_index_buffer(&mut self, buffer: &Buffer, offset: u64, index_type: IndexType) {
275 assert!(self.capabilities.supports_graphics());
276
277 self.command_buffer.write(
278 self.scope,
279 Command::BindIndexBuffer {
280 buffer,
281 offset,
282 index_type,
283 },
284 )
285 }
286
287 pub fn bind_graphics_descriptor_sets(
288 &mut self,
289 layout: &PipelineLayout,
290 first_set: u32,
291 sets: &[&DescriptorSet],
292 dynamic_offsets: &[u32],
293 ) {
294 assert!(self.capabilities.supports_graphics());
295
296 self.command_buffer.write(
297 self.scope,
298 Command::BindGraphicsDescriptorSets {
299 layout,
300 first_set,
301 sets,
302 dynamic_offsets,
303 },
304 );
305 }
306
307 pub fn bind_graphics_descriptors<P, const N: u32>(
308 &mut self,
309 layout: &P,
310 descriptors: &impl UpdatedPipelineDescriptors<P, N>,
311 ) where
312 P: PipelineInputLayout,
313 {
314 layout.bind_graphics(descriptors, self);
315 }
316
317 pub fn bind_compute_descriptor_sets(
318 &mut self,
319 layout: &PipelineLayout,
320 first_set: u32,
321 sets: &[&DescriptorSet],
322 dynamic_offsets: &[u32],
323 ) {
324 assert!(self.capabilities.supports_compute());
325
326 self.command_buffer.write(
327 self.scope,
328 Command::BindComputeDescriptorSets {
329 layout,
330 first_set,
331 sets,
332 dynamic_offsets,
333 },
334 );
335 }
336
337 pub fn bind_compute_descriptors<P, const N: u32>(
338 &mut self,
339 layout: &P,
340 descriptors: &impl UpdatedPipelineDescriptors<P, N>,
341 ) where
342 P: PipelineInputLayout,
343 {
344 layout.bind_compute(descriptors, self);
345 }
346
347 pub fn bind_ray_tracing_descriptor_sets(
348 &mut self,
349 layout: &PipelineLayout,
350 first_set: u32,
351 sets: &[&DescriptorSet],
352 dynamic_offsets: &[u32],
353 ) {
354 assert!(self.capabilities.supports_compute());
355
356 self.command_buffer.write(
357 self.scope,
358 Command::BindRayTracingDescriptorSets {
359 layout,
360 first_set,
361 sets,
362 dynamic_offsets,
363 },
364 );
365 }
366
367 pub fn bind_ray_tracing_descriptors<P, const N: u32>(
368 &mut self,
369 layout: &P,
370 descriptors: &impl UpdatedPipelineDescriptors<P, N>,
371 ) where
372 P: PipelineInputLayout,
373 {
374 layout.bind_ray_tracing(descriptors, self);
375 }
376
377 pub fn push_constants_pod<T>(
378 &mut self,
379 layout: &PipelineLayout,
380 stages: ShaderStageFlags,
381 offset: u32,
382 data: &[T],
383 ) where
384 T: Pod,
385 {
386 assert!(arith_le(size_of_val(data), u32::max_value()));
387
388 self.command_buffer.write(
389 self.scope,
390 Command::PushConstants {
391 layout,
392 stages,
393 offset,
394 data: cast_slice(data),
395 },
396 );
397 }
398
399 pub fn push_constants<P>(&mut self, layout: &P, constants: &impl PipelinePushConstants<P>)
400 where
401 P: PipelineInputLayout,
402 {
403 layout.push_constants(constants, self);
404 }
405}
406
/// Owning command encoder.
///
/// Must be consumed with `finish` (to obtain the recorded command buffer)
/// or `discard`; otherwise the embedded guard warns when dropped.
pub struct Encoder<'a> {
    inner: EncoderCommon<'a>,
    // Guard that warns on drop unless `forget`-ed by finish/discard.
    drop: EncoderDrop,
}
412
413impl<'a> fmt::Debug for Encoder<'a> {
414 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
415 f.debug_struct("Encoder")
416 .field("command_buffer", &self.inner.command_buffer)
417 .field("capabilities", &self.inner.capabilities)
418 .finish()
419 }
420}
421
/// Zero-sized drop guard embedded in [`Encoder`]; emits a warning if the
/// encoder is dropped without being submitted or explicitly discarded.
struct EncoderDrop;
423
impl Drop for EncoderDrop {
    fn drop(&mut self) {
        // Only reached when the guard was not `forget`-ed by
        // `Encoder::finish` or `Encoder::discard`.
        #[cfg(feature = "tracing")]
        tracing::warn!(
            "Encoder is dropped. Encoders must be either submitted or explicitly discarded"
        );

        // Without the `tracing` feature, fall back to stderr.
        #[cfg(not(feature = "tracing"))]
        eprintln!("Encoder is dropped. Encoders must be either submitted or explicitly discarded")
    }
}
435
436impl<'a> Encoder<'a> {
437 pub fn discard(self) {
438 forget(self.drop)
439 }
440}
441
// Expose the shared `EncoderCommon` binding/state methods on `Encoder`.
impl<'a> std::ops::Deref for Encoder<'a> {
    type Target = EncoderCommon<'a>;

    fn deref(&self) -> &EncoderCommon<'a> {
        &self.inner
    }
}
449
// Mutable counterpart of `Deref`, for recording through `EncoderCommon`.
impl<'a> std::ops::DerefMut for Encoder<'a> {
    fn deref_mut(&mut self) -> &mut EncoderCommon<'a> {
        &mut self.inner
    }
}
455
456impl<'a> Encoder<'a> {
457 pub(crate) fn new(
458 command_buffer: CommandBuffer,
459 capabilities: QueueCapabilityFlags,
460 scope: &'a Scope<'a>,
461 ) -> Self {
462 Encoder {
463 inner: EncoderCommon {
464 capabilities,
465 scope,
466 command_buffer,
467 },
468 drop: EncoderDrop,
469 }
470 }
471
472 pub fn with_framebuffer(
482 &mut self,
483 framebuffer: &'a Framebuffer,
484 clears: &[ClearValue],
485 ) -> RenderPassEncoder<'_, 'a> {
486 assert!(self.inner.capabilities.supports_graphics());
487
488 self.inner.command_buffer.write(
489 self.scope,
490 Command::BeginRenderPass {
491 framebuffer,
492 clears,
493 },
494 );
495
496 RenderPassEncoder {
497 framebuffer,
498 render_pass: &framebuffer.info().render_pass,
499 inner: &mut self.inner,
500 subpass: 0,
501 }
502 }
503
504 pub fn with_render_pass<R, I>(
515 &mut self,
516 render_pass: &'a mut R,
517 input: &I,
518 device: &Device,
519 ) -> Result<RenderPassEncoder<'_, 'a>, FramebufferError>
520 where
521 R: RenderPassInstance<Input = I>,
522 {
523 render_pass.begin_render_pass(input, device, self)
524 }
525
526 pub fn begin_rendering(&mut self, mut info: RenderingInfo<'_>) -> RenderingEncoder<'_, 'a> {
528 assert!(self.inner.capabilities.supports_graphics());
529
530 let color_format = |a: &RenderingColorInfo| a.color_view.info().image.info().format;
531
532 let depth_stencil_format = |a: &RenderingDepthStencilAttachmentInfo| {
533 a.depth_stencil_view.info().image.info().format
534 };
535
536 let colors = &*self
537 .scope
538 .to_scope_from_iter(info.colors.iter().map(color_format));
539
540 let depth_stencil = info.depth_stencil.as_ref().map(depth_stencil_format);
541
542 let render_area = *info.render_area.get_or_insert_with(|| {
543 let mut me = minimal_extent();
544 for color in info.colors.iter() {
545 me.add(color.color_view.info().image.info().extent.into_2d());
546 }
547
548 if let Some(depth_stencil) = &info.depth_stencil {
549 me.add(
550 depth_stencil
551 .depth_stencil_view
552 .info()
553 .image
554 .info()
555 .extent
556 .into_2d(),
557 );
558 }
559
560 Rect::from(me.get())
561 });
562
563 self.inner
564 .command_buffer
565 .write(self.scope, Command::BeginRendering { info });
566
567 RenderingEncoder {
568 render_area,
569 colors,
570 depth_stencil,
571 inner: &mut self.inner,
572 }
573 }
574
575 pub fn update_buffer<T>(&mut self, buffer: &Buffer, offset: u64, data: &[T])
577 where
578 T: Pod,
579 {
580 assert_eq!(offset % 4, 0);
581 assert!(size_of_val(data) <= 65_536, "Data length greater than 65536 MUST NOT be uploaded with encoder, consider buffer mapping. Actual data is {} bytes", size_of_val(data));
582
583 if data.is_empty() {
584 return;
585 }
586
587 let data = unsafe {
588 std::slice::from_raw_parts(data.as_ptr() as *const u8, std::mem::size_of_val(data))
589 };
590
591 self.inner.command_buffer.write(
592 self.scope,
593 Command::UpdateBuffer {
594 buffer,
595 offset,
596 data,
597 },
598 )
599 }
600
601 pub fn upload_buffer<T>(
604 &mut self,
605 buffer: &'a Buffer,
606 offset: u64,
607 data: &'a [T],
608 device: &Device,
609 ) -> Result<(), OutOfMemory>
610 where
611 T: Pod,
612 {
613 const UPDATE_LIMIT: usize = 16384;
614
615 assert_eq!(
616 size_of_val(data) & 3,
617 0,
618 "Buffer uploading data size must be a multiple of 4"
619 );
620
621 if data.is_empty() {
622 return Ok(());
623 }
624
625 if size_of_val(data) <= UPDATE_LIMIT {
626 self.update_buffer(buffer, offset, data);
627 } else {
628 let staging = device.create_buffer_static(
629 BufferInfo {
630 align: 15,
631 size: size_of_val(data) as u64,
632 usage: BufferUsage::TRANSFER_SRC,
633 },
634 data,
635 )?;
636
637 self.copy_buffer(
638 &staging,
639 buffer,
640 &[BufferCopy {
641 src_offset: 0,
642 dst_offset: offset,
643 size: size_of_val(data) as u64,
644 }],
645 );
646 }
647
648 Ok(())
649 }
650
651 pub fn upload_buffer_cached<T, S>(
654 &mut self,
655 buffer: &'a Buffer,
656 offset: u64,
657 data: &'a [T],
658 device: &Device,
659 staging: &mut S,
660 ) -> Result<(), OutOfMemory>
661 where
662 T: Pod,
663 S: AsMut<[Buffer]> + Extend<Buffer>,
664 {
665 const UPDATE_LIMIT: usize = 16384;
666
667 assert_eq!(
668 size_of_val(data) & 3,
669 0,
670 "Buffer uploading data size must be a multiple of 4"
671 );
672
673 if data.is_empty() {
674 return Ok(());
675 }
676
677 if size_of_val(data) <= UPDATE_LIMIT {
678 self.update_buffer(buffer, offset, data);
679 } else {
680 let new_staging;
681 let mut iter = staging.as_mut().iter_mut();
682 let staging = loop {
683 match iter.next() {
684 None => {
685 new_staging = device.create_buffer_static(
686 BufferInfo {
687 align: 15,
688 size: size_of_val(data) as u64,
689 usage: BufferUsage::TRANSFER_SRC,
690 },
691 data,
692 )?;
693 break &new_staging;
694 }
695 Some(buffer) => {
696 if arith_ge(buffer.info().size, size_of_val(data)) {
697 if let Some(mappable_buffer) = buffer.try_as_mappable() {
698 device
699 .upload_to_memory(mappable_buffer, offset, data)
700 .expect("Map failed");
701
702 break &*buffer;
703 }
704 }
705 }
706 }
707 };
708
709 self.copy_buffer(
710 staging,
711 buffer,
712 &[BufferCopy {
713 src_offset: 0,
714 dst_offset: offset,
715 size: size_of_val(data) as u64,
716 }],
717 );
718 }
719
720 Ok(())
721 }
722
723 pub fn build_acceleration_structure(
725 &mut self,
726 infos: &[AccelerationStructureBuildGeometryInfo],
727 ) {
728 assert!(self.inner.capabilities.supports_compute());
729
730 if infos.is_empty() {
731 return;
732 }
733
734 for (i, info) in infos.iter().enumerate() {
736 if let Some(src) = info.src {
737 for (j, info) in infos[..i].iter().enumerate() {
738 assert_ne!(
739 info.dst, src,
740 "`infos[{}].src` and `infos[{}].dst` collision",
741 i, j,
742 );
743 }
744 }
745
746 for (j, info) in infos[..i].iter().enumerate() {
747 assert_ne!(
748 info.src,
749 Some(info.dst),
750 "`infos[{}].src` and `infos[{}].dst` collision",
751 j,
752 i,
753 );
754 }
755 }
756
757 self.inner.command_buffer.write(
758 self.inner.scope,
759 Command::BuildAccelerationStructure { infos },
760 )
761 }
762
763 pub fn trace_rays(&mut self, shader_binding_table: &'a ShaderBindingTable, extent: Extent3) {
764 assert!(self.inner.capabilities.supports_compute());
765
766 self.inner.command_buffer.write(
767 self.inner.scope,
768 Command::TraceRays {
769 shader_binding_table,
770 extent,
771 },
772 )
773 }
774
775 pub fn copy_buffer(
776 &mut self,
777 src_buffer: &Buffer,
778 dst_buffer: &Buffer,
779 regions: &[BufferCopy],
780 ) {
781 #[cfg(debug_assertions)]
782 {
783 for region in regions {
784 assert!(src_buffer.info().size >= region.src_offset + region.size);
785 assert!(dst_buffer.info().size >= region.dst_offset + region.size);
786 }
787 }
788
789 self.inner.command_buffer.write(
790 self.inner.scope,
791 Command::CopyBuffer {
792 src_buffer,
793 dst_buffer,
794 regions,
795 },
796 )
797 }
798
799 pub fn copy_image(
800 &mut self,
801 src_image: &Image,
802 src_layout: Layout,
803 dst_image: &Image,
804 dst_layout: Layout,
805 regions: &[ImageCopy],
806 ) {
807 self.inner.command_buffer.write(
808 self.inner.scope,
809 Command::CopyImage {
810 src_image,
811 src_layout,
812 dst_image,
813 dst_layout,
814 regions,
815 },
816 )
817 }
818
819 pub fn copy_buffer_to_image(
820 &mut self,
821 src_buffer: &Buffer,
822 dst_image: &Image,
823 dst_layout: Layout,
824 regions: &[BufferImageCopy],
825 ) {
826 self.inner.command_buffer.write(
827 self.inner.scope,
828 Command::CopyBufferImage {
829 src_buffer,
830 dst_image,
831 dst_layout,
832 regions,
833 },
834 )
835 }
836
837 pub fn blit_image(
838 &mut self,
839 src_image: &Image,
840 src_layout: Layout,
841 dst_image: &Image,
842 dst_layout: Layout,
843 regions: &[ImageBlit],
844 filter: Filter,
845 ) {
846 assert!(self.inner.capabilities.supports_graphics());
847
848 self.inner.command_buffer.write(
849 self.inner.scope,
850 Command::BlitImage {
851 src_image,
852 src_layout,
853 dst_image,
854 dst_layout,
855 regions,
856 filter,
857 },
858 )
859 }
860
861 pub fn dispatch(&mut self, x: u32, y: u32, z: u32) {
862 assert!(self.inner.capabilities.supports_compute());
863
864 self.inner
865 .command_buffer
866 .write(self.inner.scope, Command::Dispatch { x, y, z });
867 }
868
869 pub fn memory_barrier(
870 &mut self,
871 src: PipelineStages,
872 src_acc: Access,
873 dst: PipelineStages,
874 dst_acc: Access,
875 ) {
876 self.inner.command_buffer.write(
877 self.inner.scope,
878 Command::PipelineBarrier {
879 src,
880 dst,
881 images: &[],
882 buffers: &[],
883 memory: Some(MemoryBarrier {
884 src: src_acc,
885 dst: dst_acc,
886 }),
887 },
888 );
889 }
890
891 pub fn image_barriers(
892 &mut self,
893 src: PipelineStages,
894 dst: PipelineStages,
895 images: &[ImageMemoryBarrier],
896 ) {
897 self.inner.command_buffer.write(
898 self.inner.scope,
899 Command::PipelineBarrier {
900 src,
901 dst,
902 images,
903 buffers: &[],
904 memory: None,
905 },
906 );
907 }
908
909 pub fn buffer_barriers(
910 &mut self,
911 src: PipelineStages,
912 dst: PipelineStages,
913 buffers: &[BufferMemoryBarrier],
914 ) {
915 self.inner.command_buffer.write(
916 self.inner.scope,
917 Command::PipelineBarrier {
918 src,
919 dst,
920 images: &[],
921 buffers,
922 memory: None,
923 },
924 );
925 }
926
927 pub fn finish(mut self) -> CommandBuffer {
930 forget(self.drop);
931
932 self.inner
933 .command_buffer
934 .end()
935 .expect("TODO: Handle command buffer writing error");
936
937 self.inner.command_buffer
938 }
939}
940
/// Encoder for commands valid only inside a render pass instance
/// (draws and related state). Ends the render pass when dropped.
pub struct RenderPassEncoder<'a, 'b> {
    framebuffer: &'b Framebuffer,
    render_pass: &'b RenderPass,
    // Subpass index currently being recorded.
    subpass: u32,
    inner: &'a mut EncoderCommon<'b>,
}
948
949impl<'a, 'b> fmt::Debug for RenderPassEncoder<'a, 'b> {
950 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
951 f.debug_struct("RenderPassEncoder")
952 .field("framebuffer", self.framebuffer)
953 .field("render_pass", self.render_pass)
954 .field("subpass", &self.subpass)
955 .field("command_buffer", &self.inner.command_buffer)
956 .field("capabilities", &self.inner.capabilities)
957 .finish()
958 }
959}
960
961impl<'a, 'b> RenderPassEncoder<'a, 'b> {
962 pub fn render_pass(&self) -> &RenderPass {
963 self.render_pass
964 }
965
966 pub fn framebuffer(&self) -> &Framebuffer {
967 self.framebuffer
968 }
969
970 pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
971 self.inner.command_buffer.write(
972 self.scope,
973 Command::Draw {
974 vertices,
975 instances,
976 },
977 );
978 }
979
980 pub fn draw_indexed(&mut self, indices: Range<u32>, vertex_offset: i32, instances: Range<u32>) {
981 self.inner.command_buffer.write(
982 self.scope,
983 Command::DrawIndexed {
984 indices,
985 vertex_offset,
986 instances,
987 },
988 );
989 }
990
991 pub fn bind_dynamic_graphics_pipeline(
992 &mut self,
993 pipeline: &mut DynamicGraphicsPipeline,
994 device: &Device,
995 ) -> Result<(), OutOfMemory> {
996 assert!(self.capabilities.supports_graphics());
997
998 let mut set_viewport = false;
999 let mut set_scissor = false;
1000
1001 if let Some(rasterizer) = &pipeline.desc.rasterizer {
1002 set_viewport = rasterizer.viewport.is_dynamic();
1003 set_scissor = rasterizer.scissor.is_dynamic();
1004 }
1005
1006 if set_scissor {
1007 self.inner
1008 .set_scissor(self.framebuffer.info().extent.into());
1009 }
1010
1011 if set_viewport {
1012 self.inner
1013 .set_viewport(self.framebuffer.info().extent.into());
1014 }
1015
1016 let gp = pipeline.get_for_render_pass(self.render_pass, self.subpass, device)?;
1017 self.inner.bind_graphics_pipeline(gp);
1018 Ok(())
1019 }
1020}
1021
impl Drop for RenderPassEncoder<'_, '_> {
    // Ends the render pass instance when the encoder goes out of scope.
    fn drop(&mut self) {
        self.inner
            .command_buffer
            .write(self.scope, Command::EndRenderPass);
    }
}
1029
// Expose the shared `EncoderCommon` methods on the render pass encoder.
impl<'a, 'b> std::ops::Deref for RenderPassEncoder<'a, 'b> {
    type Target = EncoderCommon<'b>;

    fn deref(&self) -> &EncoderCommon<'b> {
        self.inner
    }
}
1037
// Mutable counterpart of `Deref`, for recording through `EncoderCommon`.
impl<'a, 'b> std::ops::DerefMut for RenderPassEncoder<'a, 'b> {
    fn deref_mut(&mut self) -> &mut EncoderCommon<'b> {
        self.inner
    }
}
1043
/// Encoder for commands recorded inside a dynamic rendering scope
/// (begun by `Encoder::begin_rendering`). Ends rendering when dropped.
pub struct RenderingEncoder<'a, 'b> {
    // Area affected by rendering; used for dynamic viewport/scissor.
    render_area: Rect,
    // Color attachment formats, allocated in the encoder's arena scope.
    colors: &'b [Format],
    // Depth-stencil attachment format, if one is attached.
    depth_stencil: Option<Format>,
    inner: &'a mut EncoderCommon<'b>,
}
1051
1052impl<'a, 'b> fmt::Debug for RenderingEncoder<'a, 'b> {
1053 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1054 f.debug_struct("RenderingEncoder")
1055 .field("command_buffer", &self.inner.command_buffer)
1056 .field("capabilities", &self.inner.capabilities)
1057 .finish()
1058 }
1059}
1060
1061impl<'a, 'b> RenderingEncoder<'a, 'b> {
1062 pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
1063 self.inner.command_buffer.write(
1064 self.scope,
1065 Command::Draw {
1066 vertices,
1067 instances,
1068 },
1069 );
1070 }
1071
1072 pub fn draw_indexed(&mut self, indices: Range<u32>, vertex_offset: i32, instances: Range<u32>) {
1073 self.inner.command_buffer.write(
1074 self.scope,
1075 Command::DrawIndexed {
1076 indices,
1077 vertex_offset,
1078 instances,
1079 },
1080 );
1081 }
1082
1083 pub fn bind_dynamic_graphics_pipeline(
1084 &mut self,
1085 pipeline: &mut DynamicGraphicsPipeline,
1086 device: &Device,
1087 ) -> Result<(), OutOfMemory> {
1088 assert!(self.capabilities.supports_graphics());
1089
1090 let mut set_viewport = false;
1091 let mut set_scissor = false;
1092
1093 if let Some(rasterizer) = &pipeline.desc.rasterizer {
1094 set_viewport = rasterizer.viewport.is_dynamic();
1095 set_scissor = rasterizer.scissor.is_dynamic();
1096 }
1097
1098 if set_scissor {
1099 self.inner.set_scissor(self.render_area);
1100 }
1101
1102 if set_viewport {
1103 self.inner.set_viewport(self.render_area.into());
1104 }
1105
1106 let gp = pipeline.get_for_dynamic_rendering(self.colors, self.depth_stencil, device)?;
1107 self.inner.bind_graphics_pipeline(gp);
1108 Ok(())
1109 }
1110}
1111
impl Drop for RenderingEncoder<'_, '_> {
    // Ends the dynamic rendering scope when the encoder goes out of scope.
    fn drop(&mut self) {
        self.inner
            .command_buffer
            .write(self.scope, Command::EndRendering);
    }
}
1119
// Expose the shared `EncoderCommon` methods on the rendering encoder.
impl<'a, 'b> std::ops::Deref for RenderingEncoder<'a, 'b> {
    type Target = EncoderCommon<'b>;

    fn deref(&self) -> &EncoderCommon<'b> {
        self.inner
    }
}
1127
// Mutable counterpart of `Deref`, for recording through `EncoderCommon`.
impl<'a, 'b> std::ops::DerefMut for RenderingEncoder<'a, 'b> {
    fn deref_mut(&mut self) -> &mut EncoderCommon<'b> {
        self.inner
    }
}