1#![warn(unused_crate_dependencies)]
89#![warn(clippy::print_stdout, clippy::print_stderr)]
90#![cfg_attr(target_pointer_width = "64", warn(clippy::trivially_copy_pass_by_ref))]
92#![cfg_attr(docsrs, feature(doc_auto_cfg))]
94#![allow(missing_docs, reason = "We have many as-yet undocumented items.")]
99#![expect(
100 missing_debug_implementations,
101 single_use_lifetimes,
102 unnameable_types,
103 unreachable_pub,
104 clippy::cast_possible_truncation,
105 clippy::missing_assert_message,
106 clippy::shadow_unrelated,
107 clippy::missing_panics_doc,
108 clippy::exhaustive_enums,
109 clippy::print_stderr,
110 clippy::match_same_arms,
111 reason = "Deferred"
112)]
113#![allow(
114 clippy::missing_errors_doc,
115 clippy::todo,
116 clippy::partial_pub_fields,
117 reason = "Deferred, only apply in some feature sets so not expect"
118)]
119
120mod debug;
121mod recording;
122mod render;
123mod scene;
124mod shaders;
125
126#[cfg(feature = "wgpu")]
127pub mod util;
128#[cfg(feature = "wgpu")]
129mod wgpu_engine;
130
/// Lower-level API surface, re-exported for users who need fine-grained
/// control over recording and executing GPU work (rather than the high-level
/// [`Renderer`] entry points).
pub mod low_level {
    pub use crate::debug::DebugLayers;
    // Core building blocks for manually constructing and scheduling GPU work.
    pub use crate::recording::{
        BindType, BufferProxy, Command, ImageFormat, ImageProxy, Recording, ResourceId,
        ResourceProxy, ShaderId,
    };
    pub use crate::render::Render;
    pub use crate::shaders::FullShaders;
    pub use vello_encoding::BumpAllocators;
}
146pub use peniko;
148pub use peniko::kurbo;
150
151#[cfg(feature = "wgpu")]
152pub use wgpu;
153
154pub use scene::{DrawGlyphs, Scene};
155pub use vello_encoding::{Glyph, NormalizedCoord};
156
157use low_level::ShaderId;
158#[cfg(feature = "wgpu")]
159use low_level::{
160 BindType, BumpAllocators, FullShaders, ImageFormat, ImageProxy, Recording, Render,
161 ResourceProxy,
162};
163use thiserror::Error;
164
165#[cfg(feature = "wgpu")]
166use debug::DebugLayers;
167#[cfg(feature = "wgpu")]
168use vello_encoding::Resolver;
169#[cfg(feature = "wgpu")]
170use wgpu_engine::{ExternalResource, WgpuEngine};
171
172#[cfg(feature = "wgpu")]
173use std::{
174 num::NonZeroUsize,
175 sync::{atomic::AtomicBool, Arc},
176};
177#[cfg(feature = "wgpu")]
178use wgpu::{Device, Queue, SurfaceTexture, TextureFormat, TextureView};
179#[cfg(all(feature = "wgpu", feature = "wgpu-profiler"))]
180use wgpu_profiler::{GpuProfiler, GpuProfilerSettings};
181
/// Possible antialiasing configurations for a render.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum AaConfig {
    /// Area antialiasing (analytic coverage — TODO confirm exact algorithm in
    /// the fine-rasterisation shaders).
    Area,
    /// 8x multisampled antialiasing.
    Msaa8,
    /// 16x multisampled antialiasing.
    Msaa16,
}
209
/// The set of antialiasing configurations a renderer is prepared to support.
///
/// Shaders are compiled per supported method, so narrowing this set reduces
/// startup cost; see `RendererOptions::antialiasing_support`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct AaSupport {
    /// Whether [`AaConfig::Area`] is supported.
    pub area: bool,
    /// Whether [`AaConfig::Msaa8`] is supported.
    pub msaa8: bool,
    /// Whether [`AaConfig::Msaa16`] is supported.
    pub msaa16: bool,
}

impl AaSupport {
    /// Support for every antialiasing method.
    ///
    /// `const` so it can also be used in constant/static initialisers;
    /// existing callers are unaffected.
    pub const fn all() -> Self {
        Self {
            area: true,
            msaa8: true,
            msaa16: true,
        }
    }

    /// Support for only [`AaConfig::Area`].
    ///
    /// `const` so it can also be used in constant/static initialisers.
    pub const fn area_only() -> Self {
        Self {
            area: true,
            msaa8: false,
            msaa16: false,
        }
    }
}
250
251impl FromIterator<AaConfig> for AaSupport {
252 fn from_iter<T: IntoIterator<Item = AaConfig>>(iter: T) -> Self {
253 let mut result = Self {
254 area: false,
255 msaa8: false,
256 msaa16: false,
257 };
258 for config in iter {
259 match config {
260 AaConfig::Area => result.area = true,
261 AaConfig::Msaa8 => result.msaa8 = true,
262 AaConfig::Msaa16 => result.msaa16 = true,
263 }
264 }
265 result
266 }
267}
268
/// Errors that can occur in Vello.
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum Error {
    /// No suitable GPU device could be found.
    #[cfg(feature = "wgpu")]
    #[error("Couldn't find suitable device")]
    NoCompatibleDevice,
    /// Surface creation failed; wraps the underlying wgpu error.
    #[cfg(feature = "wgpu")]
    #[error("Couldn't create wgpu surface")]
    WgpuCreateSurfaceError(#[from] wgpu::CreateSurfaceError),
    /// The surface exposes none of the texture formats Vello can blit to.
    #[cfg(feature = "wgpu")]
    #[error("Couldn't find `Rgba8Unorm` or `Bgra8Unorm` texture formats for surface")]
    UnsupportedSurfaceFormat,

    /// An internal buffer was consumed for one use but requested for another.
    #[cfg(feature = "wgpu")]
    #[error("Buffer '{0}' is not available but used for {1}")]
    UnavailableBufferUsed(&'static str, &'static str),
    /// An asynchronous buffer mapping failed; wraps the wgpu error.
    #[cfg(feature = "wgpu")]
    #[error("Failed to async map a buffer")]
    BufferAsyncError(#[from] wgpu::BufferAsyncError),
    /// A debug-visualization readback buffer could not be downloaded.
    #[cfg(feature = "wgpu")]
    #[cfg(feature = "debug_layers")]
    #[error("Failed to download internal buffer '{0}' for visualization")]
    DownloadError(&'static str),

    /// A validation (or other) error captured from a wgpu error scope.
    #[cfg(feature = "wgpu")]
    #[error("wgpu Error from scope")]
    WgpuErrorFromScope(#[from] wgpu::Error),

    /// The GPU profiler could not be constructed.
    #[cfg(feature = "wgpu-profiler")]
    #[error("Couldn't create wgpu profiler")]
    #[doc(hidden)] ProfilerCreationError(#[from] wgpu_profiler::CreationError),

    /// Shader hot-reload compilation failed; carries the compile errors.
    #[cfg(feature = "hot_reload")]
    #[error("Failed to compile shaders:\n{0}")]
    #[doc(hidden)] ShaderCompilation(#[from] vello_shaders::compile::ErrorVec),
}
322
/// Crate-local specialization of `Result` defaulting to [`Error`].
#[cfg_attr(
    not(feature = "wgpu"),
    expect(dead_code, reason = "this can be unused when wgpu feature is not used")
)]
pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
328
/// Renders a [`Scene`] into a texture or surface.
#[cfg(feature = "wgpu")]
pub struct Renderer {
    #[cfg_attr(
        not(feature = "hot_reload"),
        expect(
            dead_code,
            reason = "Options are only used to reinitialise on a hot reload"
        )
    )]
    // Creation-time options, retained so `reload_shaders` can rebuild state.
    options: RendererOptions,
    // Executes recordings and owns GPU-side resources.
    engine: WgpuEngine,
    // Shared resolver passed to each render; reused across frames.
    resolver: Resolver,
    shaders: FullShaders,
    // Pipeline that copies the intermediate target to a surface; only
    // present when `RendererOptions::surface_format` was `Some`.
    blit: Option<BlitPipeline>,
    #[cfg(feature = "debug_layers")]
    debug: Option<debug::DebugRenderer>,
    // Cached intermediate render target, taken/restored around each
    // surface render and reallocated on size change.
    target: Option<TargetTexture>,
    #[cfg(feature = "wgpu-profiler")]
    #[doc(hidden)] pub profiler: GpuProfiler,
    #[cfg(feature = "wgpu-profiler")]
    #[doc(hidden)] pub profile_result: Option<Vec<wgpu_profiler::GpuTimerQueryResult>>,
}
// Compile-time check that the renderer stays `Send` on native targets.
#[cfg(all(feature = "wgpu", not(target_arch = "wasm32")))]
static_assertions::assert_impl_all!(Renderer: Send);
367
/// Parameters used in a single render that are configurable by the client.
pub struct RenderParams {
    /// The color applied as the render target's base — presumably the
    /// background; confirm against `render::render_full`.
    pub base_color: peniko::Color,

    /// Width of the rendering, in pixels.
    pub width: u32,
    /// Height of the rendering, in pixels.
    pub height: u32,

    /// The antialiasing algorithm to use for this render; must be one of the
    /// methods enabled in `RendererOptions::antialiasing_support`.
    pub antialiasing_method: AaConfig,
}
384
/// Options which are set at renderer creation time, used in [`Renderer::new`].
#[cfg(feature = "wgpu")]
pub struct RendererOptions {
    /// Texture format used when rendering to a surface. When `None`, the blit
    /// (and debug) pipelines are not created and surface rendering panics.
    pub surface_format: Option<TextureFormat>,

    /// Forwarded to `WgpuEngine::new`; when true the engine runs in CPU mode
    /// (NOTE(review): exact extent of CPU execution is defined by the engine).
    pub use_cpu: bool,

    /// The antialiasing methods shaders should be prepared for.
    pub antialiasing_support: AaSupport,

    /// Thread count for shader initialisation: `Some(1)` forces the current
    /// thread, any other value (including `None`) enables the parallel path
    /// on native targets; also passed to `build_shaders_if_needed`.
    pub num_init_threads: Option<NonZeroUsize>,
}
411
/// Internal result of the coarse stage of an async render.
#[cfg(feature = "wgpu")]
struct RenderResult {
    /// Bump-allocator state read back from the GPU, when the download buffer
    /// was available.
    bump: Option<BumpAllocators>,
    /// Buffers captured for debug-layer visualization.
    #[cfg(feature = "debug_layers")]
    captured: Option<render::CapturedBuffers>,
}
418
#[cfg(feature = "wgpu")]
impl Renderer {
    /// Creates a new renderer for the specified device, building the full set
    /// of shaders and (if a surface format is configured) the blit and debug
    /// pipelines.
    ///
    /// # Errors
    ///
    /// Returns an error if shader/pipeline creation fails, or (with the
    /// `wgpu-profiler` feature) if the profiler cannot be constructed.
    pub fn new(device: &Device, options: RendererOptions) -> Result<Self> {
        let mut engine = WgpuEngine::new(options.use_cpu);
        // `Some(1)` means "initialise on this thread"; any other value
        // (including `None`) opts into parallel initialisation on native.
        if options.num_init_threads != NonZeroUsize::new(1) {
            #[cfg(not(target_arch = "wasm32"))]
            engine.use_parallel_initialisation();
        }
        let shaders = shaders::full_shaders(device, &mut engine, &options)?;
        #[cfg(not(target_arch = "wasm32"))]
        engine.build_shaders_if_needed(device, options.num_init_threads);
        // Surface-only pipelines: created only when a surface format is set.
        let blit = options
            .surface_format
            .map(|surface_format| BlitPipeline::new(device, surface_format, &mut engine))
            .transpose()?;
        #[cfg(feature = "debug_layers")]
        let debug = options
            .surface_format
            .map(|surface_format| debug::DebugRenderer::new(device, surface_format, &mut engine));

        Ok(Self {
            options,
            engine,
            resolver: Resolver::new(),
            shaders,
            blit,
            #[cfg(feature = "debug_layers")]
            debug,
            target: None,
            #[cfg(feature = "wgpu-profiler")]
            profiler: GpuProfiler::new(GpuProfilerSettings {
                ..Default::default()
            })?,
            #[cfg(feature = "wgpu-profiler")]
            profile_result: None,
        })
    }

    /// Renders a scene to the texture behind `texture`, using a single
    /// recording produced by `render::render_full`.
    ///
    /// The dimensions come from `params`. The view must be usable as the fine
    /// stage's output image (see `TargetTexture::new` for the format/usage
    /// used internally — NOTE(review): confirm exact requirements).
    pub fn render_to_texture(
        &mut self,
        device: &Device,
        queue: &Queue,
        scene: &Scene,
        texture: &TextureView,
        params: &RenderParams,
    ) -> Result<()> {
        let (recording, target) =
            render::render_full(scene, &mut self.resolver, &self.shaders, params);
        // Bind the caller's texture view as the recording's output image.
        let external_resources = [ExternalResource::Image(
            *target.as_image().unwrap(),
            texture,
        )];
        self.engine.run_recording(
            device,
            queue,
            &recording,
            &external_resources,
            "render_to_texture",
            #[cfg(feature = "wgpu-profiler")]
            &mut self.profiler,
        )?;
        Ok(())
    }

    /// Renders a scene to the provided surface texture.
    ///
    /// The scene is first rendered into a cached intermediate texture
    /// (reallocated when `params`' size differs), then blitted to the surface
    /// with alpha premultiplied by the blit shader.
    ///
    /// # Panics
    ///
    /// Panics if the renderer was created without a `surface_format`.
    pub fn render_to_surface(
        &mut self,
        device: &Device,
        queue: &Queue,
        scene: &Scene,
        surface: &SurfaceTexture,
        params: &RenderParams,
    ) -> Result<()> {
        let width = params.width;
        let height = params.height;
        let mut target = self
            .target
            .take()
            .unwrap_or_else(|| TargetTexture::new(device, width, height));
        // Reallocate the cached target if the requested size changed.
        if target.width != width || target.height != height {
            target = TargetTexture::new(device, width, height);
        }
        self.render_to_texture(device, queue, scene, &target.view, params)?;
        let blit = self
            .blit
            .as_ref()
            .expect("renderer should have configured surface_format to use on a surface");
        let mut recording = Recording::default();
        let target_proxy = ImageProxy::new(
            width,
            height,
            ImageFormat::from_wgpu(target.format)
                .expect("`TargetTexture` always has a supported texture format"),
        );
        let surface_proxy = ImageProxy::new(
            width,
            height,
            ImageFormat::from_wgpu(surface.texture.format())
                .ok_or(Error::UnsupportedSurfaceFormat)?,
        );
        // Full-screen quad (6 vertices, no vertex buffer) sampling the target.
        recording.draw(recording::DrawParams {
            shader_id: blit.0,
            instance_count: 1,
            vertex_count: 6,
            vertex_buffer: None,
            resources: vec![ResourceProxy::Image(target_proxy)],
            target: surface_proxy,
            clear_color: Some([0., 0., 0., 0.]),
        });

        let surface_view = surface
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let external_resources = [
            ExternalResource::Image(target_proxy, &target.view),
            ExternalResource::Image(surface_proxy, &surface_view),
        ];
        self.engine.run_recording(
            device,
            queue,
            &recording,
            &external_resources,
            "blit (render_to_surface)",
            #[cfg(feature = "wgpu-profiler")]
            &mut self.profiler,
        )?;
        // Put the intermediate target back for reuse next frame.
        self.target = Some(target);
        #[cfg(feature = "wgpu-profiler")]
        {
            self.profiler.end_frame().unwrap();
            if let Some(result) = self
                .profiler
                .process_finished_frame(queue.get_timestamp_period())
            {
                self.profile_result = Some(result);
            }
        }
        Ok(())
    }

    /// Overrides the texture used to back `image`, or removes an existing
    /// override when `texture` is `None`.
    ///
    /// Returns the previously registered override for this image, if any.
    pub fn override_image(
        &mut self,
        image: &peniko::Image,
        texture: Option<wgpu::ImageCopyTextureBase<Arc<wgpu::Texture>>>,
    ) -> Option<wgpu::ImageCopyTextureBase<Arc<wgpu::Texture>>> {
        match texture {
            Some(texture) => self.engine.image_overrides.insert(image.data.id(), texture),
            None => self.engine.image_overrides.remove(&image.data.id()),
        }
    }

    /// Recompiles the shaders and swaps in a fresh engine, reusing the
    /// options the renderer was created with.
    ///
    /// Runs inside a wgpu validation error scope, so a failed rebuild returns
    /// the error and leaves the current engine/pipelines untouched.
    #[cfg(feature = "hot_reload")]
    #[doc(hidden)] pub async fn reload_shaders(&mut self, device: &Device) -> Result<(), Error> {
        device.push_error_scope(wgpu::ErrorFilter::Validation);
        let mut engine = WgpuEngine::new(self.options.use_cpu);
        let shaders = shaders::full_shaders(device, &mut engine, &self.options)?;
        let blit = self
            .options
            .surface_format
            .map(|surface_format| BlitPipeline::new(device, surface_format, &mut engine))
            .transpose()?;
        #[cfg(feature = "debug_layers")]
        let debug = self
            .options
            .surface_format
            .map(|format| debug::DebugRenderer::new(device, format, &mut engine));
        let error = device.pop_error_scope().await;
        if let Some(error) = error {
            return Err(error.into());
        }
        // Only commit the new state once validation has passed.
        self.engine = engine;
        self.shaders = shaders;
        self.blit = blit;
        #[cfg(feature = "debug_layers")]
        {
            self.debug = debug;
        }
        Ok(())
    }

    /// Renders a scene to the texture, returning the bump-allocator state
    /// read back from the GPU (when available).
    #[cfg_attr(docsrs, doc(hidden))]
    #[deprecated(
        note = "render_to_texture should be preferred, as the _async version has no stability guarantees"
    )]
    pub async fn render_to_texture_async(
        &mut self,
        device: &Device,
        queue: &Queue,
        scene: &Scene,
        texture: &TextureView,
        params: &RenderParams,
    ) -> Result<Option<BumpAllocators>> {
        let result = self
            .render_to_texture_async_internal(device, queue, scene, texture, params)
            .await?;
        #[cfg(feature = "debug_layers")]
        {
            // No debug layers are drawn here; captured buffers are simply
            // released so they don't leak.
            if let Some(captured) = result.captured {
                let mut recording = Recording::default();
                self.engine.free_download(captured.lines);
                captured.release_buffers(&mut recording);
                self.engine.run_recording(
                    device,
                    queue,
                    &recording,
                    &[],
                    "free memory",
                    #[cfg(feature = "wgpu-profiler")]
                    &mut self.profiler,
                )?;
            }
        }
        Ok(result.bump)
    }

    /// Shared implementation for the async render paths: runs the coarse
    /// stage, reads back the bump allocators, then runs the fine stage into
    /// `texture`.
    async fn render_to_texture_async_internal(
        &mut self,
        device: &Device,
        queue: &Queue,
        scene: &Scene,
        texture: &TextureView,
        params: &RenderParams,
    ) -> Result<RenderResult> {
        let mut render = Render::new();
        let encoding = scene.encoding();
        // Robust mode is required for buffer capture by the debug layers.
        let robust = cfg!(feature = "debug_layers");
        let recording = render.render_encoding_coarse(
            encoding,
            &mut self.resolver,
            &self.shaders,
            params,
            robust,
        );
        let target = render.out_image();
        let bump_buf = render.bump_buf();
        #[cfg(feature = "debug_layers")]
        let captured = render.take_captured_buffers();
        self.engine.run_recording(
            device,
            queue,
            &recording,
            &[],
            "t_async_coarse",
            #[cfg(feature = "wgpu-profiler")]
            &mut self.profiler,
        )?;

        // Map the bump buffer and read it back, if a download was scheduled.
        let mut bump: Option<BumpAllocators> = None;
        if let Some(bump_buf) = self.engine.get_download(bump_buf) {
            let buf_slice = bump_buf.slice(..);
            let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
            buf_slice.map_async(wgpu::MapMode::Read, move |v| sender.send(v).unwrap());
            receiver.receive().await.expect("channel was closed")?;
            let mapped = buf_slice.get_mapped_range();
            bump = Some(bytemuck::pod_read_unaligned(&mapped));
        }
        self.engine.free_download(bump_buf);
        // Second recording: the fine stage, writing into the caller's texture.
        let mut recording = Recording::default();
        render.record_fine(&self.shaders, &mut recording);
        let external_resources = [ExternalResource::Image(target, texture)];
        self.engine.run_recording(
            device,
            queue,
            &recording,
            &external_resources,
            "t_async_fine",
            #[cfg(feature = "wgpu-profiler")]
            &mut self.profiler,
        )?;
        Ok(RenderResult {
            bump,
            #[cfg(feature = "debug_layers")]
            captured,
        })
    }

    /// Renders a scene to the surface, optionally drawing the requested
    /// debug layers on top, and returns the bump-allocator readback.
    ///
    /// # Panics
    ///
    /// Panics if the renderer was created without a `surface_format`.
    #[cfg_attr(docsrs, doc(hidden))]
    #[deprecated(
        note = "render_to_surface should be preferred, as the _async version has no stability guarantees"
    )]
    pub async fn render_to_surface_async(
        &mut self,
        device: &Device,
        queue: &Queue,
        scene: &Scene,
        surface: &SurfaceTexture,
        params: &RenderParams,
        debug_layers: DebugLayers,
    ) -> Result<Option<BumpAllocators>> {
        // Warn (once per process) when debug layers are requested but the
        // feature that implements them is compiled out.
        if cfg!(not(feature = "debug_layers")) && !debug_layers.is_empty() {
            static HAS_WARNED: AtomicBool = AtomicBool::new(false);
            if !HAS_WARNED.swap(true, std::sync::atomic::Ordering::Release) {
                log::warn!(
                    "Requested debug layers {debug:?} but `debug_layers` feature is not enabled.",
                    debug = debug_layers
                );
            }
        }

        let width = params.width;
        let height = params.height;
        let mut target = self
            .target
            .take()
            .unwrap_or_else(|| TargetTexture::new(device, width, height));
        // Reallocate the cached target if the requested size changed.
        if target.width != width || target.height != height {
            target = TargetTexture::new(device, width, height);
        }
        let result = self
            .render_to_texture_async_internal(device, queue, scene, &target.view, params)
            .await?;
        let blit = self
            .blit
            .as_ref()
            .expect("renderer should have configured surface_format to use on a surface");
        let mut recording = Recording::default();
        let target_proxy = ImageProxy::new(
            width,
            height,
            ImageFormat::from_wgpu(target.format)
                .expect("`TargetTexture` always has a supported texture format"),
        );
        let surface_proxy = ImageProxy::new(
            width,
            height,
            ImageFormat::from_wgpu(surface.texture.format())
                .ok_or(Error::UnsupportedSurfaceFormat)?,
        );
        // Full-screen quad (6 vertices, no vertex buffer) sampling the target.
        recording.draw(recording::DrawParams {
            shader_id: blit.0,
            instance_count: 1,
            vertex_count: 6,
            vertex_buffer: None,
            resources: vec![ResourceProxy::Image(target_proxy)],
            target: surface_proxy,
            clear_color: Some([0., 0., 0., 0.]),
        });

        #[cfg(feature = "debug_layers")]
        {
            // Draw the requested debug layers over the surface, then release
            // the captured buffers.
            if let Some(captured) = result.captured {
                let debug = self
                    .debug
                    .as_ref()
                    .expect("renderer should have configured surface_format to use on a surface");
                let bump = result.bump.as_ref().unwrap();
                let downloads = DebugDownloads::map(&self.engine, &captured, bump).await?;
                debug.render(
                    &mut recording,
                    surface_proxy,
                    &captured,
                    bump,
                    params,
                    &downloads,
                    debug_layers,
                );

                self.engine.free_download(captured.lines);
                captured.release_buffers(&mut recording);
            }
        }

        let surface_view = surface
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let external_resources = [
            ExternalResource::Image(target_proxy, &target.view),
            ExternalResource::Image(surface_proxy, &surface_view),
        ];
        self.engine.run_recording(
            device,
            queue,
            &recording,
            &external_resources,
            "blit (render_to_surface_async)",
            #[cfg(feature = "wgpu-profiler")]
            &mut self.profiler,
        )?;

        #[cfg(feature = "wgpu-profiler")]
        {
            self.profiler.end_frame().unwrap();
            if let Some(result) = self
                .profiler
                .process_finished_frame(queue.get_timestamp_period())
            {
                self.profile_result = Some(result);
            }
        }

        // Put the intermediate target back for reuse next frame.
        self.target = Some(target);
        Ok(result.bump)
    }
}
873
/// Cached intermediate render target the scene is rendered into before being
/// blitted to a surface.
#[cfg(feature = "wgpu")]
struct TargetTexture {
    /// View over the backing texture; bound as the fine stage's output and
    /// as the blit pass's input.
    view: TextureView,
    width: u32,
    height: u32,
    format: TextureFormat,
}
881
#[cfg(feature = "wgpu")]
impl TargetTexture {
    /// Allocates a fresh intermediate render target of the given pixel size.
    ///
    /// The texture is always `Rgba8Unorm` and is bindable both as a storage
    /// image (written during rendering) and as a sampled texture (read by the
    /// blit pass).
    fn new(device: &Device, width: u32, height: u32) -> Self {
        let format = TextureFormat::Rgba8Unorm;
        let descriptor = wgpu::TextureDescriptor {
            label: None,
            size: wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING,
            format,
            view_formats: &[],
        };
        let backing = device.create_texture(&descriptor);
        Self {
            view: backing.create_view(&wgpu::TextureViewDescriptor::default()),
            width,
            height,
            format,
        }
    }
}
909
/// Render pipeline (identified by its [`ShaderId`]) that copies the
/// intermediate target texture to a surface, premultiplying alpha.
#[cfg(feature = "wgpu")]
struct BlitPipeline(ShaderId);
912
#[cfg(feature = "wgpu")]
impl BlitPipeline {
    /// Builds the blit render shader targeting `format`.
    ///
    /// The vertex stage emits a full-screen quad as two triangles (6
    /// vertices, no vertex buffer); the fragment stage loads from the bound
    /// texture and premultiplies alpha (`rgb * a`).
    ///
    /// # Errors
    ///
    /// Returns [`Error::UnsupportedSurfaceFormat`] when `format` has no
    /// corresponding [`ImageFormat`].
    fn new(device: &Device, format: TextureFormat, engine: &mut WgpuEngine) -> Result<Self> {
        const SHADERS: &str = r#"
            @vertex
            fn vs_main(@builtin(vertex_index) ix: u32) -> @builtin(position) vec4<f32> {
                // Generate a full screen quad in normalized device coordinates
                var vertex = vec2(-1.0, 1.0);
                switch ix {
                    case 1u: {
                        vertex = vec2(-1.0, -1.0);
                    }
                    case 2u, 4u: {
                        vertex = vec2(1.0, -1.0);
                    }
                    case 5u: {
                        vertex = vec2(1.0, 1.0);
                    }
                    default: {}
                }
                return vec4(vertex, 0.0, 1.0);
            }

            @group(0) @binding(0)
            var fine_output: texture_2d<f32>;

            @fragment
            fn fs_main(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
                let rgba_sep = textureLoad(fine_output, vec2<i32>(pos.xy), 0);
                return vec4(rgba_sep.rgb * rgba_sep.a, rgba_sep.a);
            }
        "#;
        let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("blit shaders"),
            source: wgpu::ShaderSource::Wgsl(SHADERS.into()),
        });
        let shader_id = engine.add_render_shader(
            device,
            "vello.blit",
            &module,
            "vs_main",
            "fs_main",
            wgpu::PrimitiveTopology::TriangleList,
            wgpu::ColorTargetState {
                format,
                // Blending disabled: the fragment shader already outputs
                // premultiplied alpha.
                blend: None,
                write_mask: wgpu::ColorWrites::ALL,
            },
            None,
            &[(
                BindType::ImageRead(
                    ImageFormat::from_wgpu(format).ok_or(Error::UnsupportedSurfaceFormat)?,
                ),
                wgpu::ShaderStages::FRAGMENT,
            )],
        );
        Ok(Self(shader_id))
    }
}
972
/// Mapped GPU buffer slices downloaded for debug-layer visualization.
#[cfg(all(feature = "debug_layers", feature = "wgpu"))]
pub(crate) struct DebugDownloads<'a> {
    /// Mapped slice of the captured line-soup buffer.
    pub lines: wgpu::BufferSlice<'a>,
}
977
#[cfg(all(feature = "debug_layers", feature = "wgpu"))]
impl<'a> DebugDownloads<'a> {
    /// Maps the captured line-soup buffer for CPU read access, awaiting the
    /// asynchronous map operation.
    ///
    /// Only the first `bump.lines` entries are mapped. Returns
    /// [`Error::DownloadError`] if no download buffer was scheduled for the
    /// capture.
    pub async fn map(
        engine: &'a WgpuEngine,
        captured: &render::CapturedBuffers,
        bump: &BumpAllocators,
    ) -> Result<DebugDownloads<'a>> {
        use vello_encoding::LineSoup;

        let Some(lines_buf) = engine.get_download(captured.lines) else {
            return Err(Error::DownloadError("linesoup"));
        };

        // Map only the prefix the GPU reports as written.
        let lines = lines_buf.slice(..bump.lines as u64 * size_of::<LineSoup>() as u64);
        let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
        lines.map_async(wgpu::MapMode::Read, move |v| sender.send(v).unwrap());
        receiver.receive().await.expect("channel was closed")?;
        Ok(Self { lines })
    }
}