// re_renderer/context.rs
1use std::sync::Arc;
2use std::sync::atomic::{AtomicU64, Ordering};
3
4use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
5use re_mutex::Mutex;
6use type_map::concurrent::TypeMap;
7
8use crate::allocator::{CpuWriteGpuReadBelt, GpuReadbackBelt};
9use crate::device_caps::DeviceCaps;
10use crate::error_handling::ErrorTracker;
11use crate::global_bindings::GlobalBindings;
12use crate::renderer::{Renderer, RendererExt};
13use crate::resource_managers::TextureManager2D;
14use crate::wgpu_resources::WgpuResourcePools;
15use crate::{FileServer, RecommendedFileResolver};
16
/// Frame idx used before starting the first frame.
///
/// Chosen as `u64::MAX` so that the first `begin_frame` (which advances via `wrapping_add(1)`)
/// lands on frame index 0.
const STARTUP_FRAME_IDX: u64 = u64::MAX;
19
/// Errors that can occur when creating a [`RenderContext`].
#[derive(thiserror::Error, Debug)]
pub enum RenderContextError {
    /// The adapter does not fulfill our minimum device capability requirements.
    #[error(
        "The GPU/graphics driver is lacking some abilities: {0}. \
        Check the troubleshooting guide at https://rerun.io/docs/overview/installing-rerun/troubleshooting and consider updating your graphics driver."
    )]
    InsufficientDeviceCapabilities(#[from] crate::device_caps::InsufficientDeviceCapabilities),
}
28
/// Controls MSAA (Multi-Sampling Anti-Aliasing)
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum MsaaMode {
    /// Disabled MSAA.
    ///
    /// The preferred option for testing: MSAA implementations vary across devices,
    /// especially where alpha-to-coverage is involved, so disabling it keeps
    /// image comparisons stable.
    ///
    /// Note that this only concerns the main render target —
    /// individual renderers or postprocessing effects may still use textures
    /// with a sample count higher than 1 internally.
    Off,

    /// 4x MSAA.
    ///
    /// Currently the only option besides _Off_: 4 samples is what `WebGPU`
    /// guarantees to be available everywhere.
    // TODO(andreas): On native we could offer higher counts.
    #[default]
    Msaa4x,
}

impl MsaaMode {
    /// Returns the number of samples for this MSAA mode.
    pub const fn sample_count(&self) -> u32 {
        if matches!(self, Self::Off) {
            1
        } else {
            4
        }
    }
}
60
/// Configures global properties of the renderer.
///
/// For simplicity, we don't allow changing any of these properties without tearing down the [`RenderContext`],
/// even though it may be possible.
#[derive(Clone, Copy, Debug, Default)]
pub struct RenderConfig {
    /// MSAA mode used for the main render target.
    pub msaa_mode: MsaaMode,
    // TODO(andreas): Add a way to force the render tier?
}
70
impl RenderConfig {
    /// Returns the best config for the given [`DeviceCaps`].
    ///
    /// Currently ignores the capabilities and always picks 4x MSAA.
    pub fn best_for_device_caps(_device_caps: &DeviceCaps) -> Self {
        Self {
            msaa_mode: MsaaMode::Msaa4x,
        }
    }

    /// Render config preferred for running most tests.
    ///
    /// This is optimized for low discrepancy between devices in order
    /// to keep image comparison thresholds low.
    pub fn testing() -> Self {
        Self {
            // we use "testing" also for generating nice looking screenshots
            msaa_mode: MsaaMode::Msaa4x,
        }
    }
}
90
/// Any resource involving wgpu rendering which can be re-used across different scenes.
/// I.e. render pipelines, resource pools, etc.
pub struct RenderContext {
    pub device: wgpu::Device,
    pub queue: wgpu::Queue,

    /// Device capabilities, detected from the adapter at creation time.
    device_caps: DeviceCaps,

    /// Render configuration, fixed for the lifetime of the context.
    config: RenderConfig,

    /// The final output (surface) color format.
    output_format_color: wgpu::TextureFormat,

    /// Global bindings, always bound to bind group slot 0.
    /// [`Renderer`] are not allowed to use bind group 0 themselves!
    pub global_bindings: GlobalBindings,

    /// All lazily created [`Renderer`]s, see [`Self::renderer`].
    renderers: RwLock<Renderers>,

    /// File resolver, used (among others) for shader (re)loading each frame.
    pub(crate) resolver: RecommendedFileResolver,

    pub texture_manager_2d: TextureManager2D,

    /// Staging-belt for cpu->gpu uploads, see [`Self::CPU_WRITE_GPU_READ_BELT_DEFAULT_CHUNK_SIZE`].
    pub cpu_write_gpu_read_belt: Mutex<CpuWriteGpuReadBelt>,

    /// Staging-belt for gpu->cpu readback, see [`Self::GPU_READBACK_BELT_DEFAULT_CHUNK_SIZE`].
    pub gpu_readback_belt: Mutex<GpuReadbackBelt>,

    /// List of unfinished queue submission via this context.
    ///
    /// This is currently only about submissions we do via the global encoder in [`ActiveFrameContext`]
    /// TODO(andreas): We rely on egui for the "primary" submissions in `re_viewer`. It would be nice to take full control over all submissions.
    inflight_queue_submissions: Vec<wgpu::SubmissionIndex>,

    pub active_frame: ActiveFrameContext,

    /// Frame index used for [`wgpu::Device::on_uncaptured_error`] callbacks.
    ///
    /// Today, when using wgpu-core (== native & webgl) this is equal to the current [`ActiveFrameContext::frame_index`]
    /// since the content timeline is in sync with the device timeline,
    /// meaning everything done on [`wgpu::Device`] happens right away.
    /// On WebGPU however, the `content timeline` may be arbitrarily behind the `device timeline`!
    /// See <https://www.w3.org/TR/webgpu/#programming-model-timelines>.
    frame_index_for_uncaptured_errors: Arc<AtomicU64>,

    /// Error tracker used for [`wgpu::Device::on_uncaptured_error`].
    top_level_error_tracker: Arc<ErrorTracker>,

    pub gpu_resources: WgpuResourcePools, // Last due to drop order.
}
134
/// Struct owning *all* [`Renderer`].
/// [`Renderer`] are created lazily and stay around indefinitely.
#[derive(Default)]
pub struct Renderers {
    // Maps each concrete renderer type `R` to its `RendererWithKey<R>` instance.
    renderers: TypeMap,
    // Type-erased renderers, indexed by the `u8` inside their `RendererTypeId`.
    // Grows monotonically on registration; entries are never removed.
    renderers_by_key: Vec<Arc<dyn RendererExt>>,
}
142
/// Unique identifier for a [`Renderer`] type.
///
/// We generally don't expect many different distinct types of renderers,
/// therefore 255 should be more than enough.
/// This limitation simplifies sorting of drawables a bit.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct RendererTypeId(u8);

impl RendererTypeId {
    /// The raw `u8` value backing this id.
    #[inline]
    pub const fn bits(&self) -> u8 {
        let Self(bits) = self;
        *bits
    }

    /// Reconstructs an id from its raw `u8` value (inverse of [`Self::bits`]).
    #[inline]
    pub const fn from_bits(bits: u8) -> Self {
        RendererTypeId(bits)
    }
}
162
/// A [`Renderer`] paired with its stable [`RendererTypeId`].
pub struct RendererWithKey<T: Renderer> {
    /// The shared renderer instance (also stored type-erased in `Renderers::renderers_by_key`).
    renderer: Arc<T>,
    /// Unique key for this renderer type, assigned at registration.
    key: RendererTypeId,
}
167
168impl<T: Renderer> std::ops::Deref for RendererWithKey<T> {
169 type Target = T;
170
171 #[inline]
172 fn deref(&self) -> &Self::Target {
173 self.renderer.as_ref()
174 }
175}
176
impl<T: Renderer> RendererWithKey<T> {
    /// Returns the key of the renderer.
    ///
    /// The key is guaranteed to be unique and constant for the lifetime of the renderer.
    /// It is kept as small as possible to aid with sorting drawables.
    #[inline]
    pub fn key(&self) -> RendererTypeId {
        self.key
    }
}
187
impl Renderers {
    /// Returns the renderer of type `R`, creating and registering it on first use.
    ///
    /// Renderers are never removed, so both the returned reference's target and its key
    /// stay valid for the lifetime of this struct.
    pub fn get_or_create<R: 'static + Renderer + Send + Sync>(
        &mut self,
        ctx: &RenderContext,
    ) -> &RendererWithKey<R> {
        self.renderers.entry().or_insert_with(|| {
            re_tracing::profile_scope!("create_renderer", std::any::type_name::<R>());

            // The next free index in `renderers_by_key` becomes the new renderer's key.
            // If we ever exceed 255 distinct renderer types we log an error and saturate
            // at `u8::MAX` (keys would then collide).
            let key = RendererTypeId(u8::try_from(self.renderers_by_key.len()).unwrap_or_else(
                |_| {
                    re_log::error!("Supporting at most {} distinct renderer types.", u8::MAX);
                    u8::MAX
                },
            ));

            let renderer = Arc::new(R::create_renderer(ctx));
            // Keep a type-erased copy around for key-based lookup.
            self.renderers_by_key.push(renderer.clone());

            RendererWithKey { renderer, key }
        })
    }

    /// Returns the renderer of type `R` if it was already created, `None` otherwise.
    pub fn get<R: 'static + Renderer>(&self) -> Option<&RendererWithKey<R>> {
        self.renderers.get::<RendererWithKey<R>>()
    }

    /// Gets a renderer by its key.
    ///
    /// For this to succeed, the renderer must have been initialized prior.
    /// (there would be no key otherwise anyways!)
    /// The returned type is the type erased [`RendererExt`] rather than a concrete renderer type.
    pub(crate) fn get_by_key(&self, key: RendererTypeId) -> Option<&dyn RendererExt> {
        self.renderers_by_key
            .get(key.0 as usize)
            .map(|r| r.as_ref())
    }
}
225
impl RenderContext {
    /// Chunk size for our cpu->gpu buffer manager.
    ///
    /// 32MiB chunk size (as big as a for instance a 2048x1024 float4 texture)
    /// (it's tempting to use something smaller on Web, but this may just cause more
    /// buffers to be allocated the moment we want to upload a bigger chunk)
    pub const CPU_WRITE_GPU_READ_BELT_DEFAULT_CHUNK_SIZE: wgpu::BufferSize =
        wgpu::BufferSize::new(1024 * 1024 * 32).unwrap();

    /// Chunk size for our gpu->cpu buffer manager.
    ///
    /// We expect large screenshots to be rare occurrences, so we go with fairly small chunks of just 64 kiB.
    /// (this is as much memory as a 128x128 rgba8 texture, or a little bit less than a 64x64 picking target with depth)
    /// I.e. screenshots will end up in dedicated chunks.
    const GPU_READBACK_BELT_DEFAULT_CHUNK_SIZE: wgpu::BufferSize =
        wgpu::BufferSize::new(1024 * 64).unwrap();

    /// Limit maximum number of in flight submissions to this number.
    ///
    /// By limiting the number of submissions we have on the queue we ensure that GPU stalls do not
    /// cause us to request more and more memory to prepare more and more submissions.
    ///
    /// Note that this is only indirectly related to number of buffered frames,
    /// since buffered frames/blit strategy are all about the display<->gpu interface,
    /// whereas this is about a *particular aspect* of the cpu<->gpu interface.
    ///
    /// Should be somewhere between 1-4, too high and we use up more memory and introduce latency,
    /// too low and we may starve the GPU.
    const MAX_NUM_INFLIGHT_QUEUE_SUBMISSIONS: usize = 4;

    /// Creates a new [`RenderContext`].
    ///
    /// `config_provider` receives the capabilities detected for `adapter` and returns the
    /// [`RenderConfig`] that stays fixed for the lifetime of the context.
    ///
    /// # Errors
    /// Returns [`RenderContextError::InsufficientDeviceCapabilities`] if the adapter
    /// doesn't fulfill our minimum device capability requirements.
    pub fn new(
        adapter: &wgpu::Adapter,
        device: wgpu::Device,
        queue: wgpu::Queue,
        output_format_color: wgpu::TextureFormat,
        config_provider: impl FnOnce(&DeviceCaps) -> RenderConfig,
    ) -> Result<Self, RenderContextError> {
        re_tracing::profile_function!();

        #[cfg(not(load_shaders_from_disk))]
        {
            // Make sure `workspace_shaders::init()` is called at least once, which will
            // register all shaders defined in the workspace into the run-time in-memory
            // filesystem.
            crate::workspace_shaders::init();
        }

        let device_caps = DeviceCaps::from_adapter(adapter)?;
        let config = config_provider(&device_caps);

        // Updated at the end of each `begin_frame`; see the field docs for why this
        // may differ from the active frame index on WebGPU.
        let frame_index_for_uncaptured_errors = Arc::new(AtomicU64::new(STARTUP_FRAME_IDX));

        // Make sure to catch all errors, never crash, and deduplicate reported errors.
        // See https://www.w3.org/TR/webgpu/#telemetry
        let top_level_error_tracker = {
            let err_tracker = Arc::new(ErrorTracker::default());
            device.on_uncaptured_error({
                let err_tracker = Arc::clone(&err_tracker);
                let frame_index_for_uncaptured_errors = frame_index_for_uncaptured_errors.clone();
                Arc::new(move |err| {
                    err_tracker.handle_error(
                        err,
                        frame_index_for_uncaptured_errors.load(Ordering::Acquire),
                    );
                })
            });
            err_tracker
        };

        log_adapter_info(&adapter.get_info());

        let mut gpu_resources = WgpuResourcePools::default();
        let global_bindings = GlobalBindings::new(&gpu_resources, &device);

        let resolver = crate::new_recommended_file_resolver();
        let texture_manager_2d = TextureManager2D::new(&device, &queue, &gpu_resources.textures);

        // The startup frame comes with an encoder so that preparatory work can be recorded
        // before the first `begin_frame`; it is submitted there (see `begin_frame`).
        let active_frame = ActiveFrameContext {
            before_view_builder_encoder: Mutex::new(FrameGlobalCommandEncoder::new(&device)),
            frame_index: STARTUP_FRAME_IDX,
            num_view_builders_created: AtomicU64::new(0),
        };

        // Register shader workarounds for the current device.
        if adapter.get_info().backend == wgpu::Backend::BrowserWebGpu {
            // Chrome/Tint does not support `@invariant` when targeting Metal.
            // https://bugs.chromium.org/p/chromium/issues/detail?id=1439273
            // (bug is fixed as of writing, but hasn't hit any public released version yet)
            // Ignoring it is fine in the cases we use it, it's mostly there to avoid a (correct!) warning in wgpu.
            gpu_resources
                .shader_modules
                .shader_text_workaround_replacements
                .push((
                    "@invariant @builtin(position)".to_owned(),
                    "@builtin(position)".to_owned(),
                ));
        }

        let cpu_write_gpu_read_belt = Mutex::new(CpuWriteGpuReadBelt::new(
            Self::CPU_WRITE_GPU_READ_BELT_DEFAULT_CHUNK_SIZE,
        ));
        let gpu_readback_belt = Mutex::new(GpuReadbackBelt::new(
            Self::GPU_READBACK_BELT_DEFAULT_CHUNK_SIZE,
        ));

        Ok(Self {
            device,
            queue,
            device_caps,
            config,
            output_format_color,
            global_bindings,
            renderers: RwLock::new(Renderers::default()),
            resolver,
            top_level_error_tracker,
            texture_manager_2d,
            cpu_write_gpu_read_belt,
            gpu_readback_belt,
            inflight_queue_submissions: Vec::new(),
            active_frame,
            frame_index_for_uncaptured_errors,
            gpu_resources,
        })
    }

    /// Ensures not too many queue submissions are in flight,
    /// waiting for the oldest ones to finish if necessary (a no-op on the web, see below).
    fn poll_device(&mut self) {
        re_tracing::profile_function!();

        // Ensure not too many queue submissions are in flight.

        let num_submissions_to_wait_for = self
            .inflight_queue_submissions
            .len()
            .saturating_sub(Self::MAX_NUM_INFLIGHT_QUEUE_SUBMISSIONS);

        // `drain(..).next_back()` removes all excess submissions from the tracking list and
        // hands us the most recent among them; the code relies on queue submissions
        // completing in order, so waiting on the newest also covers the older ones.
        if let Some(_newest_submission_to_wait_for) = self
            .inflight_queue_submissions
            .drain(0..num_submissions_to_wait_for)
            .next_back()
        {
            // Disable error reporting on Web:
            // * On WebGPU poll is a no-op.
            // * On WebGL we'd just immediately timeout since we can't actually wait for frames.
            #[cfg(not(target_arch = "wasm32"))]
            {
                if let Err(err) = self.device.poll(wgpu::PollType::Wait {
                    submission_index: Some(_newest_submission_to_wait_for),
                    timeout: None,
                }) {
                    re_log::warn_once!(
                        "Failed to limit number of in-flight GPU frames to {}: {:?}",
                        Self::MAX_NUM_INFLIGHT_QUEUE_SUBMISSIONS,
                        err
                    );
                }
            }
        }
    }

    /// Call this at the beginning of a new frame.
    ///
    /// Updates internal book-keeping, frame allocators and executes delayed events like shader reloading.
    pub fn begin_frame(&mut self) {
        re_tracing::profile_function!();

        // If the currently active frame still has an encoder, we need to finish it and queue it.
        // This should only ever happen for the first frame where we created an encoder for preparatory work. Every other frame we take the encoder at submit!
        if self
            .active_frame
            .before_view_builder_encoder
            .lock()
            .0
            .is_some()
        {
            if self.active_frame.frame_index != STARTUP_FRAME_IDX {
                re_log::error!("There was still a command encoder from the previous frame at the beginning of the current.
This means, either a call to RenderContext::before_submit was omitted, or the previous frame was unexpectedly cancelled.");
            }
            self.before_submit();
        }

        // Request write-staging buffers back.
        // Ideally we'd do this as closely as possible to the last submission containing any cpu->gpu operations as possible.
        self.cpu_write_gpu_read_belt.get_mut().after_queue_submit();

        // Schedule mapping for all read staging buffers.
        // Ideally we'd do this as closely as possible to the last submission containing any gpu->cpu operations as possible.
        self.gpu_readback_belt.get_mut().after_queue_submit();

        // Mark the previous frame as finished on the device timeline.
        // This retains only errors that occurred during this frame; older ones are removed
        // so they can be re-reported if they recur after a gap.
        //
        // On native (wgpu-core), the device timeline is always in sync with the content timeline,
        // so we can update this directly. On WebGPU the device timeline may lag behind,
        // but we don't currently have a good async mechanism for tracking it.
        self.top_level_error_tracker
            .on_device_timeline_frame_finished(self.active_frame.frame_index);

        // New active frame!
        self.active_frame = ActiveFrameContext {
            before_view_builder_encoder: Mutex::new(FrameGlobalCommandEncoder::new(&self.device)),
            frame_index: self.active_frame.frame_index.wrapping_add(1),
            num_view_builders_created: AtomicU64::new(0),
        };
        let frame_index = self.active_frame.frame_index;

        // Update the frame index used by the on_uncaptured_error callback to tag new errors.
        // Must happen after incrementing so errors during this frame are tagged with the correct index.
        self.frame_index_for_uncaptured_errors
            .store(frame_index, Ordering::Release);

        // The set of files on disk that were modified in any way since last frame,
        // ignoring deletions.
        // Always an empty set in release builds.
        let modified_paths = FileServer::get_mut(|fs| fs.collect(&self.resolver));
        if !modified_paths.is_empty() {
            re_log::debug!(?modified_paths, "got some filesystem events");
        }

        self.texture_manager_2d.begin_frame(frame_index);
        self.gpu_readback_belt.get_mut().begin_frame(frame_index);

        {
            let WgpuResourcePools {
                bind_group_layouts,
                bind_groups,
                pipeline_layouts,
                render_pipelines,
                samplers,
                shader_modules,
                textures,
                buffers,
            } = &mut self.gpu_resources; // not all pools require maintenance

            // Shader module maintenance must come before render pipelines because render pipeline
            // recompilation picks up all shaders that have been recompiled this frame.
            shader_modules.begin_frame(&self.device, &self.resolver, frame_index, &modified_paths);
            render_pipelines.begin_frame(
                &self.device,
                frame_index,
                shader_modules,
                pipeline_layouts,
            );

            bind_groups.begin_frame(frame_index, textures, buffers, samplers);

            textures.begin_frame(frame_index);
            buffers.begin_frame(frame_index);

            pipeline_layouts.begin_frame(frame_index);
            bind_group_layouts.begin_frame(frame_index);
            samplers.begin_frame(frame_index);
        }

        // Poll device *after* resource pool `begin_frame` since resource pools may each decide drop resources.
        // Wgpu internally may then internally decide to let go of these buffers.
        self.poll_device();
    }

    /// Call this at the end of a frame but before submitting command buffers (e.g. from [`crate::view_builder::ViewBuilder`])
    pub fn before_submit(&mut self) {
        re_tracing::profile_function!();

        // Unmap all write staging buffers, so we don't get validation errors about buffers still being mapped
        // that the gpu wants to read from.
        self.cpu_write_gpu_read_belt.lock().before_queue_submit();

        // Take the frame-global encoder out (leaving `None` behind), finish it and submit it.
        if let Some(command_encoder) = self
            .active_frame
            .before_view_builder_encoder
            .lock()
            .0
            .take()
        {
            re_tracing::profile_scope!("finish & submit frame-global encoder");
            let command_buffer = command_encoder.finish();

            // TODO(andreas): For better performance, we should try to bundle this with the single submit call that is currently happening in eframe.
            // How do we hook in there and make sure this buffer is submitted first?
            self.inflight_queue_submissions
                .push(self.queue.submit([command_buffer]));
        }
    }

    /// Gets a renderer with the specified type, initializing it if necessary.
    pub fn renderer<R: 'static + Renderer + Send + Sync>(
        &self,
    ) -> MappedRwLockReadGuard<'_, RendererWithKey<R>> {
        // Most likely we already have the renderer. Take a read lock and return it.
        if let Ok(renderer) =
            parking_lot::RwLockReadGuard::try_map(self.renderers.read(), |r| r.get::<R>())
        {
            return renderer;
        }

        // If it wasn't there we have to add it.
        // This path is rare since it happens only once per renderer type in the lifetime of the ctx.
        // (we don't discard renderers ever)
        self.renderers.write().get_or_create::<R>(self);

        // Release write lock again and only take a read lock.
        // safe to unwrap since we just created it and nobody removes elements from the renderer.
        parking_lot::RwLockReadGuard::map(self.renderers.read(), |r| r.get::<R>().unwrap())
    }

    /// Read access to renderers.
    pub(crate) fn read_lock_renderers(&self) -> RwLockReadGuard<'_, Renderers> {
        self.renderers.read()
    }

    /// Returns the global frame index of the active frame.
    pub fn active_frame_idx(&self) -> u64 {
        self.active_frame.frame_index
    }

    /// Returns the device's capabilities.
    pub fn device_caps(&self) -> &DeviceCaps {
        &self.device_caps
    }

    /// Returns the active render config.
    pub fn render_config(&self) -> &RenderConfig {
        &self.config
    }

    /// Returns the final output format for color (i.e. the surface's format).
    pub fn output_format_color(&self) -> wgpu::TextureFormat {
        self.output_format_color
    }
}
557
/// Command encoder living for the duration of one frame, shared by all work recorded before the view builders.
///
/// Wraps an `Option` so the encoder can be taken out for submission at the end of the frame.
pub struct FrameGlobalCommandEncoder(Option<wgpu::CommandEncoder>);
559
impl FrameGlobalCommandEncoder {
    /// Creates a fresh (open) encoder for a new frame.
    fn new(device: &wgpu::Device) -> Self {
        Self(Some(device.create_command_encoder(
            &wgpu::CommandEncoderDescriptor {
                label: Some(
                    crate::Label::from("global \"before viewbuilder\" command encoder").get(),
                ),
            },
        )))
    }

    /// Gets the global encoder for a frame. Only valid within a frame.
    ///
    /// # Panics
    /// Panics if the encoder has already been taken out for submission,
    /// i.e. when called outside of a frame.
    pub fn get(&mut self) -> &mut wgpu::CommandEncoder {
        self.0
            .as_mut()
            .expect("Frame global encoder can't be accessed outside of a frame!")
    }
}
578
impl Drop for FrameGlobalCommandEncoder {
    fn drop(&mut self) {
        // Close global command encoder if there is any pending.
        // Not doing so before shutdown causes errors!
        // Note that the resulting command buffer is discarded here, never submitted.
        if let Some(encoder) = self.0.take() {
            encoder.finish();
        }
    }
}
588
/// Resources and book-keeping scoped to a single frame (recreated on every `begin_frame`).
pub struct ActiveFrameContext {
    /// Command encoder for all commands that should go in before view builder are submitted.
    ///
    /// This should be used for any gpu copy operation outside of a renderer or view builder.
    /// (i.e. typically in [`crate::renderer::DrawData`] creation!)
    pub before_view_builder_encoder: Mutex<FrameGlobalCommandEncoder>,

    /// Index of this frame. Is incremented for every render frame.
    ///
    /// Keep in mind that all operations on WebGPU are asynchronous:
    /// This counter is part of the `content timeline` and may be arbitrarily
    /// behind both of the `device timeline` and `queue timeline`.
    /// See <https://www.w3.org/TR/webgpu/#programming-model-timelines>
    pub frame_index: u64,

    /// Number of view builders created in this frame so far.
    pub num_view_builders_created: AtomicU64,
}
607
impl ActiveFrameContext {
    /// Returns the number of view builders created in this frame so far.
    pub fn num_view_builders_created(&self) -> u64 {
        // Uses acquire semantics to be on the safe side (side effects from the ViewBuilder creation are visible to the caller).
        self.num_view_builders_created.load(Ordering::Acquire)
    }
}
615
616fn log_adapter_info(info: &wgpu::AdapterInfo) {
617 re_tracing::profile_function!();
618
619 // See https://github.com/rerun-io/rerun/issues/3089
620 let is_software_rasterizer_with_known_crashes = {
621 // `llvmpipe` is Mesa's software rasterizer.
622 // It may describe EITHER a Vulkan or OpenGL software rasterizer.
623 // `lavapipe` is the name given to the Vulkan software rasterizer,
624 // but this name doesn't seem to show up in the info string.
625 let is_mesa_software_rasterizer = info.driver == "llvmpipe";
626
627 // TODO(andreas):
628 // Some versions of lavapipe are problematic (we observed crashes in the past),
629 // but we haven't isolated for what versions this happens.
630 // (we are happily using lavapipe without any issues on CI)
631 // However, there's reason to be more skeptical of OpenGL software rasterizers,
632 // so we mark those as problematic regardless.
633 // A user might as well just use Vulkan software rasterizer if they're in a situation where they
634 // can't use a GPU for which we do have test coverage.
635 info.backend == wgpu::Backend::Gl && is_mesa_software_rasterizer
636 };
637
638 let human_readable_summary = adapter_info_summary(info);
639
640 if cfg!(test) {
641 // If we're testing then software rasterizers are just fine, preferred even!
642 re_log::debug_once!("wgpu adapter {human_readable_summary}");
643 } else if is_software_rasterizer_with_known_crashes {
644 re_log::warn_once!(
645 "Bad software rasterizer detected - expect poor performance and crashes. See: https://www.rerun.io/docs/overview/installing-rerun/troubleshooting#graphics-issues"
646 );
647 re_log::info_once!("wgpu adapter {human_readable_summary}");
648 } else if info.device_type == wgpu::DeviceType::Cpu {
649 re_log::warn_once!(
650 "Software rasterizer detected - expect poor performance. See: https://www.rerun.io/docs/overview/installing-rerun/troubleshooting#graphics-issues"
651 );
652 re_log::info_once!("wgpu adapter {human_readable_summary}");
653 } else {
654 re_log::debug_once!("wgpu adapter {human_readable_summary}");
655 }
656}
657
658/// A human-readable summary about an adapter
659pub fn adapter_info_summary(info: &wgpu::AdapterInfo) -> String {
660 let wgpu::AdapterInfo {
661 name,
662 vendor: _, // skip integer id
663 device: _, // skip integer id
664 device_type,
665 device_pci_bus_id: _,
666 driver,
667 driver_info,
668 backend,
669 subgroup_min_size: _,
670 subgroup_max_size: _,
671 transient_saves_memory: _,
672 } = &info;
673
674 // Example values:
675 // > name: "llvmpipe (LLVM 16.0.6, 256 bits)", device_type: Cpu, backend: Vulkan, driver: "llvmpipe", driver_info: "Mesa 23.1.6-arch1.4 (LLVM 16.0.6)"
676 // > name: "Apple M1 Pro", device_type: IntegratedGpu, backend: Metal, driver: "", driver_info: ""
677 // > name: "ANGLE (Apple, Apple M1 Pro, OpenGL 4.1)", device_type: IntegratedGpu, backend: Gl, driver: "", driver_info: ""
678
679 let mut summary = format!("backend: {backend:?}, device_type: {device_type:?}");
680
681 if !name.is_empty() {
682 summary += &format!(", name: {name:?}");
683 }
684 if !driver.is_empty() {
685 summary += &format!(", driver: {driver:?}");
686 }
687 if !driver_info.is_empty() {
688 summary += &format!(", driver_info: {driver_info:?}");
689 }
690
691 summary
692}