Skip to main content

wsi_dicom/
report.rs

1//! Report types returned by export and route profiling APIs.
2
3use std::path::PathBuf;
4use std::time::Duration;
5
6use serde::{ser::SerializeStruct, Serialize};
7
8use crate::encode;
9use crate::tile::PixelProfile;
10
/// Summary returned by a DICOM export: where the files were written, one
/// entry per emitted instance, and the metrics aggregated over the run.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomExportReport {
    /// Directory the DICOM instances were written into.
    pub output_dir: PathBuf,
    /// Per-instance reports, one per written DICOM file.
    pub instances: Vec<DicomInstanceReport>,
    /// Metrics aggregated across every instance in this export.
    pub metrics: DicomExportMetrics,
}
17
/// Finished compressed frame bytes ready for DICOM encapsulated Pixel Data insertion.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomEncodedFrame {
    /// Transfer syntax UID the frame bytes conform to.
    pub transfer_syntax_uid: &'static str,
    /// Compressed codestream bytes for one frame.
    pub bytes: Vec<u8>,
    /// True when the encode ran on the device (GPU) path.
    pub used_device_encode: bool,
    /// True when validation ran on the device (GPU) path.
    pub used_device_validation: bool,
    /// Wall-clock microseconds spent encoding this frame.
    pub encode_micros: u128,
    /// Wall-clock microseconds spent validating this frame.
    pub validation_micros: u128,
}
28
/// Route-profiling result for a single pyramid level of one source slide.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomRouteProfileReport {
    /// Source slide that was profiled.
    pub source_path: PathBuf,
    /// Transfer syntax the profiling run targeted.
    pub transfer_syntax_uid: &'static str,
    /// Pyramid level that was profiled.
    pub level: u32,
    /// Number of frames the caller asked to profile at this level.
    pub requested_frames: u64,
    /// Number of frames actually available at this level.
    pub available_frames: u64,
    /// Pipeline metrics collected during the profiling run.
    pub metrics: DicomExportMetrics,
    /// Total wall-clock microseconds for this level's run.
    pub elapsed_micros: u128,
}
39
/// Route-coverage result for one source slide across its pyramid levels.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomRouteCoverageReport {
    /// Source slide that was profiled.
    pub source_path: PathBuf,
    /// Transfer syntax the coverage run targeted.
    pub transfer_syntax_uid: &'static str,
    /// Frames requested at each pyramid level.
    pub requested_frames_per_level: u64,
    /// Total frames available across the profiled levels.
    pub available_frames: u64,
    /// True when every available frame was covered by the run.
    pub complete_frame_coverage: bool,
    /// Per-level profiling reports.
    pub levels: Vec<DicomRouteProfileReport>,
    /// Metrics aggregated across all levels.
    pub metrics: DicomExportMetrics,
    /// Total wall-clock microseconds for the whole coverage run.
    pub elapsed_micros: u128,
}
51
/// One source that failed during corpus coverage profiling, with the reason.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomRouteCorpusCoverageFailure {
    /// Source slide that failed.
    pub source_path: PathBuf,
    /// Human-readable failure description.
    pub message: String,
}
57
/// Route-coverage result aggregated across a whole corpus of source slides.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomRouteCorpusCoverageReport {
    /// Root directory the corpus sources were discovered under.
    pub source_root: PathBuf,
    /// Transfer syntax the coverage run targeted.
    pub transfer_syntax_uid: &'static str,
    /// Frames requested at each pyramid level of each source.
    pub requested_frames_per_level: u64,
    /// Optional cap on how many levels were profiled per source.
    pub max_levels: Option<u32>,
    /// Number of candidate sources examined.
    pub sources_considered: usize,
    /// Total frames available across all profiled sources.
    pub available_frames: u64,
    /// True when every available frame across the corpus was covered.
    pub complete_frame_coverage: bool,
    /// Per-source coverage reports that completed.
    pub reports: Vec<DicomRouteCoverageReport>,
    /// Sources that failed, with their error messages.
    pub failures: Vec<DicomRouteCorpusCoverageFailure>,
    /// Metrics aggregated across the whole corpus.
    pub metrics: DicomExportMetrics,
    /// Total wall-clock microseconds for the corpus run.
    pub elapsed_micros: u128,
}
72
/// Report for a single written DICOM instance (one file).
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct DicomInstanceReport {
    /// Path of the written DICOM file.
    pub path: PathBuf,
    /// SOP Instance UID assigned to this instance.
    pub sop_instance_uid: String,
    /// Series Instance UID this instance belongs to.
    pub series_instance_uid: String,
    /// Transfer syntax the instance was encoded with.
    pub transfer_syntax_uid: &'static str,
    /// Pyramid level this instance holds.
    pub level: u32,
    // Plane coordinates of the instance within the multi-dimensional source:
    // z = focal plane, c = channel, t = time point.
    pub z: u32,
    pub c: u32,
    pub t: u32,
    /// Number of frames written into this instance.
    pub frame_count: u32,
    /// Metrics for the work that produced this instance.
    pub metrics: DicomExportMetrics,
}
86
/// Aggregated counters and timing totals for an export or profiling run.
///
/// All `*_micros` fields are wall-clock durations in microseconds (see
/// `duration_as_reported_micros`). `Serialize` is implemented manually below
/// so the computed `gpu_encode_effective_parallelism` ratio is emitted
/// alongside the stored fields.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct DicomExportMetrics {
    // Frame counts by input/encode/validation path.
    pub total_frames: u64,
    pub cpu_input_frames: u64,
    pub gpu_input_decode_frames: u64,
    pub gpu_encode_frames: u64,
    pub gpu_validation_frames: u64,
    // Frame counts bucketed by pixel profile: component count and bit depth
    // (see `record_pixel_profile`).
    pub gray_frames: u64,
    pub rgb_like_frames: u64,
    pub other_component_frames: u64,
    pub unknown_pixel_profile_frames: u64,
    pub bits8_frames: u64,
    pub bits16_frames: u64,
    pub other_bit_depth_frames: u64,
    // Transcode route classification (see `record_transcode_route`):
    // resident = GPU input AND GPU encode; partial = exactly one of the two.
    pub gpu_transcode_frames: u64,
    pub resident_gpu_transcode_frames: u64,
    pub partial_gpu_transcode_frames: u64,
    // GPU batch counts per pipeline stage.
    pub gpu_input_decode_batches: u64,
    pub gpu_compose_batches: u64,
    pub gpu_encode_batches: u64,
    // Auto-route probing: trial CPU vs GPU runs used to pick an input path.
    pub auto_route_probe_frames: u64,
    pub auto_route_probe_gpu_batches: u64,
    pub auto_route_probe_cpu_micros: u128,
    pub auto_route_probe_gpu_micros: u128,
    pub auto_route_probe_selected_gpu_input_frames: u64,
    // Fallback and passthrough frame counts.
    pub cpu_fallback_frames: u64,
    pub jpeg_passthrough_frames: u64,
    pub j2k_passthrough_frames: u64,
    pub jpeg_cpu_encode_frames: u64,
    pub jpeg_metal_encode_frames: u64,
    pub jpeg_decode_fallback_frames: u64,
    // Accumulated per-stage timings.
    pub input_decode_micros: u128,
    pub compose_micros: u128,
    pub encode_micros: u128,
    pub validation_micros: u128,
    pub gpu_dispatch_micros: u128,
    // GPU-encode capacity figures: maxima across batches, not sums
    // (see `add_assign` and `record_gpu_encode_batch_stats`).
    pub gpu_encode_configured_inflight_tiles: u64,
    pub gpu_encode_effective_inflight_tiles: u64,
    pub gpu_encode_max_observed_inflight_tiles: u64,
    pub gpu_encode_configured_memory_mib: u64,
    pub gpu_encode_effective_memory_mib: u64,
    // GPU-encode timing split: wall time vs hardware time vs the dispatch
    // overhead derived from their difference.
    pub gpu_encode_wall_micros: u128,
    pub gpu_encode_hardware_micros: u128,
    pub gpu_encode_dispatch_overhead_micros: u128,
    // Time spent writing instances to disk.
    pub write_micros: u128,
}
133
134impl Serialize for DicomExportMetrics {
135    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
136    where
137        S: serde::Serializer,
138    {
139        let mut state = serializer.serialize_struct("DicomExportMetrics", 43)?;
140        state.serialize_field("total_frames", &self.total_frames)?;
141        state.serialize_field("cpu_input_frames", &self.cpu_input_frames)?;
142        state.serialize_field("gpu_input_decode_frames", &self.gpu_input_decode_frames)?;
143        state.serialize_field("gpu_encode_frames", &self.gpu_encode_frames)?;
144        state.serialize_field("gpu_validation_frames", &self.gpu_validation_frames)?;
145        state.serialize_field("gray_frames", &self.gray_frames)?;
146        state.serialize_field("rgb_like_frames", &self.rgb_like_frames)?;
147        state.serialize_field("other_component_frames", &self.other_component_frames)?;
148        state.serialize_field(
149            "unknown_pixel_profile_frames",
150            &self.unknown_pixel_profile_frames,
151        )?;
152        state.serialize_field("bits8_frames", &self.bits8_frames)?;
153        state.serialize_field("bits16_frames", &self.bits16_frames)?;
154        state.serialize_field("other_bit_depth_frames", &self.other_bit_depth_frames)?;
155        state.serialize_field("gpu_transcode_frames", &self.gpu_transcode_frames)?;
156        state.serialize_field(
157            "resident_gpu_transcode_frames",
158            &self.resident_gpu_transcode_frames,
159        )?;
160        state.serialize_field(
161            "partial_gpu_transcode_frames",
162            &self.partial_gpu_transcode_frames,
163        )?;
164        state.serialize_field("gpu_input_decode_batches", &self.gpu_input_decode_batches)?;
165        state.serialize_field("gpu_compose_batches", &self.gpu_compose_batches)?;
166        state.serialize_field("gpu_encode_batches", &self.gpu_encode_batches)?;
167        state.serialize_field("auto_route_probe_frames", &self.auto_route_probe_frames)?;
168        state.serialize_field(
169            "auto_route_probe_gpu_batches",
170            &self.auto_route_probe_gpu_batches,
171        )?;
172        state.serialize_field(
173            "auto_route_probe_cpu_micros",
174            &self.auto_route_probe_cpu_micros,
175        )?;
176        state.serialize_field(
177            "auto_route_probe_gpu_micros",
178            &self.auto_route_probe_gpu_micros,
179        )?;
180        state.serialize_field(
181            "auto_route_probe_selected_gpu_input_frames",
182            &self.auto_route_probe_selected_gpu_input_frames,
183        )?;
184        state.serialize_field("cpu_fallback_frames", &self.cpu_fallback_frames)?;
185        state.serialize_field("jpeg_passthrough_frames", &self.jpeg_passthrough_frames)?;
186        state.serialize_field("j2k_passthrough_frames", &self.j2k_passthrough_frames)?;
187        state.serialize_field("jpeg_cpu_encode_frames", &self.jpeg_cpu_encode_frames)?;
188        state.serialize_field("jpeg_metal_encode_frames", &self.jpeg_metal_encode_frames)?;
189        state.serialize_field(
190            "jpeg_decode_fallback_frames",
191            &self.jpeg_decode_fallback_frames,
192        )?;
193        state.serialize_field("input_decode_micros", &self.input_decode_micros)?;
194        state.serialize_field("compose_micros", &self.compose_micros)?;
195        state.serialize_field("encode_micros", &self.encode_micros)?;
196        state.serialize_field("validation_micros", &self.validation_micros)?;
197        state.serialize_field("gpu_dispatch_micros", &self.gpu_dispatch_micros)?;
198        state.serialize_field(
199            "gpu_encode_configured_inflight_tiles",
200            &self.gpu_encode_configured_inflight_tiles,
201        )?;
202        state.serialize_field(
203            "gpu_encode_effective_inflight_tiles",
204            &self.gpu_encode_effective_inflight_tiles,
205        )?;
206        state.serialize_field(
207            "gpu_encode_max_observed_inflight_tiles",
208            &self.gpu_encode_max_observed_inflight_tiles,
209        )?;
210        state.serialize_field(
211            "gpu_encode_configured_memory_mib",
212            &self.gpu_encode_configured_memory_mib,
213        )?;
214        state.serialize_field(
215            "gpu_encode_effective_memory_mib",
216            &self.gpu_encode_effective_memory_mib,
217        )?;
218        state.serialize_field("gpu_encode_wall_micros", &self.gpu_encode_wall_micros)?;
219        state.serialize_field(
220            "gpu_encode_effective_parallelism",
221            &self.gpu_encode_effective_parallelism(),
222        )?;
223        state.serialize_field(
224            "gpu_encode_hardware_micros",
225            &self.gpu_encode_hardware_micros,
226        )?;
227        state.serialize_field(
228            "gpu_encode_dispatch_overhead_micros",
229            &self.gpu_encode_dispatch_overhead_micros,
230        )?;
231        state.serialize_field("write_micros", &self.write_micros)?;
232        state.end()
233    }
234}
235
impl DicomExportMetrics {
    /// Frames served by copying the source's already-compressed bytes
    /// (JPEG + JPEG 2000 passthrough) rather than re-encoding them.
    pub fn route_passthrough_frames(&self) -> u64 {
        self.jpeg_passthrough_frames
            .saturating_add(self.j2k_passthrough_frames)
    }

    /// Frames not attributed to any route class (passthrough, GPU transcode,
    /// or CPU fallback). A non-zero result indicates a classification gap.
    pub fn route_unclassified_frames(&self) -> u64 {
        self.total_frames
            .saturating_sub(self.route_passthrough_frames())
            .saturating_sub(self.gpu_transcode_frames)
            .saturating_sub(self.cpu_fallback_frames)
    }

    /// Merges `other` into `self`. Counters and timing totals are
    /// saturating-summed, while the GPU-encode capacity fields
    /// (`gpu_encode_*_inflight_tiles`, `gpu_encode_*_memory_mib`) take the
    /// maximum, since those describe configuration/high-water marks rather
    /// than accumulated work.
    pub(crate) fn add_assign(&mut self, other: Self) {
        self.total_frames = self.total_frames.saturating_add(other.total_frames);
        self.cpu_input_frames = self.cpu_input_frames.saturating_add(other.cpu_input_frames);
        self.gpu_input_decode_frames = self
            .gpu_input_decode_frames
            .saturating_add(other.gpu_input_decode_frames);
        self.gpu_encode_frames = self
            .gpu_encode_frames
            .saturating_add(other.gpu_encode_frames);
        self.gpu_validation_frames = self
            .gpu_validation_frames
            .saturating_add(other.gpu_validation_frames);
        self.gray_frames = self.gray_frames.saturating_add(other.gray_frames);
        self.rgb_like_frames = self.rgb_like_frames.saturating_add(other.rgb_like_frames);
        self.other_component_frames = self
            .other_component_frames
            .saturating_add(other.other_component_frames);
        self.unknown_pixel_profile_frames = self
            .unknown_pixel_profile_frames
            .saturating_add(other.unknown_pixel_profile_frames);
        self.bits8_frames = self.bits8_frames.saturating_add(other.bits8_frames);
        self.bits16_frames = self.bits16_frames.saturating_add(other.bits16_frames);
        self.other_bit_depth_frames = self
            .other_bit_depth_frames
            .saturating_add(other.other_bit_depth_frames);
        self.gpu_transcode_frames = self
            .gpu_transcode_frames
            .saturating_add(other.gpu_transcode_frames);
        self.resident_gpu_transcode_frames = self
            .resident_gpu_transcode_frames
            .saturating_add(other.resident_gpu_transcode_frames);
        self.partial_gpu_transcode_frames = self
            .partial_gpu_transcode_frames
            .saturating_add(other.partial_gpu_transcode_frames);
        self.gpu_input_decode_batches = self
            .gpu_input_decode_batches
            .saturating_add(other.gpu_input_decode_batches);
        self.gpu_compose_batches = self
            .gpu_compose_batches
            .saturating_add(other.gpu_compose_batches);
        self.gpu_encode_batches = self
            .gpu_encode_batches
            .saturating_add(other.gpu_encode_batches);
        self.auto_route_probe_frames = self
            .auto_route_probe_frames
            .saturating_add(other.auto_route_probe_frames);
        self.auto_route_probe_gpu_batches = self
            .auto_route_probe_gpu_batches
            .saturating_add(other.auto_route_probe_gpu_batches);
        self.auto_route_probe_cpu_micros = self
            .auto_route_probe_cpu_micros
            .saturating_add(other.auto_route_probe_cpu_micros);
        self.auto_route_probe_gpu_micros = self
            .auto_route_probe_gpu_micros
            .saturating_add(other.auto_route_probe_gpu_micros);
        self.auto_route_probe_selected_gpu_input_frames = self
            .auto_route_probe_selected_gpu_input_frames
            .saturating_add(other.auto_route_probe_selected_gpu_input_frames);
        self.cpu_fallback_frames = self
            .cpu_fallback_frames
            .saturating_add(other.cpu_fallback_frames);
        self.jpeg_passthrough_frames = self
            .jpeg_passthrough_frames
            .saturating_add(other.jpeg_passthrough_frames);
        self.j2k_passthrough_frames = self
            .j2k_passthrough_frames
            .saturating_add(other.j2k_passthrough_frames);
        self.jpeg_cpu_encode_frames = self
            .jpeg_cpu_encode_frames
            .saturating_add(other.jpeg_cpu_encode_frames);
        self.jpeg_metal_encode_frames = self
            .jpeg_metal_encode_frames
            .saturating_add(other.jpeg_metal_encode_frames);
        self.jpeg_decode_fallback_frames = self
            .jpeg_decode_fallback_frames
            .saturating_add(other.jpeg_decode_fallback_frames);
        self.input_decode_micros = self
            .input_decode_micros
            .saturating_add(other.input_decode_micros);
        self.compose_micros = self.compose_micros.saturating_add(other.compose_micros);
        self.encode_micros = self.encode_micros.saturating_add(other.encode_micros);
        self.validation_micros = self
            .validation_micros
            .saturating_add(other.validation_micros);
        self.gpu_dispatch_micros = self
            .gpu_dispatch_micros
            .saturating_add(other.gpu_dispatch_micros);
        // Capacity/high-water figures: keep the maximum, not the sum.
        self.gpu_encode_configured_inflight_tiles = self
            .gpu_encode_configured_inflight_tiles
            .max(other.gpu_encode_configured_inflight_tiles);
        self.gpu_encode_effective_inflight_tiles = self
            .gpu_encode_effective_inflight_tiles
            .max(other.gpu_encode_effective_inflight_tiles);
        self.gpu_encode_max_observed_inflight_tiles = self
            .gpu_encode_max_observed_inflight_tiles
            .max(other.gpu_encode_max_observed_inflight_tiles);
        self.gpu_encode_configured_memory_mib = self
            .gpu_encode_configured_memory_mib
            .max(other.gpu_encode_configured_memory_mib);
        self.gpu_encode_effective_memory_mib = self
            .gpu_encode_effective_memory_mib
            .max(other.gpu_encode_effective_memory_mib);
        self.gpu_encode_wall_micros = self
            .gpu_encode_wall_micros
            .saturating_add(other.gpu_encode_wall_micros);
        self.gpu_encode_hardware_micros = self
            .gpu_encode_hardware_micros
            .saturating_add(other.gpu_encode_hardware_micros);
        self.gpu_encode_dispatch_overhead_micros = self
            .gpu_encode_dispatch_overhead_micros
            .saturating_add(other.gpu_encode_dispatch_overhead_micros);
        self.write_micros = self.write_micros.saturating_add(other.write_micros);
    }

    /// Records one frame whose input pixels were decoded on the CPU.
    pub(crate) fn record_cpu_input(&mut self) {
        self.total_frames = self.total_frames.saturating_add(1);
        self.cpu_input_frames = self.cpu_input_frames.saturating_add(1);
    }

    /// Records one frame whose input pixels were decoded on the GPU.
    pub(crate) fn record_gpu_input(&mut self) {
        self.total_frames = self.total_frames.saturating_add(1);
        self.gpu_input_decode_frames = self.gpu_input_decode_frames.saturating_add(1);
    }

    /// Records one JPEG passthrough frame (source bytes reused verbatim).
    /// Despite the generic name, this counts into `jpeg_passthrough_frames`;
    /// JPEG 2000 passthrough has its own method below.
    pub(crate) fn record_passthrough_frame(&mut self) {
        self.total_frames = self.total_frames.saturating_add(1);
        self.jpeg_passthrough_frames = self.jpeg_passthrough_frames.saturating_add(1);
    }

    /// Records one JPEG 2000 passthrough frame (source bytes reused verbatim).
    pub(crate) fn record_j2k_passthrough_frame(&mut self) {
        self.total_frames = self.total_frames.saturating_add(1);
        self.j2k_passthrough_frames = self.j2k_passthrough_frames.saturating_add(1);
    }

    /// Buckets one frame by pixel profile: component count (1 = gray,
    /// 3 = RGB-like, anything else = other) and bit depth (8, 16, other).
    /// Does not touch `total_frames`.
    pub(crate) fn record_pixel_profile(&mut self, profile: PixelProfile) {
        match profile.components {
            1 => self.gray_frames = self.gray_frames.saturating_add(1),
            3 => self.rgb_like_frames = self.rgb_like_frames.saturating_add(1),
            _ => {
                self.other_component_frames = self.other_component_frames.saturating_add(1);
            }
        }
        match profile.bits_allocated {
            8 => self.bits8_frames = self.bits8_frames.saturating_add(1),
            16 => self.bits16_frames = self.bits16_frames.saturating_add(1),
            _ => self.other_bit_depth_frames = self.other_bit_depth_frames.saturating_add(1),
        }
    }

    /// Records a frame whose pixel profile could not be determined.
    /// Does not touch `total_frames`.
    pub(crate) fn record_unknown_pixel_profile(&mut self) {
        self.unknown_pixel_profile_frames = self.unknown_pixel_profile_frames.saturating_add(1);
    }

    /// Classifies one transcoded frame's route: any GPU involvement counts as
    /// a GPU transcode — fully "resident" when both input decode and encode
    /// ran on the GPU, "partial" when only one did — and no GPU involvement
    /// counts as a CPU fallback. Does not touch `total_frames`.
    pub(crate) fn record_transcode_route(&mut self, used_gpu_input: bool, used_gpu_encode: bool) {
        if used_gpu_input || used_gpu_encode {
            self.gpu_transcode_frames = self.gpu_transcode_frames.saturating_add(1);
            if used_gpu_input && used_gpu_encode {
                self.resident_gpu_transcode_frames =
                    self.resident_gpu_transcode_frames.saturating_add(1);
            } else {
                self.partial_gpu_transcode_frames =
                    self.partial_gpu_transcode_frames.saturating_add(1);
            }
        } else {
            self.cpu_fallback_frames = self.cpu_fallback_frames.saturating_add(1);
        }
    }

    /// Accumulates per-stage GPU batch counts.
    pub(crate) fn record_gpu_batches(
        &mut self,
        input_decode_batches: u64,
        compose_batches: u64,
        encode_batches: u64,
    ) {
        self.gpu_input_decode_batches = self
            .gpu_input_decode_batches
            .saturating_add(input_decode_batches);
        self.gpu_compose_batches = self.gpu_compose_batches.saturating_add(compose_batches);
        self.gpu_encode_batches = self.gpu_encode_batches.saturating_add(encode_batches);
    }

    /// Folds one GPU-encode batch's statistics in: tile/memory figures are
    /// tracked as maxima across batches, wall time is accumulated. Absent
    /// configured values are treated as 0 so they never win the max.
    #[cfg(all(feature = "metal", target_os = "macos"))]
    pub(crate) fn record_gpu_encode_batch_stats(
        &mut self,
        stats: encode::DicomJ2kGpuEncodeBatchStats,
    ) {
        self.gpu_encode_configured_inflight_tiles = self
            .gpu_encode_configured_inflight_tiles
            .max(stats.configured_inflight_tiles.unwrap_or(0) as u64);
        self.gpu_encode_effective_inflight_tiles = self
            .gpu_encode_effective_inflight_tiles
            .max(stats.effective_inflight_tiles as u64);
        self.gpu_encode_max_observed_inflight_tiles = self
            .gpu_encode_max_observed_inflight_tiles
            .max(stats.max_observed_inflight_tiles as u64);
        self.gpu_encode_configured_memory_mib = self
            .gpu_encode_configured_memory_mib
            .max(stats.configured_memory_mib.unwrap_or(0));
        self.gpu_encode_effective_memory_mib = self
            .gpu_encode_effective_memory_mib
            .max(stats.effective_memory_mib);
        self.gpu_encode_wall_micros = self
            .gpu_encode_wall_micros
            .saturating_add(duration_as_reported_micros(stats.encode_wall_duration));
    }

    /// Records an auto-route probe: `frames` frames were trial-run on both
    /// CPU and GPU paths, and `selected_gpu_input` says whether the GPU
    /// input path won.
    #[cfg(all(feature = "metal", target_os = "macos"))]
    pub(crate) fn record_auto_route_probe(
        &mut self,
        frames: u64,
        cpu_duration: Duration,
        gpu_duration: Duration,
        gpu_batches: u64,
        selected_gpu_input: bool,
    ) {
        self.auto_route_probe_frames = self.auto_route_probe_frames.saturating_add(frames);
        self.auto_route_probe_gpu_batches = self
            .auto_route_probe_gpu_batches
            .saturating_add(gpu_batches);
        self.auto_route_probe_cpu_micros = self
            .auto_route_probe_cpu_micros
            .saturating_add(duration_as_reported_micros(cpu_duration));
        self.auto_route_probe_gpu_micros = self
            .auto_route_probe_gpu_micros
            .saturating_add(duration_as_reported_micros(gpu_duration));
        if selected_gpu_input {
            self.auto_route_probe_selected_gpu_input_frames = self
                .auto_route_probe_selected_gpu_input_frames
                .saturating_add(frames);
        }
    }

    /// Records one frame that fell back to a full JPEG decode.
    /// Does not touch `total_frames`.
    pub(crate) fn record_jpeg_decode_fallback(&mut self) {
        self.jpeg_decode_fallback_frames = self.jpeg_decode_fallback_frames.saturating_add(1);
    }

    /// Route-classifies one JPEG frame that fell back to the CPU decode path:
    /// counts it as a total frame, a CPU fallback, a JPEG decode fallback,
    /// and an unknown pixel profile in one step.
    pub(crate) fn record_jpeg_cpu_fallback_route_classification(&mut self) {
        self.total_frames = self.total_frames.saturating_add(1);
        self.cpu_fallback_frames = self.cpu_fallback_frames.saturating_add(1);
        self.jpeg_decode_fallback_frames = self.jpeg_decode_fallback_frames.saturating_add(1);
        self.record_unknown_pixel_profile();
    }

    /// Route-classifies one frame that could only have been served by J2K
    /// passthrough but fell back: counts it as a total frame, a CPU fallback,
    /// and an unknown pixel profile.
    pub(crate) fn record_j2k_passthrough_only_fallback_classification(&mut self) {
        self.total_frames = self.total_frames.saturating_add(1);
        self.cpu_fallback_frames = self.cpu_fallback_frames.saturating_add(1);
        self.record_unknown_pixel_profile();
    }

    /// Records one CPU-encoded JPEG frame and its encode time.
    pub(crate) fn record_jpeg_cpu_encode(&mut self, duration: Duration) {
        self.jpeg_cpu_encode_frames = self.jpeg_cpu_encode_frames.saturating_add(1);
        self.record_encode_duration(duration);
    }

    /// Records a Metal-encoded JPEG batch of `frames` frames. The batch
    /// duration is counted both as encode time and as GPU dispatch time.
    pub(crate) fn record_jpeg_metal_batch_encode(&mut self, frames: u64, duration: Duration) {
        self.jpeg_metal_encode_frames = self.jpeg_metal_encode_frames.saturating_add(frames);
        self.record_encode_duration(duration);
        self.record_gpu_dispatch_duration(duration);
    }

    /// Folds one encoded J2K frame's outcome in: GPU encode/validation usage
    /// bumps the corresponding frame counters and GPU dispatch time, and the
    /// encode/validation durations are always accumulated.
    pub(crate) fn record_encoded_frame(&mut self, encoded: &encode::EncodedDicomJ2kFrame) {
        if encoded.used_device_encode {
            self.gpu_encode_frames = self.gpu_encode_frames.saturating_add(1);
            self.record_gpu_dispatch_duration(encoded.encode_duration);
            self.record_gpu_encode_hardware_duration(
                encoded.device_gpu_duration,
                encoded.encode_duration,
            );
        }
        if encoded.used_device_validation {
            self.gpu_validation_frames = self.gpu_validation_frames.saturating_add(1);
            self.record_gpu_dispatch_duration(encoded.validation_duration);
        }
        self.record_encode_duration(encoded.encode_duration);
        self.record_validation_duration(encoded.validation_duration);
    }

    /// Accumulates input-decode time.
    pub(crate) fn record_input_decode_duration(&mut self, duration: Duration) {
        self.input_decode_micros = self
            .input_decode_micros
            .saturating_add(duration_as_reported_micros(duration));
    }

    /// Accumulates GPU input-decode time into both the input-decode and
    /// GPU-dispatch totals.
    pub(crate) fn record_gpu_input_decode_duration(&mut self, duration: Duration) {
        self.record_input_decode_duration(duration);
        self.record_gpu_dispatch_duration(duration);
    }

    /// Accumulates compose time.
    pub(crate) fn record_compose_duration(&mut self, duration: Duration) {
        self.compose_micros = self
            .compose_micros
            .saturating_add(duration_as_reported_micros(duration));
    }

    /// Accumulates GPU compose time into both the compose and GPU-dispatch
    /// totals.
    #[cfg(all(feature = "metal", target_os = "macos"))]
    pub(crate) fn record_gpu_compose_duration(&mut self, duration: Duration) {
        self.record_compose_duration(duration);
        self.record_gpu_dispatch_duration(duration);
    }

    /// Accumulates encode time.
    pub(crate) fn record_encode_duration(&mut self, duration: Duration) {
        self.encode_micros = self
            .encode_micros
            .saturating_add(duration_as_reported_micros(duration));
    }

    /// Accumulates validation time.
    pub(crate) fn record_validation_duration(&mut self, duration: Duration) {
        self.validation_micros = self
            .validation_micros
            .saturating_add(duration_as_reported_micros(duration));
    }

    /// Accumulates GPU dispatch time.
    pub(crate) fn record_gpu_dispatch_duration(&mut self, duration: Duration) {
        self.gpu_dispatch_micros = self
            .gpu_dispatch_micros
            .saturating_add(duration_as_reported_micros(duration));
    }

    /// Accumulates measured GPU hardware time and derives dispatch overhead
    /// as `dispatch_duration - gpu_duration` (saturating). A `None` hardware
    /// duration means no device measurement was available; nothing is added.
    pub(crate) fn record_gpu_encode_hardware_duration(
        &mut self,
        gpu_duration: Option<Duration>,
        dispatch_duration: Duration,
    ) {
        let Some(gpu_duration) = gpu_duration else {
            return;
        };
        let hardware_micros = duration_as_reported_micros(gpu_duration);
        self.gpu_encode_hardware_micros = self
            .gpu_encode_hardware_micros
            .saturating_add(hardware_micros);
        let overhead = dispatch_duration.saturating_sub(gpu_duration);
        self.gpu_encode_dispatch_overhead_micros = self
            .gpu_encode_dispatch_overhead_micros
            .saturating_add(duration_as_reported_micros(overhead));
    }

    /// Accumulates file-write time.
    pub(crate) fn record_write_duration(&mut self, duration: Duration) {
        self.write_micros = self
            .write_micros
            .saturating_add(duration_as_reported_micros(duration));
    }

    /// Ratio of accumulated GPU hardware time to GPU-encode wall time; values
    /// above 1.0 indicate overlapping in-flight encodes. Returns 0.0 when no
    /// wall time has been recorded (avoids division by zero).
    pub fn gpu_encode_effective_parallelism(&self) -> f64 {
        if self.gpu_encode_wall_micros == 0 {
            0.0
        } else {
            self.gpu_encode_hardware_micros as f64 / self.gpu_encode_wall_micros as f64
        }
    }
}
599
/// Converts a `Duration` into the microsecond count used by report fields.
///
/// Any non-zero duration shorter than one microsecond is rounded up to 1 so
/// that work which measurably happened is never reported as zero; a true
/// zero duration stays 0.
pub(crate) fn duration_as_reported_micros(duration: Duration) -> u128 {
    let micros = duration.as_micros();
    if micros == 0 && !duration.is_zero() {
        1
    } else {
        micros
    }
}