// volren_gpu/renderer.rs

//! Main volume renderer: wgpu pipeline creation and render execution.
2
3use std::sync::Arc;
4
5use bytemuck::{Pod, Zeroable};
6use glam::{DMat3, DMat4, DVec2, DVec3, DVec4};
7use half::f16;
8use volren_core::{
9    camera::Camera,
10    render_params::{BlendMode, ClipPlane, VolumeRenderParams},
11    reslice::{SlicePlane, ThickSlabMode, ThickSlabParams},
12    transfer_function::{OpacityTransferFunction, TransferFunctionLut},
13    volume::{DynVolume, VolumeInfo},
14    window_level::WindowLevel,
15};
16
17use crate::{
18    texture::GpuVolumeTexture,
19    uniforms::{blend_mode as bm, VolumeUniforms},
20};
21
// Shader sources are embedded at compile time. The raycasting shader is
// assembled from several shared WGSL modules joined with newlines so they
// compile as a single module.
const VOLUME_SHADER_SRC: &str = concat!(
    include_str!("shaders/common.wgsl"),
    "\n",
    include_str!("shaders/fullscreen_quad.wgsl"),
    "\n",
    include_str!("shaders/gradient.wgsl"),
    "\n",
    include_str!("shaders/shading.wgsl"),
    "\n",
    include_str!("shaders/volume_raycast.wgsl"),
);
// Self-contained WGSL sources for the auxiliary pipelines.
const RESLICE_SHADER_SRC: &str = include_str!("shaders/reslice.wgsl");
const CROSSHAIR_SHADER_SRC: &str = include_str!("shaders/crosshair.wgsl");
const BLIT_SHADER_SRC: &str = include_str!("shaders/blit_rgba.wgsl");
36
/// Rectangular sub-region of the render target, in pixels.
#[derive(Debug, Clone, Copy)]
pub struct Viewport {
    /// Horizontal offset from the left edge.
    pub x: u32,
    /// Vertical offset from the top edge.
    pub y: u32,
    /// Width in pixels.
    pub width: u32,
    /// Height in pixels.
    pub height: u32,
}

impl Viewport {
    /// Create a viewport covering the full render target, anchored at the origin.
    #[must_use]
    pub fn full(width: u32, height: u32) -> Self {
        Self { x: 0, y: 0, width, height }
    }
}
62
/// Errors that can occur during rendering.
///
/// All variants describe missing prerequisites or invalid arguments supplied
/// by the caller; GPU/device failures are not reported through this type.
#[derive(Debug, thiserror::Error)]
pub enum RenderError {
    /// No volume has been uploaded yet; call `set_volume()` first.
    #[error("no volume data uploaded — call `set_volume()` first")]
    NoVolume,
    /// No transfer-function data has been uploaded yet; call
    /// `set_render_params()` (or `set_transfer_function()`) first.
    #[error("no render parameters uploaded — call `set_render_params()` first")]
    NoTransferFunction,
    /// Viewport has zero area (width or height is 0).
    #[error("viewport has zero area")]
    ZeroViewport,
}
76
/// Parameters for rendering crosshair overlay lines on a 2D slice viewport.
#[derive(Debug, Clone)]
pub struct CrosshairParams {
    /// Normalised position in `[0, 1] × [0, 1]` on the slice.
    pub position: [f32; 2],
    /// Line colour for the horizontal line (RGBA, 0–1).
    pub horizontal_color: [f32; 4],
    /// Line colour for the vertical line (RGBA, 0–1).
    pub vertical_color: [f32; 4],
    /// Line thickness in pixels.
    pub thickness: f32,
}

impl Default for CrosshairParams {
    /// Centre of the slice; red horizontal line, green vertical line, 1 px thick.
    fn default() -> Self {
        CrosshairParams {
            position: [0.5, 0.5],
            horizontal_color: [1.0, 0.0, 0.0, 1.0],
            vertical_color: [0.0, 1.0, 0.0, 1.0],
            thickness: 1.0,
        }
    }
}
100
/// Patient orientation labels for the orientation marker.
#[derive(Debug, Clone)]
pub struct OrientationLabels {
    /// Label for the right direction (+X).
    pub right: String,
    /// Label for the left direction (−X).
    pub left: String,
    /// Label for the anterior direction (+Y).
    pub anterior: String,
    /// Label for the posterior direction (−Y).
    pub posterior: String,
    /// Label for the superior direction (+Z).
    pub superior: String,
    /// Label for the inferior direction (−Z).
    pub inferior: String,
}

impl Default for OrientationLabels {
    /// Standard single-letter anatomical labels: R/L, A/P, S/I.
    fn default() -> Self {
        OrientationLabels {
            right: String::from("R"),
            left: String::from("L"),
            anterior: String::from("A"),
            posterior: String::from("P"),
            superior: String::from("S"),
            inferior: String::from("I"),
        }
    }
}
130
131#[derive(Debug, Clone, Copy)]
132struct VolumeMetadata {
133    world_to_volume: [[f32; 4]; 4],
134    volume_to_world: [[f32; 4]; 4],
135    dimensions: [f32; 3],
136    spacing: [f32; 3],
137    scalar_range: [f32; 2],
138}
139
140impl VolumeMetadata {
141    fn from_volume(volume: &DynVolume) -> Self {
142        let dimensions = volume.dimensions().as_dvec3();
143        let spacing = volume.spacing();
144        let direction = mat4_from_direction(volume.direction());
145        let scale = DVec3::new(
146            (dimensions.x - 1.0).max(1.0) * spacing.x,
147            (dimensions.y - 1.0).max(1.0) * spacing.y,
148            (dimensions.z - 1.0).max(1.0) * spacing.z,
149        );
150        let volume_to_world =
151            DMat4::from_translation(volume.origin()) * direction * DMat4::from_scale(scale);
152        let world_to_volume = volume_to_world.inverse();
153        let (scalar_min, scalar_max) = volume.scalar_range();
154
155        Self {
156            world_to_volume: world_to_volume.as_mat4().to_cols_array_2d(),
157            volume_to_world: volume_to_world.as_mat4().to_cols_array_2d(),
158            dimensions: [
159                dimensions.x as f32,
160                dimensions.y as f32,
161                dimensions.z as f32,
162            ],
163            spacing: [spacing.x as f32, spacing.y as f32, spacing.z as f32],
164            scalar_range: [scalar_min as f32, scalar_max as f32],
165        }
166    }
167}
168
/// Uniform block for the reslice (MPR) pass.
///
/// `#[repr(C)]` plus `Pod`/`Zeroable` let the struct be copied byte-wise into
/// a GPU buffer; vectors are padded out to 16 bytes.
/// NOTE(review): field order presumably mirrors the uniform struct in
/// `shaders/reslice.wgsl` — confirm when editing either side.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct SliceUniforms {
    /// World → volume transform (column-major).
    world_to_volume: [[f32; 4]; 4],
    /// Slice plane origin (xyz; w is padding, set to 0).
    slice_origin: [f32; 4],
    /// Plane basis vector for the slice's horizontal axis (xyz; w padding).
    slice_right: [f32; 4],
    /// Plane basis vector for the slice's vertical axis (xyz; w padding).
    slice_up: [f32; 4],
    /// Plane normal (xyz; w padding).
    slice_normal: [f32; 4],
    /// `[width, height, slab half-thickness, unused]`.
    slice_extent: [f32; 4],
    /// `[center, width, unused, unused]`.
    window_level: [f32; 4],
    /// `[thick-slab mode code, sample count, unused, unused]`.
    slab_params: [u32; 4],
}
181
/// Uniform block for the crosshair overlay pass.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct CrosshairUniforms {
    /// `[x, y, line thickness, unused]`; x/y are normalised to `[0, 1]`.
    position: [f32; 4],
    /// RGBA colour of the horizontal line.
    horizontal_color: [f32; 4],
    /// RGBA colour of the vertical line.
    vertical_color: [f32; 4],
    /// `[width, height, unused, unused]` of the viewport in pixels.
    viewport: [f32; 4],
}
190
/// A fully GPU-resident volume renderer.
///
/// The renderer stores the uploaded 3D texture plus the metadata needed for
/// raycasting and reslicing. Call [`VolumeRenderer::set_render_params`] after
/// uploading a volume to bake the active transfer functions.
pub struct VolumeRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,

    // Raycasting pass resources.
    volume_pipeline: wgpu::RenderPipeline,
    volume_bind_group_layout: wgpu::BindGroupLayout,
    volume_uniform_buffer: wgpu::Buffer,

    // MPR reslice pass resources.
    slice_pipeline: wgpu::RenderPipeline,
    slice_bind_group_layout: wgpu::BindGroupLayout,
    slice_uniform_buffer: wgpu::Buffer,

    // Crosshair overlay pass; its bind group only references the uniform
    // buffer, so it can be built once at construction time.
    crosshair_pipeline: wgpu::RenderPipeline,
    crosshair_uniform_buffer: wgpu::Buffer,
    crosshair_bind_group: wgpu::BindGroup,

    // Textured-quad blit pass (upscaling and CPU-image compositing).
    blit_pipeline: wgpu::RenderPipeline,
    blit_bind_group_layout: wgpu::BindGroupLayout,
    blit_sampler: wgpu::Sampler,

    // Colour/opacity transfer-function LUT (1D texture), replaced by
    // `set_transfer_function`.
    lut_texture: wgpu::Texture,
    lut_view: wgpu::TextureView,
    lut_sampler: wgpu::Sampler,
    // Gradient-opacity LUT, replaced by `upload_gradient_lut`.
    gradient_lut_texture: wgpu::Texture,
    gradient_lut_view: wgpu::TextureView,
    gradient_lut_sampler: wgpu::Sampler,

    // Per-volume state; `None` until `set_volume` has been called.
    volume_texture: Option<GpuVolumeTexture>,
    volume_bind_group: Option<wgpu::BindGroup>,
    slice_bind_group: Option<wgpu::BindGroup>,
    volume_metadata: Option<VolumeMetadata>,
    // True once a transfer function has been uploaded.
    has_render_params: bool,

    output_format: wgpu::TextureFormat,
    // Last size passed to `resize`.
    viewport_size: (u32, u32),
}
232
233impl VolumeRenderer {
234    /// Create a new renderer for the given device and output format.
235    #[must_use]
236    pub fn new(
237        device: &wgpu::Device,
238        queue: &wgpu::Queue,
239        output_format: wgpu::TextureFormat,
240    ) -> Self {
241        Self::from_arc(
242            Arc::new(device.clone()),
243            Arc::new(queue.clone()),
244            output_format,
245        )
246    }
247
    /// Create a renderer from `Arc`-wrapped device and queue.
    ///
    /// Builds all four pipelines (raycast, reslice, crosshair, blit), their
    /// bind-group layouts and uniform buffers, and placeholder LUT textures.
    /// Volume data and transfer functions are uploaded separately via
    /// `set_volume` / `set_render_params`.
    #[must_use]
    pub fn from_arc(
        device: Arc<wgpu::Device>,
        queue: Arc<wgpu::Queue>,
        output_format: wgpu::TextureFormat,
    ) -> Self {
        // Compile the four WGSL shader modules up front.
        let volume_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_volume_shader"),
            source: wgpu::ShaderSource::Wgsl(VOLUME_SHADER_SRC.into()),
        });
        let slice_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_reslice_shader"),
            source: wgpu::ShaderSource::Wgsl(RESLICE_SHADER_SRC.into()),
        });
        let crosshair_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_crosshair_shader"),
            source: wgpu::ShaderSource::Wgsl(CROSSHAIR_SHADER_SRC.into()),
        });
        let blit_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_blit_shader"),
            source: wgpu::ShaderSource::Wgsl(BLIT_SHADER_SRC.into()),
        });

        let volume_bind_group_layout = Self::create_volume_bind_group_layout(&device);
        let slice_bind_group_layout = Self::create_slice_bind_group_layout(&device);
        let crosshair_bind_group_layout = Self::create_crosshair_bind_group_layout(&device);
        let blit_bind_group_layout = Self::create_blit_bind_group_layout(&device);

        // Every pass alpha-blends into the output attachment.
        let volume_pipeline = Self::create_pipeline(
            &device,
            &volume_shader,
            &volume_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let slice_pipeline = Self::create_pipeline(
            &device,
            &slice_shader,
            &slice_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let crosshair_pipeline = Self::create_pipeline(
            &device,
            &crosshair_shader,
            &crosshair_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let blit_pipeline = Self::create_pipeline(
            &device,
            &blit_shader,
            &blit_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );

        // Uniform buffers sized exactly to their CPU-side structs; refilled
        // per draw via `Queue::write_buffer`.
        let volume_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_volume_uniforms"),
            size: std::mem::size_of::<VolumeUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let slice_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_slice_uniforms"),
            size: std::mem::size_of::<SliceUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let crosshair_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_crosshair_uniforms"),
            size: std::mem::size_of::<CrosshairUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // The crosshair pass binds only its uniform buffer, so its bind group
        // never needs rebuilding.
        let crosshair_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_crosshair_bind_group"),
            layout: &crosshair_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: crosshair_uniform_buffer.as_entire_binding(),
            }],
        });

        // Placeholder LUT textures; actual LUT data is uploaded later by
        // `set_transfer_function` / `upload_gradient_lut`.
        let (lut_texture, lut_view, lut_sampler) = Self::create_lut_texture(&device, 4096);
        let (gradient_lut_texture, gradient_lut_view, gradient_lut_sampler) =
            Self::create_lut_texture(&device, 1024);
        let blit_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("volren_blit_sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        Self {
            device,
            queue,
            volume_pipeline,
            volume_bind_group_layout,
            volume_uniform_buffer,
            slice_pipeline,
            slice_bind_group_layout,
            slice_uniform_buffer,
            crosshair_pipeline,
            crosshair_uniform_buffer,
            crosshair_bind_group,
            blit_pipeline,
            blit_bind_group_layout,
            blit_sampler,
            lut_texture,
            lut_view,
            lut_sampler,
            gradient_lut_texture,
            gradient_lut_view,
            gradient_lut_sampler,
            volume_texture: None,
            volume_bind_group: None,
            slice_bind_group: None,
            volume_metadata: None,
            has_render_params: false,
            output_format,
            viewport_size: (0, 0),
        }
    }
376
377    /// The texture format this renderer outputs into.
378    #[must_use]
379    pub fn output_format(&self) -> wgpu::TextureFormat {
380        self.output_format
381    }
382
383    /// Upload (or replace) volume data as a 3D GPU texture.
384    pub fn set_volume(&mut self, volume: &DynVolume, linear_interpolation: bool) {
385        self.volume_texture = Some(GpuVolumeTexture::upload(
386            &self.device,
387            &self.queue,
388            volume,
389            linear_interpolation,
390        ));
391        self.volume_metadata = Some(VolumeMetadata::from_volume(volume));
392        self.rebuild_bind_groups();
393    }
394
395    /// Upload a baked transfer-function LUT to the GPU.
396    pub fn set_transfer_function(&mut self, lut: &TransferFunctionLut) {
397        let (texture, view, sampler) = Self::create_lut_texture(&self.device, lut.lut_size());
398        let f16_bytes = f32_slice_to_f16_bytes(lut.as_rgba_f32());
399        self.queue.write_texture(
400            texture.as_image_copy(),
401            &f16_bytes,
402            wgpu::TexelCopyBufferLayout {
403                offset: 0,
404                bytes_per_row: Some(lut.lut_size() * 4 * 2),
405                rows_per_image: None,
406            },
407            wgpu::Extent3d {
408                width: lut.lut_size(),
409                height: 1,
410                depth_or_array_layers: 1,
411            },
412        );
413        self.lut_texture = texture;
414        self.lut_view = view;
415        self.lut_sampler = sampler;
416        self.has_render_params = true;
417        self.rebuild_bind_groups();
418    }
419
420    /// Bake and upload transfer functions from the current render parameters.
421    ///
422    /// # Errors
423    /// Returns [`RenderError::NoVolume`] if no volume metadata has been uploaded yet.
424    pub fn set_render_params(&mut self, params: &VolumeRenderParams) -> Result<(), RenderError> {
425        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
426        let lut = TransferFunctionLut::bake(
427            &params.color_tf,
428            &params.opacity_tf,
429            f64::from(metadata.scalar_range[0]),
430            f64::from(metadata.scalar_range[1]),
431            4096,
432        );
433        self.set_transfer_function(&lut);
434        let gradient_tf = params
435            .gradient_opacity_tf
436            .clone()
437            .unwrap_or_else(opaque_unit_ramp);
438        self.upload_gradient_lut(&gradient_tf);
439        Ok(())
440    }
441
442    /// Handle viewport resize.
443    pub fn resize(&mut self, width: u32, height: u32) {
444        self.viewport_size = (width, height);
445    }
446
447    /// Create an off-screen render target texture.
448    #[must_use]
449    pub fn create_render_target(&self, width: u32, height: u32) -> wgpu::Texture {
450        self.device.create_texture(&wgpu::TextureDescriptor {
451            label: Some("volren_offscreen_target"),
452            size: wgpu::Extent3d {
453                width,
454                height,
455                depth_or_array_layers: 1,
456            },
457            mip_level_count: 1,
458            sample_count: 1,
459            dimension: wgpu::TextureDimension::D2,
460            format: self.output_format,
461            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
462                | wgpu::TextureUsages::TEXTURE_BINDING
463                | wgpu::TextureUsages::COPY_SRC,
464            view_formats: &[],
465        })
466    }
467
468    /// Render the volume into the given color attachment.
469    ///
470    /// The caller owns the command encoder and submits it.
471    ///
472    /// # Errors
473    /// Returns [`RenderError`] if prerequisites are missing or the viewport is invalid.
474    pub fn render_volume(
475        &self,
476        encoder: &mut wgpu::CommandEncoder,
477        target: &wgpu::TextureView,
478        camera: &Camera,
479        params: &VolumeRenderParams,
480        viewport: Viewport,
481    ) -> Result<(), RenderError> {
482        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
483        let bind_group = self
484            .volume_bind_group
485            .as_ref()
486            .ok_or(RenderError::NoVolume)?;
487        validate_viewport(viewport)?;
488        if !self.has_render_params {
489            return Err(RenderError::NoTransferFunction);
490        }
491
492        let uniforms = self.build_uniforms(camera, metadata, params, viewport);
493        self.queue.write_buffer(
494            &self.volume_uniform_buffer,
495            0,
496            bytemuck::bytes_of(&uniforms),
497        );
498
499        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
500            label: Some("volren_volume_pass"),
501            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
502                view: target,
503                resolve_target: None,
504                ops: wgpu::Operations {
505                    load: wgpu::LoadOp::Load,
506                    store: wgpu::StoreOp::Store,
507                },
508            })],
509            depth_stencil_attachment: None,
510            timestamp_writes: None,
511            occlusion_query_set: None,
512        });
513        pass.set_pipeline(&self.volume_pipeline);
514        pass.set_bind_group(0, bind_group, &[]);
515        pass.set_viewport(
516            viewport.x as f32,
517            viewport.y as f32,
518            viewport.width as f32,
519            viewport.height as f32,
520            0.0,
521            1.0,
522        );
523        pass.draw(0..6, 0..1);
524        Ok(())
525    }
526
527    /// Render the volume into a newly-created off-screen texture.
528    ///
529    /// # Errors
530    /// Propagates the same errors as [`Self::render_volume`].
531    pub fn render_volume_to_texture(
532        &self,
533        encoder: &mut wgpu::CommandEncoder,
534        camera: &Camera,
535        params: &VolumeRenderParams,
536        width: u32,
537        height: u32,
538    ) -> Result<wgpu::Texture, RenderError> {
539        let texture = self.create_render_target(width, height);
540        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
541        self.render_volume(
542            encoder,
543            &view,
544            camera,
545            params,
546            Viewport::full(width, height),
547        )?;
548        Ok(texture)
549    }
550
551    /// Render the volume at reduced resolution and upscale the result into `target`.
552    ///
553    /// This is intended for interactive manipulation where responsiveness matters
554    /// more than final image quality. Passing `1` disables downsampling.
555    ///
556    /// # Errors
557    /// Propagates the same errors as [`Self::render_volume`].
558    pub fn render_volume_interactive(
559        &self,
560        encoder: &mut wgpu::CommandEncoder,
561        target: &wgpu::TextureView,
562        camera: &Camera,
563        params: &VolumeRenderParams,
564        viewport: Viewport,
565        downsample_factor: u32,
566    ) -> Result<(), RenderError> {
567        validate_viewport(viewport)?;
568        let factor = downsample_factor.max(1);
569        if factor == 1 {
570            return self.render_volume(encoder, target, camera, params, viewport);
571        }
572
573        let lod_width = (viewport.width / factor).max(1);
574        let lod_height = (viewport.height / factor).max(1);
575        let texture = self.create_render_target(lod_width, lod_height);
576        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
577
578        self.render_volume(
579            encoder,
580            &view,
581            camera,
582            params,
583            Viewport::full(lod_width, lod_height),
584        )?;
585        self.blit_texture_view(encoder, target, viewport, &view);
586        Ok(())
587    }
588
589    /// Render a 2D reslice (MPR slice) into the given color attachment.
590    ///
591    /// # Errors
592    /// Returns [`RenderError::NoVolume`] when no volume has been uploaded.
593    pub fn render_slice(
594        &self,
595        encoder: &mut wgpu::CommandEncoder,
596        target: &wgpu::TextureView,
597        slice_plane: &SlicePlane,
598        window_level: &WindowLevel,
599        viewport: Viewport,
600        thick_slab: Option<&ThickSlabParams>,
601    ) -> Result<(), RenderError> {
602        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
603        let bind_group = self
604            .slice_bind_group
605            .as_ref()
606            .ok_or(RenderError::NoVolume)?;
607        validate_viewport(viewport)?;
608
609        let uniforms = self.build_slice_uniforms(metadata, slice_plane, window_level, thick_slab);
610        self.queue
611            .write_buffer(&self.slice_uniform_buffer, 0, bytemuck::bytes_of(&uniforms));
612
613        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
614            label: Some("volren_slice_pass"),
615            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
616                view: target,
617                resolve_target: None,
618                ops: wgpu::Operations {
619                    load: wgpu::LoadOp::Load,
620                    store: wgpu::StoreOp::Store,
621                },
622            })],
623            depth_stencil_attachment: None,
624            timestamp_writes: None,
625            occlusion_query_set: None,
626        });
627        pass.set_pipeline(&self.slice_pipeline);
628        pass.set_bind_group(0, bind_group, &[]);
629        pass.set_viewport(
630            viewport.x as f32,
631            viewport.y as f32,
632            viewport.width as f32,
633            viewport.height as f32,
634            0.0,
635            1.0,
636        );
637        pass.draw(0..6, 0..1);
638        Ok(())
639    }
640
641    /// Render a slice into a newly-created off-screen texture.
642    ///
643    /// # Errors
644    /// Propagates the same errors as [`Self::render_slice`].
645    pub fn render_slice_to_texture(
646        &self,
647        encoder: &mut wgpu::CommandEncoder,
648        slice_plane: &SlicePlane,
649        window_level: &WindowLevel,
650        width: u32,
651        height: u32,
652        thick_slab: Option<&ThickSlabParams>,
653    ) -> Result<wgpu::Texture, RenderError> {
654        let texture = self.create_render_target(width, height);
655        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
656        self.render_slice(
657            encoder,
658            &view,
659            slice_plane,
660            window_level,
661            Viewport::full(width, height),
662            thick_slab,
663        )?;
664        Ok(texture)
665    }
666
667    /// Render crosshair overlay lines on a slice viewport.
668    ///
669    /// # Errors
670    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
671    pub fn render_crosshair(
672        &self,
673        encoder: &mut wgpu::CommandEncoder,
674        target: &wgpu::TextureView,
675        viewport: Viewport,
676        crosshair: &CrosshairParams,
677    ) -> Result<(), RenderError> {
678        validate_viewport(viewport)?;
679        let uniforms = CrosshairUniforms {
680            position: [
681                crosshair.position[0],
682                crosshair.position[1],
683                crosshair.thickness,
684                0.0,
685            ],
686            horizontal_color: crosshair.horizontal_color,
687            vertical_color: crosshair.vertical_color,
688            viewport: [viewport.width as f32, viewport.height as f32, 0.0, 0.0],
689        };
690        self.queue.write_buffer(
691            &self.crosshair_uniform_buffer,
692            0,
693            bytemuck::bytes_of(&uniforms),
694        );
695
696        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
697            label: Some("volren_crosshair_pass"),
698            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
699                view: target,
700                resolve_target: None,
701                ops: wgpu::Operations {
702                    load: wgpu::LoadOp::Load,
703                    store: wgpu::StoreOp::Store,
704                },
705            })],
706            depth_stencil_attachment: None,
707            timestamp_writes: None,
708            occlusion_query_set: None,
709        });
710        pass.set_pipeline(&self.crosshair_pipeline);
711        pass.set_bind_group(0, &self.crosshair_bind_group, &[]);
712        pass.set_viewport(
713            viewport.x as f32,
714            viewport.y as f32,
715            viewport.width as f32,
716            viewport.height as f32,
717            0.0,
718            1.0,
719        );
720        pass.draw(0..6, 0..1);
721        Ok(())
722    }
723
724    /// Render an orientation marker in the given viewport.
725    ///
726    /// The marker is generated on the CPU as a small RGBA image, then composited
727    /// over the target with a lightweight textured-quad pass.
728    ///
729    /// # Errors
730    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
731    pub fn render_orientation_marker(
732        &self,
733        encoder: &mut wgpu::CommandEncoder,
734        target: &wgpu::TextureView,
735        camera: &Camera,
736        viewport: Viewport,
737        labels: &OrientationLabels,
738    ) -> Result<(), RenderError> {
739        validate_viewport(viewport)?;
740        let width = viewport.width.max(1);
741        let height = viewport.height.max(1);
742        let image = build_orientation_marker_image(width, height, camera, labels);
743        self.blit_rgba8(encoder, target, viewport, width, height, &image);
744        Ok(())
745    }
746
    /// Assemble the per-draw uniform block for the raycasting pass.
    ///
    /// Camera matrices are combined in double precision and only then
    /// truncated to `f32` for the GPU.
    fn build_uniforms(
        &self,
        camera: &Camera,
        metadata: VolumeMetadata,
        params: &VolumeRenderParams,
        viewport: Viewport,
    ) -> VolumeUniforms {
        // Guard the divisor so a zero-height viewport cannot produce NaN.
        let aspect = f64::from(viewport.width) / f64::from(viewport.height.max(1));
        let view = camera.view_matrix();
        let proj = camera.projection_matrix(aspect);
        let mvp = (proj * view).as_mat4();
        let inv_mvp = (proj * view).inverse().as_mat4();

        // Map the API-level blend mode to the shader-side integer code.
        // NOTE(review): the `_` arm looks unreachable given the variants above —
        // presumably kept for a `#[non_exhaustive]` enum; confirm.
        let blend_mode = match params.blend_mode {
            BlendMode::Composite => bm::COMPOSITE,
            BlendMode::MaximumIntensity => bm::MAXIMUM_INTENSITY,
            BlendMode::MinimumIntensity => bm::MINIMUM_INTENSITY,
            BlendMode::AverageIntensity => bm::AVERAGE_INTENSITY,
            BlendMode::Additive => bm::ADDITIVE,
            BlendMode::Isosurface { .. } => bm::ISOSURFACE,
            _ => bm::COMPOSITE,
        };
        // Fall back to a window spanning the full scalar range when the
        // caller did not provide one.
        let (window_center, window_width) = params.window_level.map_or_else(
            || {
                let wl = WindowLevel::from_scalar_range(
                    f64::from(metadata.scalar_range[0]),
                    f64::from(metadata.scalar_range[1]),
                );
                (wl.center as f32, wl.width as f32)
            },
            |wl| (wl.center as f32, wl.width as f32),
        );
        // Shading coefficients are zeroed when shading is disabled.
        let (shading_enabled, ambient, diffuse, specular, specular_power) =
            if let Some(shading) = params.shading {
                (
                    1u32,
                    shading.ambient,
                    shading.diffuse,
                    shading.specular,
                    shading.specular_power,
                )
            } else {
                (0u32, 0.0, 0.0, 0.0, 0.0)
            };
        let (clip_planes, num_clip_planes) = combined_clip_planes(params);
        // Only the isosurface mode carries an iso value.
        let iso_value = match params.blend_mode {
            BlendMode::Isosurface { iso_value } => iso_value as f32,
            _ => 0.0,
        };
        let camera_position = camera.position().as_vec3();

        VolumeUniforms {
            mvp: mvp.to_cols_array_2d(),
            inv_mvp: inv_mvp.to_cols_array_2d(),
            world_to_volume: metadata.world_to_volume,
            volume_to_world: metadata.volume_to_world,
            dimensions: [
                metadata.dimensions[0],
                metadata.dimensions[1],
                metadata.dimensions[2],
                0.0,
            ],
            spacing: [
                metadata.spacing[0],
                metadata.spacing[1],
                metadata.spacing[2],
                0.0,
            ],
            // The iso value rides in the spare z slot of `scalar_range`.
            scalar_range: [
                metadata.scalar_range[0],
                metadata.scalar_range[1],
                iso_value,
                0.0,
            ],
            // Step size is clamped away from zero; the correction factor uses
            // the same clamped value so the pair stays consistent.
            step_size: params.step_size_factor.max(1e-3),
            opacity_correction: 1.0 / params.step_size_factor.max(1e-3),
            blend_mode,
            shading_enabled,
            ambient,
            diffuse,
            specular,
            specular_power,
            // Headlight: the light is co-located with the camera.
            light_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
            camera_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
            window_center,
            window_width,
            num_clip_planes,
            _pad0: 0,
            clip_planes,
            background: params.background,
        }
    }
839
840    fn build_slice_uniforms(
841        &self,
842        metadata: VolumeMetadata,
843        slice_plane: &SlicePlane,
844        window_level: &WindowLevel,
845        thick_slab: Option<&ThickSlabParams>,
846    ) -> SliceUniforms {
847        let slab_mode = thick_slab.map_or(ThickSlabMode::Mip, |params| params.mode);
848        let (half_thickness, num_samples) = thick_slab.map_or((0.0f32, 1u32), |params| {
849            (params.half_thickness as f32, params.num_samples.max(1))
850        });
851
852        SliceUniforms {
853            world_to_volume: metadata.world_to_volume,
854            slice_origin: [
855                slice_plane.origin.x as f32,
856                slice_plane.origin.y as f32,
857                slice_plane.origin.z as f32,
858                0.0,
859            ],
860            slice_right: [
861                slice_plane.right.x as f32,
862                slice_plane.right.y as f32,
863                slice_plane.right.z as f32,
864                0.0,
865            ],
866            slice_up: [
867                slice_plane.up.x as f32,
868                slice_plane.up.y as f32,
869                slice_plane.up.z as f32,
870                0.0,
871            ],
872            slice_normal: [
873                slice_plane.normal().x as f32,
874                slice_plane.normal().y as f32,
875                slice_plane.normal().z as f32,
876                0.0,
877            ],
878            slice_extent: [
879                slice_plane.width as f32,
880                slice_plane.height as f32,
881                half_thickness,
882                0.0,
883            ],
884            window_level: [
885                window_level.center as f32,
886                window_level.width as f32,
887                0.0,
888                0.0,
889            ],
890            slab_params: [thick_slab_mode_code(slab_mode), num_samples, 0, 0],
891        }
892    }
893
894    fn upload_gradient_lut(&mut self, tf: &OpacityTransferFunction) {
895        let resolution = 1024u32;
896        let f32_bytes = bake_opacity_lut_bytes(tf, resolution);
897        let f32_slice: &[f32] = bytemuck::cast_slice(&f32_bytes);
898        let f16_bytes = f32_slice_to_f16_bytes(f32_slice);
899        let (texture, view, sampler) = Self::create_lut_texture(&self.device, resolution);
900        self.queue.write_texture(
901            texture.as_image_copy(),
902            &f16_bytes,
903            wgpu::TexelCopyBufferLayout {
904                offset: 0,
905                bytes_per_row: Some(resolution * 4 * 2),
906                rows_per_image: None,
907            },
908            wgpu::Extent3d {
909                width: resolution,
910                height: 1,
911                depth_or_array_layers: 1,
912            },
913        );
914        self.gradient_lut_texture = texture;
915        self.gradient_lut_view = view;
916        self.gradient_lut_sampler = sampler;
917        self.rebuild_bind_groups();
918    }
919
920    fn rebuild_bind_groups(&mut self) {
921        let Some(volume_texture) = self.volume_texture.as_ref() else {
922            return;
923        };
924
925        self.volume_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
926            label: Some("volren_volume_bind_group"),
927            layout: &self.volume_bind_group_layout,
928            entries: &[
929                wgpu::BindGroupEntry {
930                    binding: 0,
931                    resource: self.volume_uniform_buffer.as_entire_binding(),
932                },
933                wgpu::BindGroupEntry {
934                    binding: 1,
935                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
936                },
937                wgpu::BindGroupEntry {
938                    binding: 2,
939                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
940                },
941                wgpu::BindGroupEntry {
942                    binding: 3,
943                    resource: wgpu::BindingResource::TextureView(&self.lut_view),
944                },
945                wgpu::BindGroupEntry {
946                    binding: 4,
947                    resource: wgpu::BindingResource::Sampler(&self.lut_sampler),
948                },
949                wgpu::BindGroupEntry {
950                    binding: 5,
951                    resource: wgpu::BindingResource::TextureView(&self.gradient_lut_view),
952                },
953                wgpu::BindGroupEntry {
954                    binding: 6,
955                    resource: wgpu::BindingResource::Sampler(&self.gradient_lut_sampler),
956                },
957            ],
958        }));
959
960        self.slice_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
961            label: Some("volren_slice_bind_group"),
962            layout: &self.slice_bind_group_layout,
963            entries: &[
964                wgpu::BindGroupEntry {
965                    binding: 0,
966                    resource: self.slice_uniform_buffer.as_entire_binding(),
967                },
968                wgpu::BindGroupEntry {
969                    binding: 1,
970                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
971                },
972                wgpu::BindGroupEntry {
973                    binding: 2,
974                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
975                },
976            ],
977        }));
978    }
979
980    fn blit_rgba8(
981        &self,
982        encoder: &mut wgpu::CommandEncoder,
983        target: &wgpu::TextureView,
984        viewport: Viewport,
985        width: u32,
986        height: u32,
987        rgba: &[u8],
988    ) {
989        let texture = self.device.create_texture(&wgpu::TextureDescriptor {
990            label: Some("volren_blit_texture"),
991            size: wgpu::Extent3d {
992                width,
993                height,
994                depth_or_array_layers: 1,
995            },
996            mip_level_count: 1,
997            sample_count: 1,
998            dimension: wgpu::TextureDimension::D2,
999            format: wgpu::TextureFormat::Rgba8Unorm,
1000            usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
1001            view_formats: &[],
1002        });
1003        self.queue.write_texture(
1004            texture.as_image_copy(),
1005            rgba,
1006            wgpu::TexelCopyBufferLayout {
1007                offset: 0,
1008                bytes_per_row: Some(width * 4),
1009                rows_per_image: Some(height),
1010            },
1011            wgpu::Extent3d {
1012                width,
1013                height,
1014                depth_or_array_layers: 1,
1015            },
1016        );
1017        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1018        self.blit_texture_view(encoder, target, viewport, &view);
1019    }
1020
1021    fn blit_texture_view(
1022        &self,
1023        encoder: &mut wgpu::CommandEncoder,
1024        target: &wgpu::TextureView,
1025        viewport: Viewport,
1026        source_view: &wgpu::TextureView,
1027    ) {
1028        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1029            label: Some("volren_blit_bind_group"),
1030            layout: &self.blit_bind_group_layout,
1031            entries: &[
1032                wgpu::BindGroupEntry {
1033                    binding: 0,
1034                    resource: wgpu::BindingResource::TextureView(source_view),
1035                },
1036                wgpu::BindGroupEntry {
1037                    binding: 1,
1038                    resource: wgpu::BindingResource::Sampler(&self.blit_sampler),
1039                },
1040            ],
1041        });
1042
1043        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1044            label: Some("volren_blit_pass"),
1045            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1046                view: target,
1047                resolve_target: None,
1048                ops: wgpu::Operations {
1049                    load: wgpu::LoadOp::Load,
1050                    store: wgpu::StoreOp::Store,
1051                },
1052            })],
1053            depth_stencil_attachment: None,
1054            timestamp_writes: None,
1055            occlusion_query_set: None,
1056        });
1057        pass.set_pipeline(&self.blit_pipeline);
1058        pass.set_bind_group(0, &bind_group, &[]);
1059        pass.set_viewport(
1060            viewport.x as f32,
1061            viewport.y as f32,
1062            viewport.width as f32,
1063            viewport.height as f32,
1064            0.0,
1065            1.0,
1066        );
1067        pass.draw(0..6, 0..1);
1068    }
1069
    /// Bind-group layout for the volume raycast pass:
    /// 0 = uniforms, 1/2 = 3D volume texture + sampler,
    /// 3/4 = 1D LUT + sampler (bound to `self.lut_view`),
    /// 5/6 = 1D gradient-opacity LUT + sampler.
    fn create_volume_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("volren_volume_bgl"),
            entries: &[
                uniform_bgl_entry(0),
                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
                sampler_bgl_entry(2),
                texture_bgl_entry(3, wgpu::TextureViewDimension::D1),
                sampler_bgl_entry(4),
                texture_bgl_entry(5, wgpu::TextureViewDimension::D1),
                sampler_bgl_entry(6),
            ],
        })
    }
1084
    /// Bind-group layout for the reslice pass:
    /// 0 = slice uniforms, 1/2 = 3D volume texture + sampler.
    fn create_slice_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("volren_slice_bgl"),
            entries: &[
                uniform_bgl_entry(0),
                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
                sampler_bgl_entry(2),
            ],
        })
    }
1095
    /// Bind-group layout for the crosshair overlay: a single uniform buffer.
    fn create_crosshair_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("volren_crosshair_bgl"),
            entries: &[uniform_bgl_entry(0)],
        })
    }
1102
    /// Bind-group layout for the RGBA blit pass: 2D source texture + sampler.
    fn create_blit_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("volren_blit_bgl"),
            entries: &[texture_bgl_entry_2d(0), sampler_bgl_entry(1)],
        })
    }
1109
    /// Build a render pipeline for a fullscreen-pass shader module.
    ///
    /// The module must expose `vs_main`/`fs_main`; no vertex buffers are
    /// bound (the vertex shader is expected to synthesize positions).
    /// `blend` selects blending for the single color target; `None`
    /// disables blending. No depth/stencil, no multisampling.
    fn create_pipeline(
        device: &wgpu::Device,
        shader: &wgpu::ShaderModule,
        bind_group_layout: &wgpu::BindGroupLayout,
        output_format: wgpu::TextureFormat,
        blend: Option<wgpu::BlendState>,
    ) -> wgpu::RenderPipeline {
        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("volren_pipeline_layout"),
            bind_group_layouts: &[bind_group_layout],
            push_constant_ranges: &[],
        });

        device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("volren_pipeline"),
            layout: Some(&layout),
            vertex: wgpu::VertexState {
                module: shader,
                entry_point: Some("vs_main"),
                buffers: &[],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format: output_format,
                    blend,
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                ..Default::default()
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
            multiview: None,
            cache: None,
        })
    }
1152
    /// Create an empty 1D `Rgba16Float` LUT texture of `size` texels, plus a
    /// 1D view and a linear clamp-to-edge sampler. The texture carries
    /// `COPY_DST`, so the caller fills it with `queue.write_texture`.
    fn create_lut_texture(
        device: &wgpu::Device,
        size: u32,
    ) -> (wgpu::Texture, wgpu::TextureView, wgpu::Sampler) {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("volren_lut"),
            size: wgpu::Extent3d {
                width: size,
                height: 1,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D1,
            format: wgpu::TextureFormat::Rgba16Float,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        // Explicit D1 view dimension to match the D1 bind-group-layout entries.
        let view = texture.create_view(&wgpu::TextureViewDescriptor {
            dimension: Some(wgpu::TextureViewDimension::D1),
            ..Default::default()
        });
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("volren_lut_sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });
        (texture, view, sampler)
    }
1184}
1185
1186fn validate_viewport(viewport: Viewport) -> Result<(), RenderError> {
1187    if viewport.width == 0 || viewport.height == 0 {
1188        Err(RenderError::ZeroViewport)
1189    } else {
1190        Ok(())
1191    }
1192}
1193
1194fn opaque_unit_ramp() -> OpacityTransferFunction {
1195    let mut tf = OpacityTransferFunction::new();
1196    tf.add_point(0.0, 1.0);
1197    tf.add_point(1.0, 1.0);
1198    tf
1199}
1200
1201fn bake_opacity_lut_bytes(tf: &OpacityTransferFunction, resolution: u32) -> Vec<u8> {
1202    let mut rgba = Vec::with_capacity((resolution * 4) as usize);
1203    for i in 0..resolution {
1204        let t = if resolution <= 1 {
1205            0.0
1206        } else {
1207            f64::from(i) / f64::from(resolution - 1)
1208        };
1209        let opacity = tf.evaluate(t) as f32;
1210        rgba.extend_from_slice(&[opacity, opacity, opacity, 1.0]);
1211    }
1212    bytemuck::cast_slice(&rgba).to_vec()
1213}
1214
1215/// Convert an f32 slice to packed f16 (little-endian) bytes for `Rgba16Float` upload.
1216fn f32_slice_to_f16_bytes(data: &[f32]) -> Vec<u8> {
1217    let mut bytes = Vec::with_capacity(data.len() * 2);
1218    for &val in data {
1219        bytes.extend_from_slice(&f16::from_f32(val).to_le_bytes());
1220    }
1221    bytes
1222}
1223
1224fn combined_clip_planes(params: &VolumeRenderParams) -> ([[f32; 4]; 6], u32) {
1225    let mut planes = params.clip_planes.clone();
1226    if let Some(bounds) = params.cropping_bounds {
1227        planes.extend([
1228            ClipPlane::from_point_and_normal(DVec3::new(bounds.min.x, 0.0, 0.0), DVec3::X),
1229            ClipPlane::from_point_and_normal(DVec3::new(bounds.max.x, 0.0, 0.0), DVec3::NEG_X),
1230            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.min.y, 0.0), DVec3::Y),
1231            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.max.y, 0.0), DVec3::NEG_Y),
1232            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.min.z), DVec3::Z),
1233            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.max.z), DVec3::NEG_Z),
1234        ]);
1235    }
1236
1237    let mut packed = [[0.0f32; 4]; 6];
1238    for (index, plane) in planes.iter().take(6).enumerate() {
1239        let eq = plane.equation;
1240        packed[index] = [eq.x as f32, eq.y as f32, eq.z as f32, eq.w as f32];
1241    }
1242    (packed, planes.len().min(6) as u32)
1243}
1244
1245fn thick_slab_mode_code(mode: ThickSlabMode) -> u32 {
1246    match mode {
1247        ThickSlabMode::Mip => 0,
1248        ThickSlabMode::MinIp => 1,
1249        ThickSlabMode::Mean => 2,
1250        _ => 0,
1251    }
1252}
1253
1254fn mat4_from_direction(direction: DMat3) -> DMat4 {
1255    DMat4::from_cols(
1256        direction.x_axis.extend(0.0),
1257        direction.y_axis.extend(0.0),
1258        direction.z_axis.extend(0.0),
1259        DVec4::W,
1260    )
1261}
1262
/// Layout entry for a uniform buffer visible to both vertex and fragment
/// stages (no dynamic offset, size left unconstrained).
fn uniform_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
    wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
        ty: wgpu::BindingType::Buffer {
            ty: wgpu::BufferBindingType::Uniform,
            has_dynamic_offset: false,
            min_binding_size: None,
        },
        count: None,
    }
}
1275
/// Layout entry for a filterable float texture of the given dimensionality,
/// sampled only in the fragment stage.
fn texture_bgl_entry(
    binding: u32,
    view_dimension: wgpu::TextureViewDimension,
) -> wgpu::BindGroupLayoutEntry {
    wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            view_dimension,
            multisampled: false,
        },
        count: None,
    }
}
1291
/// Convenience wrapper: the 2D variant of [`texture_bgl_entry`].
fn texture_bgl_entry_2d(binding: u32) -> wgpu::BindGroupLayoutEntry {
    texture_bgl_entry(binding, wgpu::TextureViewDimension::D2)
}
1295
/// Layout entry for a filtering sampler, fragment stage only.
fn sampler_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
    wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
        count: None,
    }
}
1304
1305fn build_orientation_marker_image(
1306    width: u32,
1307    height: u32,
1308    camera: &Camera,
1309    labels: &OrientationLabels,
1310) -> Vec<u8> {
1311    let mut pixels = vec![0u8; (width as usize) * (height as usize) * 4];
1312    let center = DVec2::new(f64::from(width) * 0.5, f64::from(height) * 0.5);
1313    let radius = f64::from(width.min(height)) * 0.28;
1314    let view = camera.view_matrix();
1315
1316    let axes = [
1317        (DVec3::X, [255, 80, 80, 255], labels.right.as_str()),
1318        (-DVec3::X, [128, 40, 40, 220], labels.left.as_str()),
1319        (DVec3::Y, [80, 255, 80, 255], labels.anterior.as_str()),
1320        (-DVec3::Y, [40, 128, 40, 220], labels.posterior.as_str()),
1321        (DVec3::Z, [80, 160, 255, 255], labels.superior.as_str()),
1322        (-DVec3::Z, [40, 80, 128, 220], labels.inferior.as_str()),
1323    ];
1324
1325    for (axis, color, label) in axes {
1326        let projected = project_axis(view, axis);
1327        if projected.length_squared() < 1e-8 {
1328            continue;
1329        }
1330        let end = center + projected.normalize() * radius;
1331        draw_line(&mut pixels, width, height, center, end, color);
1332        draw_text(
1333            &mut pixels,
1334            width,
1335            height,
1336            end + projected.normalize() * 6.0,
1337            label,
1338            color,
1339        );
1340    }
1341
1342    draw_disc(
1343        &mut pixels,
1344        width,
1345        height,
1346        center,
1347        2.5,
1348        [255, 255, 255, 255],
1349    );
1350    pixels
1351}
1352
1353fn project_axis(view: DMat4, axis: DVec3) -> DVec2 {
1354    let camera_space = view.transform_vector3(axis);
1355    DVec2::new(camera_space.x, -camera_space.y)
1356}
1357
1358fn draw_line(pixels: &mut [u8], width: u32, height: u32, start: DVec2, end: DVec2, color: [u8; 4]) {
1359    let delta = end - start;
1360    let steps = delta.length().ceil().max(1.0) as u32;
1361    for step in 0..=steps {
1362        let t = f64::from(step) / f64::from(steps.max(1));
1363        let point = start + delta * t;
1364        alpha_plot(
1365            pixels,
1366            width,
1367            height,
1368            point.x.round() as i32,
1369            point.y.round() as i32,
1370            color,
1371        );
1372    }
1373}
1374
1375fn draw_disc(
1376    pixels: &mut [u8],
1377    width: u32,
1378    height: u32,
1379    center: DVec2,
1380    radius: f64,
1381    color: [u8; 4],
1382) {
1383    let min_x = (center.x - radius).floor() as i32;
1384    let max_x = (center.x + radius).ceil() as i32;
1385    let min_y = (center.y - radius).floor() as i32;
1386    let max_y = (center.y + radius).ceil() as i32;
1387
1388    for y in min_y..=max_y {
1389        for x in min_x..=max_x {
1390            let dx = f64::from(x) - center.x;
1391            let dy = f64::from(y) - center.y;
1392            if dx * dx + dy * dy <= radius * radius {
1393                alpha_plot(pixels, width, height, x, y, color);
1394            }
1395        }
1396    }
1397}
1398
1399fn draw_text(
1400    pixels: &mut [u8],
1401    width: u32,
1402    height: u32,
1403    position: DVec2,
1404    text: &str,
1405    color: [u8; 4],
1406) {
1407    let mut cursor_x = position.x.round() as i32;
1408    let cursor_y = position.y.round() as i32;
1409    for ch in text.chars() {
1410        draw_char(pixels, width, height, cursor_x, cursor_y, ch, color);
1411        cursor_x += 6;
1412    }
1413}
1414
1415fn draw_char(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, ch: char, color: [u8; 4]) {
1416    let glyph = glyph_rows(ch);
1417    for (row_index, row_bits) in glyph.iter().enumerate() {
1418        for col in 0..5 {
1419            if (row_bits >> (4 - col)) & 1 == 1 {
1420                alpha_plot(pixels, width, height, x + col, y + row_index as i32, color);
1421            }
1422        }
1423    }
1424}
1425
/// Composite a single pixel onto the RGBA8 buffer using the standard
/// source-over rule with unassociated (straight) alpha. Out-of-bounds
/// coordinates are ignored.
fn alpha_plot(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, color: [u8; 4]) {
    let in_bounds =
        (0..width as i32).contains(&x) && (0..height as i32).contains(&y);
    if !in_bounds {
        return;
    }
    let base = ((y as u32 * width + x as u32) * 4) as usize;

    let src_a = f32::from(color[3]) / 255.0;
    let dst_a = f32::from(pixels[base + 3]) / 255.0;
    let out_a = src_a + dst_a * (1.0 - src_a);

    for channel in 0..3 {
        let src = f32::from(color[channel]);
        let dst = f32::from(pixels[base + channel]);
        // Fully transparent result: define the color channels as 0.
        pixels[base + channel] = if out_a <= f32::EPSILON {
            0
        } else {
            ((src * src_a + dst * dst_a * (1.0 - src_a)) / out_a)
                .round()
                .clamp(0.0, 255.0) as u8
        };
    }
    pixels[base + 3] = (out_a * 255.0).round().clamp(0.0, 255.0) as u8;
}
1449
/// Return the 7 bitmap rows of a 5x7 glyph. Lookup is case-insensitive;
/// unknown characters map to a "?"-like fallback glyph.
fn glyph_rows(ch: char) -> [u8; 7] {
    const BLANK: [u8; 7] = [0; 7];
    match ch.to_ascii_uppercase() {
        'A' => [0b01110, 0b10001, 0b10001, 0b11111, 0b10001, 0b10001, 0b10001],
        'I' => [0b11111, 0b00100, 0b00100, 0b00100, 0b00100, 0b00100, 0b11111],
        'L' => [0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b11111],
        'P' => [0b11110, 0b10001, 0b10001, 0b11110, 0b10000, 0b10000, 0b10000],
        'R' => [0b11110, 0b10001, 0b10001, 0b11110, 0b10100, 0b10010, 0b10001],
        'S' => [0b01111, 0b10000, 0b10000, 0b01110, 0b00001, 0b00001, 0b11110],
        ' ' => BLANK,
        _ => [0b11111, 0b00001, 0b00010, 0b00100, 0b00100, 0b00000, 0b00100],
    }
}
1476
#[cfg(test)]
mod tests {
    use super::*;

    /// Spot-check the bitmap font table against known glyph rows.
    #[test]
    fn glyph_table_contains_expected_rows() {
        let r_glyph = glyph_rows('R');
        let i_glyph = glyph_rows('I');
        assert_eq!(r_glyph[0], 0b11110);
        assert_eq!(i_glyph[6], 0b11111);
    }

    /// The slab-mode codes are part of the CPU/GPU contract and must not
    /// change silently.
    #[test]
    fn thick_slab_mode_codes_are_stable() {
        let expected = [
            (ThickSlabMode::Mip, 0),
            (ThickSlabMode::MinIp, 1),
            (ThickSlabMode::Mean, 2),
        ];
        for (mode, code) in expected {
            assert_eq!(thick_slab_mode_code(mode), code);
        }
    }
}
1494
#[cfg(all(test, feature = "snapshot-tests"))]
mod gpu_smoke_tests {
    //! GPU smoke tests. These need a real adapter, so every test is
    //! `#[ignore]`d and returns early when no device can be acquired.

    use super::*;
    use std::sync::mpsc;

    use glam::{DMat3, DVec3, UVec3};
    use volren_core::{Volume, VolumeRenderParams};

    /// Try to acquire a low-power adapter and default device; `None` when the
    /// host has no usable GPU.
    fn test_device() -> Option<(wgpu::Device, wgpu::Queue)> {
        pollster::block_on(async {
            let instance = wgpu::Instance::default();
            let adapter = instance
                .request_adapter(&wgpu::RequestAdapterOptions {
                    power_preference: wgpu::PowerPreference::LowPower,
                    compatible_surface: None,
                    force_fallback_adapter: false,
                })
                .await?;
            adapter
                .request_device(&wgpu::DeviceDescriptor::default(), None)
                .await
                .ok()
        })
    }

    /// 16^3 u16 volume: all zeros except a single bright voxel (2048) at the
    /// grid center.
    fn small_volume() -> DynVolume {
        let mut data = vec![0u16; 16 * 16 * 16];
        data[8 + 8 * 16 + 8 * 16 * 16] = 2048;
        Volume::from_data(
            data,
            UVec3::new(16, 16, 16),
            DVec3::ONE,
            DVec3::ZERO,
            DMat3::IDENTITY,
            1,
        )
        .expect("valid test volume")
        .into()
    }

    /// 32^3 u16 volume containing a filled sphere (value 2048) of radius 9
    /// around the grid center.
    fn sphere_volume() -> DynVolume {
        let dims = UVec3::new(32, 32, 32);
        let center = DVec3::new(15.5, 15.5, 15.5);
        let radius = 9.0;
        let mut data = vec![0u16; (dims.x * dims.y * dims.z) as usize];

        for z in 0..dims.z {
            for y in 0..dims.y {
                for x in 0..dims.x {
                    let index = (z * dims.x * dims.y + y * dims.x + x) as usize;
                    let point = DVec3::new(f64::from(x), f64::from(y), f64::from(z));
                    data[index] = if (point - center).length() <= radius {
                        2048
                    } else {
                        0
                    };
                }
            }
        }

        Volume::from_data(data, dims, DVec3::ONE, DVec3::ZERO, DMat3::IDENTITY, 1)
            .expect("valid sphere volume")
            .into()
    }

    /// Copy an RGBA8 texture back to the CPU, stripping the row padding that
    /// texture-to-buffer copies require (rows rounded up to 256 bytes).
    fn read_texture(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        texture: &wgpu::Texture,
        width: u32,
        height: u32,
    ) -> Vec<u8> {
        let unpadded_bytes_per_row = width * 4;
        // Copies require bytes_per_row to be a multiple of 256.
        let padded_bytes_per_row = unpadded_bytes_per_row.div_ceil(256) * 256;
        let buffer_size = u64::from(padded_bytes_per_row) * u64::from(height);
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_test_readback"),
            size: buffer_size,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });

        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        encoder.copy_texture_to_buffer(
            texture.as_image_copy(),
            wgpu::TexelCopyBufferInfo {
                buffer: &buffer,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    bytes_per_row: Some(padded_bytes_per_row),
                    rows_per_image: Some(height),
                },
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );
        queue.submit(std::iter::once(encoder.finish()));

        // Block until the map callback fires; poll(Wait) drives the device.
        let (sender, receiver) = mpsc::channel();
        buffer
            .slice(..)
            .map_async(wgpu::MapMode::Read, move |result| {
                let _ = sender.send(result);
            });
        let _ = device.poll(wgpu::MaintainBase::Wait);
        receiver.recv().expect("map callback").expect("map success");

        // Repack padded rows into a tight width*4 layout.
        let mapped = buffer.slice(..).get_mapped_range();
        let mut pixels = vec![0u8; (unpadded_bytes_per_row * height) as usize];
        for row in 0..height as usize {
            let src_offset = row * padded_bytes_per_row as usize;
            let dst_offset = row * unpadded_bytes_per_row as usize;
            pixels[dst_offset..dst_offset + unpadded_bytes_per_row as usize]
                .copy_from_slice(&mapped[src_offset..src_offset + unpadded_bytes_per_row as usize]);
        }
        drop(mapped);
        buffer.unmap();
        pixels
    }

    /// Position-weighted checksum: permuting pixels changes the value, not
    /// just altering their sum.
    fn checksum(bytes: &[u8]) -> u64 {
        bytes.iter().enumerate().fold(0u64, |acc, (index, value)| {
            acc.wrapping_add((index as u64 + 1) * u64::from(*value))
        })
    }

    /// End-to-end: upload a volume and run one raycast pass without errors.
    #[test]
    #[ignore = "requires a working GPU adapter"]
    fn render_volume_smoke_test() {
        let Some((device, queue)) = test_device() else {
            return;
        };
        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
        let volume = small_volume();
        renderer.set_volume(&volume, true);
        renderer
            .set_render_params(&VolumeRenderParams::default())
            .expect("render params upload");

        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 50.0), DVec3::ZERO, 30.0);
        let texture = renderer.create_render_target(64, 64);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        renderer
            .render_volume(
                &mut encoder,
                &view,
                &camera,
                &VolumeRenderParams::default(),
                Viewport::full(64, 64),
            )
            .expect("volume render");
        queue.submit(std::iter::once(encoder.finish()));
    }

    /// Render a sphere and read the image back; only asserts a non-empty
    /// result (the checksum is logged for manual comparison).
    #[test]
    #[ignore = "requires a working GPU adapter"]
    fn render_sphere_snapshot_checksum() {
        let Some((device, queue)) = test_device() else {
            return;
        };
        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
        let volume = sphere_volume();
        renderer.set_volume(&volume, true);
        let params = VolumeRenderParams::default();
        renderer
            .set_render_params(&params)
            .expect("render params upload");

        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 60.0), DVec3::ZERO, 30.0);
        let texture = renderer.create_render_target(64, 64);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        renderer
            .render_volume(
                &mut encoder,
                &view,
                &camera,
                &params,
                Viewport::full(64, 64),
            )
            .expect("volume render");
        queue.submit(std::iter::once(encoder.finish()));

        let pixels = read_texture(&device, &queue, &texture, 64, 64);
        let image_checksum = checksum(&pixels);
        eprintln!("sphere checksum: {image_checksum}");
        assert!(image_checksum > 0, "rendered sphere should not be empty");
    }

    /// Run a reslice pass followed by the crosshair overlay on one target.
    #[test]
    #[ignore = "requires a working GPU adapter"]
    fn render_slice_and_crosshair_smoke_test() {
        let Some((device, queue)) = test_device() else {
            return;
        };
        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
        let volume = small_volume();
        renderer.set_volume(&volume, true);
        renderer
            .set_render_params(&VolumeRenderParams::default())
            .expect("render params upload");

        let texture = renderer.create_render_target(64, 64);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        renderer
            .render_slice(
                &mut encoder,
                &view,
                &SlicePlane::axial(0.0, 32.0),
                &WindowLevel::from_scalar_range(0.0, 2048.0),
                Viewport::full(64, 64),
                None,
            )
            .expect("slice render");
        renderer
            .render_crosshair(
                &mut encoder,
                &view,
                Viewport::full(64, 64),
                &CrosshairParams::default(),
            )
            .expect("crosshair render");
        queue.submit(std::iter::once(encoder.finish()));
    }
}