// volren_gpu/renderer.rs
1//! Main volume renderer: wgpu pipeline creation and render execution.
2
3use std::sync::Arc;
4
5use bytemuck::{Pod, Zeroable};
6use glam::{DMat3, DMat4, DVec2, DVec3, DVec4};
7use volren_core::{
8    camera::Camera,
9    render_params::{BlendMode, ClipPlane, VolumeRenderParams},
10    reslice::{SlicePlane, ThickSlabMode, ThickSlabParams},
11    transfer_function::{OpacityTransferFunction, TransferFunctionLut},
12    volume::{DynVolume, VolumeInfo},
13    window_level::WindowLevel,
14};
15
16use crate::{
17    texture::GpuVolumeTexture,
18    uniforms::{blend_mode as bm, VolumeUniforms},
19};
20
// WGSL shader sources are embedded into the binary at compile time. The
// volume shader is stitched together from shared helper files (common
// utilities, fullscreen-quad vertex stage, gradient and shading helpers)
// followed by the raycast entry point.
const VOLUME_SHADER_SRC: &str = concat!(
    include_str!("shaders/common.wgsl"),
    "\n",
    include_str!("shaders/fullscreen_quad.wgsl"),
    "\n",
    include_str!("shaders/gradient.wgsl"),
    "\n",
    include_str!("shaders/shading.wgsl"),
    "\n",
    include_str!("shaders/volume_raycast.wgsl"),
);
/// 2D reslice (MPR) shader.
const RESLICE_SHADER_SRC: &str = include_str!("shaders/reslice.wgsl");
/// Crosshair overlay shader.
const CROSSHAIR_SHADER_SRC: &str = include_str!("shaders/crosshair.wgsl");
/// Textured-quad blit shader used for upscaling and RGBA overlays.
const BLIT_SHADER_SRC: &str = include_str!("shaders/blit_rgba.wgsl");
35
/// Rectangular sub-region of the render target, in pixels.
///
/// The origin is the top-left corner of the target; `x` grows rightwards
/// and `y` grows downwards.
#[derive(Debug, Clone, Copy)]
pub struct Viewport {
    /// Horizontal offset from the left edge.
    pub x: u32,
    /// Vertical offset from the top edge.
    pub y: u32,
    /// Width in pixels.
    pub width: u32,
    /// Height in pixels.
    pub height: u32,
}
48
49impl Viewport {
50    /// Create a viewport covering the full render target.
51    #[must_use]
52    pub fn full(width: u32, height: u32) -> Self {
53        Self {
54            x: 0,
55            y: 0,
56            width,
57            height,
58        }
59    }
60}
61
/// Errors that can occur during rendering.
///
/// All variants indicate a missing prerequisite or an invalid argument;
/// none are fatal to the renderer itself.
#[derive(Debug, thiserror::Error)]
pub enum RenderError {
    /// No volume has been uploaded yet.
    #[error("no volume data uploaded — call `set_volume()` first")]
    NoVolume,
    /// No transfer-function data has been uploaded yet.
    #[error("no render parameters uploaded — call `set_render_params()` first")]
    NoTransferFunction,
    /// Viewport has zero area.
    #[error("viewport has zero area")]
    ZeroViewport,
}
75
/// Parameters for rendering crosshair overlay lines on a 2D slice viewport.
///
/// See [`CrosshairParams::default`] for the standard red/green centred
/// crosshair.
#[derive(Debug, Clone)]
pub struct CrosshairParams {
    /// Normalised position in `[0, 1] × [0, 1]` on the slice.
    pub position: [f32; 2],
    /// Line colour for the horizontal line (RGBA, 0–1).
    pub horizontal_color: [f32; 4],
    /// Line colour for the vertical line (RGBA, 0–1).
    pub vertical_color: [f32; 4],
    /// Line thickness in pixels.
    pub thickness: f32,
}
88
89impl Default for CrosshairParams {
90    fn default() -> Self {
91        Self {
92            position: [0.5, 0.5],
93            horizontal_color: [1.0, 0.0, 0.0, 1.0],
94            vertical_color: [0.0, 1.0, 0.0, 1.0],
95            thickness: 1.0,
96        }
97    }
98}
99
/// Patient orientation labels for the orientation marker.
///
/// Defaults to the standard single-letter anatomical labels (R/L/A/P/S/I).
#[derive(Debug, Clone)]
pub struct OrientationLabels {
    /// Label for the right direction (+X).
    pub right: String,
    /// Label for the left direction (−X).
    pub left: String,
    /// Label for the anterior direction (+Y).
    pub anterior: String,
    /// Label for the posterior direction (−Y).
    pub posterior: String,
    /// Label for the superior direction (+Z).
    pub superior: String,
    /// Label for the inferior direction (−Z).
    pub inferior: String,
}
116
117impl Default for OrientationLabels {
118    fn default() -> Self {
119        Self {
120            right: "R".into(),
121            left: "L".into(),
122            anterior: "A".into(),
123            posterior: "P".into(),
124            superior: "S".into(),
125            inferior: "I".into(),
126        }
127    }
128}
129
/// CPU-side copy of the per-volume constants needed to fill GPU uniforms.
#[derive(Debug, Clone, Copy)]
struct VolumeMetadata {
    /// World-space → volume-local transform (column-major; inverse of
    /// `volume_to_world`).
    world_to_volume: [[f32; 4]; 4],
    /// Volume-local → world-space transform built from origin, direction,
    /// and physical extent in `from_volume`.
    volume_to_world: [[f32; 4]; 4],
    /// Voxel counts per axis, stored as floats for shader consumption.
    dimensions: [f32; 3],
    /// Physical voxel spacing per axis.
    spacing: [f32; 3],
    /// Minimum and maximum scalar value in the volume.
    scalar_range: [f32; 2],
}
138
139impl VolumeMetadata {
140    fn from_volume(volume: &DynVolume) -> Self {
141        let dimensions = volume.dimensions().as_dvec3();
142        let spacing = volume.spacing();
143        let direction = mat4_from_direction(volume.direction());
144        let scale = DVec3::new(
145            (dimensions.x - 1.0).max(1.0) * spacing.x,
146            (dimensions.y - 1.0).max(1.0) * spacing.y,
147            (dimensions.z - 1.0).max(1.0) * spacing.z,
148        );
149        let volume_to_world =
150            DMat4::from_translation(volume.origin()) * direction * DMat4::from_scale(scale);
151        let world_to_volume = volume_to_world.inverse();
152        let (scalar_min, scalar_max) = volume.scalar_range();
153
154        Self {
155            world_to_volume: world_to_volume.as_mat4().to_cols_array_2d(),
156            volume_to_world: volume_to_world.as_mat4().to_cols_array_2d(),
157            dimensions: [
158                dimensions.x as f32,
159                dimensions.y as f32,
160                dimensions.z as f32,
161            ],
162            spacing: [spacing.x as f32, spacing.y as f32, spacing.z as f32],
163            scalar_range: [scalar_min as f32, scalar_max as f32],
164        }
165    }
166}
167
/// Uniform block for the reslice (MPR) shader.
///
/// `#[repr(C)]` + `Pod` so it can be uploaded with `bytemuck`; vectors are
/// padded to 16 bytes for WGSL uniform-buffer alignment.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct SliceUniforms {
    /// World-space → volume-space transform (column-major).
    world_to_volume: [[f32; 4]; 4],
    /// Slice-plane origin in world space; w unused.
    slice_origin: [f32; 4],
    /// In-plane "right" basis vector; w unused.
    slice_right: [f32; 4],
    /// In-plane "up" basis vector; w unused.
    slice_up: [f32; 4],
    /// Plane normal; w unused.
    slice_normal: [f32; 4],
    /// x = slice width, y = slice height, z = slab half-thickness; w unused.
    slice_extent: [f32; 4],
    /// x = window center, y = window width; zw unused.
    window_level: [f32; 4],
    /// x = thick-slab mode code, y = sample count; zw unused.
    slab_params: [u32; 4],
}
180
/// Uniform block for the crosshair overlay shader.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct CrosshairUniforms {
    /// xy = normalised crosshair position, z = line thickness in pixels;
    /// w unused.
    position: [f32; 4],
    /// RGBA colour of the horizontal line.
    horizontal_color: [f32; 4],
    /// RGBA colour of the vertical line.
    vertical_color: [f32; 4],
    /// xy = viewport size in pixels; zw unused.
    viewport: [f32; 4],
}
189
/// A fully GPU-resident volume renderer.
///
/// The renderer stores the uploaded 3D texture plus the metadata needed for
/// raycasting and reslicing. Call [`VolumeRenderer::set_render_params`] after
/// uploading a volume to bake the active transfer functions.
pub struct VolumeRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,

    // Raycasting pass.
    volume_pipeline: wgpu::RenderPipeline,
    volume_bind_group_layout: wgpu::BindGroupLayout,
    volume_uniform_buffer: wgpu::Buffer,

    // 2D reslice (MPR) pass.
    slice_pipeline: wgpu::RenderPipeline,
    slice_bind_group_layout: wgpu::BindGroupLayout,
    slice_uniform_buffer: wgpu::Buffer,

    // Crosshair overlay pass; its bind group only references the uniform
    // buffer, so it is created once at construction.
    crosshair_pipeline: wgpu::RenderPipeline,
    crosshair_uniform_buffer: wgpu::Buffer,
    crosshair_bind_group: wgpu::BindGroup,

    // Textured-quad blit pass, used for interactive upscaling and for
    // compositing CPU-generated RGBA images (orientation marker).
    blit_pipeline: wgpu::RenderPipeline,
    blit_bind_group_layout: wgpu::BindGroupLayout,
    blit_sampler: wgpu::Sampler,

    // Colour/opacity transfer-function LUT and gradient-opacity LUT.
    lut_texture: wgpu::Texture,
    lut_view: wgpu::TextureView,
    lut_sampler: wgpu::Sampler,
    gradient_lut_texture: wgpu::Texture,
    gradient_lut_view: wgpu::TextureView,
    gradient_lut_sampler: wgpu::Sampler,

    // Per-volume state; `None` until `set_volume()` is called.
    volume_texture: Option<GpuVolumeTexture>,
    volume_bind_group: Option<wgpu::BindGroup>,
    slice_bind_group: Option<wgpu::BindGroup>,
    volume_metadata: Option<VolumeMetadata>,
    // Set once a transfer-function LUT has been uploaded.
    has_render_params: bool,

    output_format: wgpu::TextureFormat,
    viewport_size: (u32, u32),
}
231
232impl VolumeRenderer {
233    /// Create a new renderer for the given device and output format.
234    #[must_use]
235    pub fn new(
236        device: &wgpu::Device,
237        queue: &wgpu::Queue,
238        output_format: wgpu::TextureFormat,
239    ) -> Self {
240        Self::from_arc(
241            Arc::new(device.clone()),
242            Arc::new(queue.clone()),
243            output_format,
244        )
245    }
246
    /// Create a renderer from `Arc`-wrapped device and queue.
    ///
    /// Compiles the four shader modules, builds their bind-group layouts and
    /// pipelines, and allocates the uniform buffers and placeholder LUT
    /// textures. No volume data is uploaded here; call [`Self::set_volume`]
    /// and [`Self::set_render_params`] before rendering.
    #[must_use]
    pub fn from_arc(
        device: Arc<wgpu::Device>,
        queue: Arc<wgpu::Queue>,
        output_format: wgpu::TextureFormat,
    ) -> Self {
        // Compile the WGSL modules embedded at build time.
        let volume_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_volume_shader"),
            source: wgpu::ShaderSource::Wgsl(VOLUME_SHADER_SRC.into()),
        });
        let slice_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_reslice_shader"),
            source: wgpu::ShaderSource::Wgsl(RESLICE_SHADER_SRC.into()),
        });
        let crosshair_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_crosshair_shader"),
            source: wgpu::ShaderSource::Wgsl(CROSSHAIR_SHADER_SRC.into()),
        });
        let blit_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_blit_shader"),
            source: wgpu::ShaderSource::Wgsl(BLIT_SHADER_SRC.into()),
        });

        // One bind-group layout per pass.
        let volume_bind_group_layout = Self::create_volume_bind_group_layout(&device);
        let slice_bind_group_layout = Self::create_slice_bind_group_layout(&device);
        let crosshair_bind_group_layout = Self::create_crosshair_bind_group_layout(&device);
        let blit_bind_group_layout = Self::create_blit_bind_group_layout(&device);

        // All four pipelines target `output_format` and alpha-blend over the
        // existing target contents.
        let volume_pipeline = Self::create_pipeline(
            &device,
            &volume_shader,
            &volume_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let slice_pipeline = Self::create_pipeline(
            &device,
            &slice_shader,
            &slice_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let crosshair_pipeline = Self::create_pipeline(
            &device,
            &crosshair_shader,
            &crosshair_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let blit_pipeline = Self::create_pipeline(
            &device,
            &blit_shader,
            &blit_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );

        // Uniform buffers, sized for their CPU-side structs and refilled via
        // `queue.write_buffer` on each render call.
        let volume_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_volume_uniforms"),
            size: std::mem::size_of::<VolumeUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let slice_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_slice_uniforms"),
            size: std::mem::size_of::<SliceUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let crosshair_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_crosshair_uniforms"),
            size: std::mem::size_of::<CrosshairUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // The crosshair pass only binds its uniform buffer, so its bind group
        // can be built once here (volume/slice bind groups are rebuilt when a
        // volume is uploaded).
        let crosshair_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_crosshair_bind_group"),
            layout: &crosshair_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: crosshair_uniform_buffer.as_entire_binding(),
            }],
        });

        // Placeholder LUT textures: 4096-entry colour/opacity LUT and
        // 1024-entry gradient-opacity LUT, replaced by `set_render_params`.
        let (lut_texture, lut_view, lut_sampler) = Self::create_lut_texture(&device, 4096);
        let (gradient_lut_texture, gradient_lut_view, gradient_lut_sampler) =
            Self::create_lut_texture(&device, 1024);
        let blit_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("volren_blit_sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        Self {
            device,
            queue,
            volume_pipeline,
            volume_bind_group_layout,
            volume_uniform_buffer,
            slice_pipeline,
            slice_bind_group_layout,
            slice_uniform_buffer,
            crosshair_pipeline,
            crosshair_uniform_buffer,
            crosshair_bind_group,
            blit_pipeline,
            blit_bind_group_layout,
            blit_sampler,
            lut_texture,
            lut_view,
            lut_sampler,
            gradient_lut_texture,
            gradient_lut_view,
            gradient_lut_sampler,
            volume_texture: None,
            volume_bind_group: None,
            slice_bind_group: None,
            volume_metadata: None,
            has_render_params: false,
            output_format,
            viewport_size: (0, 0),
        }
    }
375
    /// The texture format this renderer outputs into.
    ///
    /// Every pipeline was created against this format, so render targets
    /// passed to the `render_*` methods should use it as well.
    #[must_use]
    pub fn output_format(&self) -> wgpu::TextureFormat {
        self.output_format
    }
381
382    /// Upload (or replace) volume data as a 3D GPU texture.
383    pub fn set_volume(&mut self, volume: &DynVolume, linear_interpolation: bool) {
384        self.volume_texture = Some(GpuVolumeTexture::upload(
385            &self.device,
386            &self.queue,
387            volume,
388            linear_interpolation,
389        ));
390        self.volume_metadata = Some(VolumeMetadata::from_volume(volume));
391        self.rebuild_bind_groups();
392    }
393
394    /// Upload a baked transfer-function LUT to the GPU.
395    pub fn set_transfer_function(&mut self, lut: &TransferFunctionLut) {
396        let (texture, view, sampler) = Self::create_lut_texture(&self.device, lut.lut_size());
397        self.queue.write_texture(
398            texture.as_image_copy(),
399            lut.as_bytes(),
400            wgpu::TexelCopyBufferLayout {
401                offset: 0,
402                bytes_per_row: Some(lut.lut_size() * 4 * 4),
403                rows_per_image: None,
404            },
405            wgpu::Extent3d {
406                width: lut.lut_size(),
407                height: 1,
408                depth_or_array_layers: 1,
409            },
410        );
411        self.lut_texture = texture;
412        self.lut_view = view;
413        self.lut_sampler = sampler;
414        self.has_render_params = true;
415        self.rebuild_bind_groups();
416    }
417
418    /// Bake and upload transfer functions from the current render parameters.
419    ///
420    /// # Errors
421    /// Returns [`RenderError::NoVolume`] if no volume metadata has been uploaded yet.
422    pub fn set_render_params(&mut self, params: &VolumeRenderParams) -> Result<(), RenderError> {
423        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
424        let lut = TransferFunctionLut::bake(
425            &params.color_tf,
426            &params.opacity_tf,
427            f64::from(metadata.scalar_range[0]),
428            f64::from(metadata.scalar_range[1]),
429            4096,
430        );
431        self.set_transfer_function(&lut);
432        let gradient_tf = params
433            .gradient_opacity_tf
434            .clone()
435            .unwrap_or_else(opaque_unit_ramp);
436        self.upload_gradient_lut(&gradient_tf);
437        Ok(())
438    }
439
    /// Handle viewport resize.
    ///
    /// Only records the new size; no GPU resources are reallocated here.
    pub fn resize(&mut self, width: u32, height: u32) {
        self.viewport_size = (width, height);
    }
444
445    /// Create an off-screen render target texture.
446    #[must_use]
447    pub fn create_render_target(&self, width: u32, height: u32) -> wgpu::Texture {
448        self.device.create_texture(&wgpu::TextureDescriptor {
449            label: Some("volren_offscreen_target"),
450            size: wgpu::Extent3d {
451                width,
452                height,
453                depth_or_array_layers: 1,
454            },
455            mip_level_count: 1,
456            sample_count: 1,
457            dimension: wgpu::TextureDimension::D2,
458            format: self.output_format,
459            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
460                | wgpu::TextureUsages::TEXTURE_BINDING
461                | wgpu::TextureUsages::COPY_SRC,
462            view_formats: &[],
463        })
464    }
465
466    /// Render the volume into the given color attachment.
467    ///
468    /// The caller owns the command encoder and submits it.
469    ///
470    /// # Errors
471    /// Returns [`RenderError`] if prerequisites are missing or the viewport is invalid.
472    pub fn render_volume(
473        &self,
474        encoder: &mut wgpu::CommandEncoder,
475        target: &wgpu::TextureView,
476        camera: &Camera,
477        params: &VolumeRenderParams,
478        viewport: Viewport,
479    ) -> Result<(), RenderError> {
480        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
481        let bind_group = self
482            .volume_bind_group
483            .as_ref()
484            .ok_or(RenderError::NoVolume)?;
485        validate_viewport(viewport)?;
486        if !self.has_render_params {
487            return Err(RenderError::NoTransferFunction);
488        }
489
490        let uniforms = self.build_uniforms(camera, metadata, params, viewport);
491        self.queue.write_buffer(
492            &self.volume_uniform_buffer,
493            0,
494            bytemuck::bytes_of(&uniforms),
495        );
496
497        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
498            label: Some("volren_volume_pass"),
499            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
500                view: target,
501                resolve_target: None,
502                ops: wgpu::Operations {
503                    load: wgpu::LoadOp::Load,
504                    store: wgpu::StoreOp::Store,
505                },
506            })],
507            depth_stencil_attachment: None,
508            timestamp_writes: None,
509            occlusion_query_set: None,
510        });
511        pass.set_pipeline(&self.volume_pipeline);
512        pass.set_bind_group(0, bind_group, &[]);
513        pass.set_viewport(
514            viewport.x as f32,
515            viewport.y as f32,
516            viewport.width as f32,
517            viewport.height as f32,
518            0.0,
519            1.0,
520        );
521        pass.draw(0..6, 0..1);
522        Ok(())
523    }
524
525    /// Render the volume into a newly-created off-screen texture.
526    ///
527    /// # Errors
528    /// Propagates the same errors as [`Self::render_volume`].
529    pub fn render_volume_to_texture(
530        &self,
531        encoder: &mut wgpu::CommandEncoder,
532        camera: &Camera,
533        params: &VolumeRenderParams,
534        width: u32,
535        height: u32,
536    ) -> Result<wgpu::Texture, RenderError> {
537        let texture = self.create_render_target(width, height);
538        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
539        self.render_volume(
540            encoder,
541            &view,
542            camera,
543            params,
544            Viewport::full(width, height),
545        )?;
546        Ok(texture)
547    }
548
549    /// Render the volume at reduced resolution and upscale the result into `target`.
550    ///
551    /// This is intended for interactive manipulation where responsiveness matters
552    /// more than final image quality. Passing `1` disables downsampling.
553    ///
554    /// # Errors
555    /// Propagates the same errors as [`Self::render_volume`].
556    pub fn render_volume_interactive(
557        &self,
558        encoder: &mut wgpu::CommandEncoder,
559        target: &wgpu::TextureView,
560        camera: &Camera,
561        params: &VolumeRenderParams,
562        viewport: Viewport,
563        downsample_factor: u32,
564    ) -> Result<(), RenderError> {
565        validate_viewport(viewport)?;
566        let factor = downsample_factor.max(1);
567        if factor == 1 {
568            return self.render_volume(encoder, target, camera, params, viewport);
569        }
570
571        let lod_width = (viewport.width / factor).max(1);
572        let lod_height = (viewport.height / factor).max(1);
573        let texture = self.create_render_target(lod_width, lod_height);
574        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
575        let mut lod_params = params.clone();
576        lod_params.step_size_factor *= factor as f32;
577
578        self.render_volume(
579            encoder,
580            &view,
581            camera,
582            &lod_params,
583            Viewport::full(lod_width, lod_height),
584        )?;
585        self.blit_texture_view(encoder, target, viewport, &view);
586        Ok(())
587    }
588
589    /// Render a 2D reslice (MPR slice) into the given color attachment.
590    ///
591    /// # Errors
592    /// Returns [`RenderError::NoVolume`] when no volume has been uploaded.
593    pub fn render_slice(
594        &self,
595        encoder: &mut wgpu::CommandEncoder,
596        target: &wgpu::TextureView,
597        slice_plane: &SlicePlane,
598        window_level: &WindowLevel,
599        viewport: Viewport,
600        thick_slab: Option<&ThickSlabParams>,
601    ) -> Result<(), RenderError> {
602        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
603        let bind_group = self
604            .slice_bind_group
605            .as_ref()
606            .ok_or(RenderError::NoVolume)?;
607        validate_viewport(viewport)?;
608
609        let uniforms = self.build_slice_uniforms(metadata, slice_plane, window_level, thick_slab);
610        self.queue
611            .write_buffer(&self.slice_uniform_buffer, 0, bytemuck::bytes_of(&uniforms));
612
613        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
614            label: Some("volren_slice_pass"),
615            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
616                view: target,
617                resolve_target: None,
618                ops: wgpu::Operations {
619                    load: wgpu::LoadOp::Load,
620                    store: wgpu::StoreOp::Store,
621                },
622            })],
623            depth_stencil_attachment: None,
624            timestamp_writes: None,
625            occlusion_query_set: None,
626        });
627        pass.set_pipeline(&self.slice_pipeline);
628        pass.set_bind_group(0, bind_group, &[]);
629        pass.set_viewport(
630            viewport.x as f32,
631            viewport.y as f32,
632            viewport.width as f32,
633            viewport.height as f32,
634            0.0,
635            1.0,
636        );
637        pass.draw(0..6, 0..1);
638        Ok(())
639    }
640
641    /// Render a slice into a newly-created off-screen texture.
642    ///
643    /// # Errors
644    /// Propagates the same errors as [`Self::render_slice`].
645    pub fn render_slice_to_texture(
646        &self,
647        encoder: &mut wgpu::CommandEncoder,
648        slice_plane: &SlicePlane,
649        window_level: &WindowLevel,
650        width: u32,
651        height: u32,
652        thick_slab: Option<&ThickSlabParams>,
653    ) -> Result<wgpu::Texture, RenderError> {
654        let texture = self.create_render_target(width, height);
655        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
656        self.render_slice(
657            encoder,
658            &view,
659            slice_plane,
660            window_level,
661            Viewport::full(width, height),
662            thick_slab,
663        )?;
664        Ok(texture)
665    }
666
667    /// Render crosshair overlay lines on a slice viewport.
668    ///
669    /// # Errors
670    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
671    pub fn render_crosshair(
672        &self,
673        encoder: &mut wgpu::CommandEncoder,
674        target: &wgpu::TextureView,
675        viewport: Viewport,
676        crosshair: &CrosshairParams,
677    ) -> Result<(), RenderError> {
678        validate_viewport(viewport)?;
679        let uniforms = CrosshairUniforms {
680            position: [
681                crosshair.position[0],
682                crosshair.position[1],
683                crosshair.thickness,
684                0.0,
685            ],
686            horizontal_color: crosshair.horizontal_color,
687            vertical_color: crosshair.vertical_color,
688            viewport: [viewport.width as f32, viewport.height as f32, 0.0, 0.0],
689        };
690        self.queue.write_buffer(
691            &self.crosshair_uniform_buffer,
692            0,
693            bytemuck::bytes_of(&uniforms),
694        );
695
696        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
697            label: Some("volren_crosshair_pass"),
698            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
699                view: target,
700                resolve_target: None,
701                ops: wgpu::Operations {
702                    load: wgpu::LoadOp::Load,
703                    store: wgpu::StoreOp::Store,
704                },
705            })],
706            depth_stencil_attachment: None,
707            timestamp_writes: None,
708            occlusion_query_set: None,
709        });
710        pass.set_pipeline(&self.crosshair_pipeline);
711        pass.set_bind_group(0, &self.crosshair_bind_group, &[]);
712        pass.set_viewport(
713            viewport.x as f32,
714            viewport.y as f32,
715            viewport.width as f32,
716            viewport.height as f32,
717            0.0,
718            1.0,
719        );
720        pass.draw(0..6, 0..1);
721        Ok(())
722    }
723
724    /// Render an orientation marker in the given viewport.
725    ///
726    /// The marker is generated on the CPU as a small RGBA image, then composited
727    /// over the target with a lightweight textured-quad pass.
728    ///
729    /// # Errors
730    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
731    pub fn render_orientation_marker(
732        &self,
733        encoder: &mut wgpu::CommandEncoder,
734        target: &wgpu::TextureView,
735        camera: &Camera,
736        viewport: Viewport,
737        labels: &OrientationLabels,
738    ) -> Result<(), RenderError> {
739        validate_viewport(viewport)?;
740        let width = viewport.width.max(1);
741        let height = viewport.height.max(1);
742        let image = build_orientation_marker_image(width, height, camera, labels);
743        self.blit_rgba8(encoder, target, viewport, width, height, &image);
744        Ok(())
745    }
746
747    fn build_uniforms(
748        &self,
749        camera: &Camera,
750        metadata: VolumeMetadata,
751        params: &VolumeRenderParams,
752        viewport: Viewport,
753    ) -> VolumeUniforms {
754        let aspect = f64::from(viewport.width) / f64::from(viewport.height.max(1));
755        let view = camera.view_matrix();
756        let proj = camera.projection_matrix(aspect);
757        let mvp = (proj * view).as_mat4();
758        let inv_mvp = (proj * view).inverse().as_mat4();
759
760        let blend_mode = match params.blend_mode {
761            BlendMode::Composite => bm::COMPOSITE,
762            BlendMode::MaximumIntensity => bm::MAXIMUM_INTENSITY,
763            BlendMode::MinimumIntensity => bm::MINIMUM_INTENSITY,
764            BlendMode::AverageIntensity => bm::AVERAGE_INTENSITY,
765            BlendMode::Additive => bm::ADDITIVE,
766            BlendMode::Isosurface { .. } => bm::ISOSURFACE,
767            _ => bm::COMPOSITE,
768        };
769        let (window_center, window_width) = params.window_level.map_or_else(
770            || {
771                let wl = WindowLevel::from_scalar_range(
772                    f64::from(metadata.scalar_range[0]),
773                    f64::from(metadata.scalar_range[1]),
774                );
775                (wl.center as f32, wl.width as f32)
776            },
777            |wl| (wl.center as f32, wl.width as f32),
778        );
779        let (shading_enabled, ambient, diffuse, specular, specular_power) =
780            if let Some(shading) = params.shading {
781                (
782                    1u32,
783                    shading.ambient,
784                    shading.diffuse,
785                    shading.specular,
786                    shading.specular_power,
787                )
788            } else {
789                (0u32, 0.0, 0.0, 0.0, 0.0)
790            };
791        let (clip_planes, num_clip_planes) = combined_clip_planes(params);
792        let iso_value = match params.blend_mode {
793            BlendMode::Isosurface { iso_value } => iso_value as f32,
794            _ => 0.0,
795        };
796        let camera_position = camera.position().as_vec3();
797
798        VolumeUniforms {
799            mvp: mvp.to_cols_array_2d(),
800            inv_mvp: inv_mvp.to_cols_array_2d(),
801            world_to_volume: metadata.world_to_volume,
802            volume_to_world: metadata.volume_to_world,
803            dimensions: [
804                metadata.dimensions[0],
805                metadata.dimensions[1],
806                metadata.dimensions[2],
807                0.0,
808            ],
809            spacing: [
810                metadata.spacing[0],
811                metadata.spacing[1],
812                metadata.spacing[2],
813                0.0,
814            ],
815            scalar_range: [
816                metadata.scalar_range[0],
817                metadata.scalar_range[1],
818                iso_value,
819                0.0,
820            ],
821            step_size: params.step_size_factor.max(1e-3),
822            opacity_correction: 1.0 / params.step_size_factor.max(1e-3),
823            blend_mode,
824            shading_enabled,
825            ambient,
826            diffuse,
827            specular,
828            specular_power,
829            light_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
830            camera_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
831            window_center,
832            window_width,
833            num_clip_planes,
834            _pad0: 0,
835            clip_planes,
836            background: params.background,
837        }
838    }
839
840    fn build_slice_uniforms(
841        &self,
842        metadata: VolumeMetadata,
843        slice_plane: &SlicePlane,
844        window_level: &WindowLevel,
845        thick_slab: Option<&ThickSlabParams>,
846    ) -> SliceUniforms {
847        let slab_mode = thick_slab.map_or(ThickSlabMode::Mip, |params| params.mode);
848        let (half_thickness, num_samples) = thick_slab.map_or((0.0f32, 1u32), |params| {
849            (params.half_thickness as f32, params.num_samples.max(1))
850        });
851
852        SliceUniforms {
853            world_to_volume: metadata.world_to_volume,
854            slice_origin: [
855                slice_plane.origin.x as f32,
856                slice_plane.origin.y as f32,
857                slice_plane.origin.z as f32,
858                0.0,
859            ],
860            slice_right: [
861                slice_plane.right.x as f32,
862                slice_plane.right.y as f32,
863                slice_plane.right.z as f32,
864                0.0,
865            ],
866            slice_up: [
867                slice_plane.up.x as f32,
868                slice_plane.up.y as f32,
869                slice_plane.up.z as f32,
870                0.0,
871            ],
872            slice_normal: [
873                slice_plane.normal().x as f32,
874                slice_plane.normal().y as f32,
875                slice_plane.normal().z as f32,
876                0.0,
877            ],
878            slice_extent: [
879                slice_plane.width as f32,
880                slice_plane.height as f32,
881                half_thickness,
882                0.0,
883            ],
884            window_level: [
885                window_level.center as f32,
886                window_level.width as f32,
887                0.0,
888                0.0,
889            ],
890            slab_params: [thick_slab_mode_code(slab_mode), num_samples, 0, 0],
891        }
892    }
893
894    fn upload_gradient_lut(&mut self, tf: &OpacityTransferFunction) {
895        let resolution = 1024u32;
896        let bytes = bake_opacity_lut_bytes(tf, resolution);
897        let (texture, view, sampler) = Self::create_lut_texture(&self.device, resolution);
898        self.queue.write_texture(
899            texture.as_image_copy(),
900            &bytes,
901            wgpu::TexelCopyBufferLayout {
902                offset: 0,
903                bytes_per_row: Some(resolution * 4 * 4),
904                rows_per_image: None,
905            },
906            wgpu::Extent3d {
907                width: resolution,
908                height: 1,
909                depth_or_array_layers: 1,
910            },
911        );
912        self.gradient_lut_texture = texture;
913        self.gradient_lut_view = view;
914        self.gradient_lut_sampler = sampler;
915        self.rebuild_bind_groups();
916    }
917
918    fn rebuild_bind_groups(&mut self) {
919        let Some(volume_texture) = self.volume_texture.as_ref() else {
920            return;
921        };
922
923        self.volume_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
924            label: Some("volren_volume_bind_group"),
925            layout: &self.volume_bind_group_layout,
926            entries: &[
927                wgpu::BindGroupEntry {
928                    binding: 0,
929                    resource: self.volume_uniform_buffer.as_entire_binding(),
930                },
931                wgpu::BindGroupEntry {
932                    binding: 1,
933                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
934                },
935                wgpu::BindGroupEntry {
936                    binding: 2,
937                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
938                },
939                wgpu::BindGroupEntry {
940                    binding: 3,
941                    resource: wgpu::BindingResource::TextureView(&self.lut_view),
942                },
943                wgpu::BindGroupEntry {
944                    binding: 4,
945                    resource: wgpu::BindingResource::Sampler(&self.lut_sampler),
946                },
947                wgpu::BindGroupEntry {
948                    binding: 5,
949                    resource: wgpu::BindingResource::TextureView(&self.gradient_lut_view),
950                },
951                wgpu::BindGroupEntry {
952                    binding: 6,
953                    resource: wgpu::BindingResource::Sampler(&self.gradient_lut_sampler),
954                },
955            ],
956        }));
957
958        self.slice_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
959            label: Some("volren_slice_bind_group"),
960            layout: &self.slice_bind_group_layout,
961            entries: &[
962                wgpu::BindGroupEntry {
963                    binding: 0,
964                    resource: self.slice_uniform_buffer.as_entire_binding(),
965                },
966                wgpu::BindGroupEntry {
967                    binding: 1,
968                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
969                },
970                wgpu::BindGroupEntry {
971                    binding: 2,
972                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
973                },
974            ],
975        }));
976    }
977
978    fn blit_rgba8(
979        &self,
980        encoder: &mut wgpu::CommandEncoder,
981        target: &wgpu::TextureView,
982        viewport: Viewport,
983        width: u32,
984        height: u32,
985        rgba: &[u8],
986    ) {
987        let texture = self.device.create_texture(&wgpu::TextureDescriptor {
988            label: Some("volren_blit_texture"),
989            size: wgpu::Extent3d {
990                width,
991                height,
992                depth_or_array_layers: 1,
993            },
994            mip_level_count: 1,
995            sample_count: 1,
996            dimension: wgpu::TextureDimension::D2,
997            format: wgpu::TextureFormat::Rgba8Unorm,
998            usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
999            view_formats: &[],
1000        });
1001        self.queue.write_texture(
1002            texture.as_image_copy(),
1003            rgba,
1004            wgpu::TexelCopyBufferLayout {
1005                offset: 0,
1006                bytes_per_row: Some(width * 4),
1007                rows_per_image: Some(height),
1008            },
1009            wgpu::Extent3d {
1010                width,
1011                height,
1012                depth_or_array_layers: 1,
1013            },
1014        );
1015        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1016        self.blit_texture_view(encoder, target, viewport, &view);
1017    }
1018
1019    fn blit_texture_view(
1020        &self,
1021        encoder: &mut wgpu::CommandEncoder,
1022        target: &wgpu::TextureView,
1023        viewport: Viewport,
1024        source_view: &wgpu::TextureView,
1025    ) {
1026        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1027            label: Some("volren_blit_bind_group"),
1028            layout: &self.blit_bind_group_layout,
1029            entries: &[
1030                wgpu::BindGroupEntry {
1031                    binding: 0,
1032                    resource: wgpu::BindingResource::TextureView(source_view),
1033                },
1034                wgpu::BindGroupEntry {
1035                    binding: 1,
1036                    resource: wgpu::BindingResource::Sampler(&self.blit_sampler),
1037                },
1038            ],
1039        });
1040
1041        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1042            label: Some("volren_blit_pass"),
1043            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1044                view: target,
1045                resolve_target: None,
1046                ops: wgpu::Operations {
1047                    load: wgpu::LoadOp::Load,
1048                    store: wgpu::StoreOp::Store,
1049                },
1050            })],
1051            depth_stencil_attachment: None,
1052            timestamp_writes: None,
1053            occlusion_query_set: None,
1054        });
1055        pass.set_pipeline(&self.blit_pipeline);
1056        pass.set_bind_group(0, &bind_group, &[]);
1057        pass.set_viewport(
1058            viewport.x as f32,
1059            viewport.y as f32,
1060            viewport.width as f32,
1061            viewport.height as f32,
1062            0.0,
1063            1.0,
1064        );
1065        pass.draw(0..6, 0..1);
1066    }
1067
1068    fn create_volume_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1069        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1070            label: Some("volren_volume_bgl"),
1071            entries: &[
1072                uniform_bgl_entry(0),
1073                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
1074                sampler_bgl_entry(2),
1075                texture_bgl_entry_nonfilterable(3, wgpu::TextureViewDimension::D1),
1076                sampler_bgl_entry_nonfiltering(4),
1077                texture_bgl_entry_nonfilterable(5, wgpu::TextureViewDimension::D1),
1078                sampler_bgl_entry_nonfiltering(6),
1079            ],
1080        })
1081    }
1082
1083    fn create_slice_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1084        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1085            label: Some("volren_slice_bgl"),
1086            entries: &[
1087                uniform_bgl_entry(0),
1088                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
1089                sampler_bgl_entry(2),
1090            ],
1091        })
1092    }
1093
1094    fn create_crosshair_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1095        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1096            label: Some("volren_crosshair_bgl"),
1097            entries: &[uniform_bgl_entry(0)],
1098        })
1099    }
1100
1101    fn create_blit_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1102        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1103            label: Some("volren_blit_bgl"),
1104            entries: &[texture_bgl_entry_2d(0), sampler_bgl_entry(1)],
1105        })
1106    }
1107
1108    fn create_pipeline(
1109        device: &wgpu::Device,
1110        shader: &wgpu::ShaderModule,
1111        bind_group_layout: &wgpu::BindGroupLayout,
1112        output_format: wgpu::TextureFormat,
1113        blend: Option<wgpu::BlendState>,
1114    ) -> wgpu::RenderPipeline {
1115        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
1116            label: Some("volren_pipeline_layout"),
1117            bind_group_layouts: &[bind_group_layout],
1118            push_constant_ranges: &[],
1119        });
1120
1121        device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
1122            label: Some("volren_pipeline"),
1123            layout: Some(&layout),
1124            vertex: wgpu::VertexState {
1125                module: shader,
1126                entry_point: Some("vs_main"),
1127                buffers: &[],
1128                compilation_options: Default::default(),
1129            },
1130            fragment: Some(wgpu::FragmentState {
1131                module: shader,
1132                entry_point: Some("fs_main"),
1133                targets: &[Some(wgpu::ColorTargetState {
1134                    format: output_format,
1135                    blend,
1136                    write_mask: wgpu::ColorWrites::ALL,
1137                })],
1138                compilation_options: Default::default(),
1139            }),
1140            primitive: wgpu::PrimitiveState {
1141                topology: wgpu::PrimitiveTopology::TriangleList,
1142                ..Default::default()
1143            },
1144            depth_stencil: None,
1145            multisample: wgpu::MultisampleState::default(),
1146            multiview: None,
1147            cache: None,
1148        })
1149    }
1150
1151    fn create_lut_texture(
1152        device: &wgpu::Device,
1153        size: u32,
1154    ) -> (wgpu::Texture, wgpu::TextureView, wgpu::Sampler) {
1155        let texture = device.create_texture(&wgpu::TextureDescriptor {
1156            label: Some("volren_lut"),
1157            size: wgpu::Extent3d {
1158                width: size,
1159                height: 1,
1160                depth_or_array_layers: 1,
1161            },
1162            mip_level_count: 1,
1163            sample_count: 1,
1164            dimension: wgpu::TextureDimension::D1,
1165            format: wgpu::TextureFormat::Rgba32Float,
1166            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
1167            view_formats: &[],
1168        });
1169        let view = texture.create_view(&wgpu::TextureViewDescriptor {
1170            dimension: Some(wgpu::TextureViewDimension::D1),
1171            ..Default::default()
1172        });
1173        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
1174            label: Some("volren_lut_sampler"),
1175            address_mode_u: wgpu::AddressMode::ClampToEdge,
1176            mag_filter: wgpu::FilterMode::Nearest,
1177            min_filter: wgpu::FilterMode::Nearest,
1178            ..Default::default()
1179        });
1180        (texture, view, sampler)
1181    }
1182}
1183
1184fn validate_viewport(viewport: Viewport) -> Result<(), RenderError> {
1185    if viewport.width == 0 || viewport.height == 0 {
1186        Err(RenderError::ZeroViewport)
1187    } else {
1188        Ok(())
1189    }
1190}
1191
1192fn opaque_unit_ramp() -> OpacityTransferFunction {
1193    let mut tf = OpacityTransferFunction::new();
1194    tf.add_point(0.0, 1.0);
1195    tf.add_point(1.0, 1.0);
1196    tf
1197}
1198
1199fn bake_opacity_lut_bytes(tf: &OpacityTransferFunction, resolution: u32) -> Vec<u8> {
1200    let mut rgba = Vec::with_capacity((resolution * 4) as usize);
1201    for i in 0..resolution {
1202        let t = if resolution <= 1 {
1203            0.0
1204        } else {
1205            f64::from(i) / f64::from(resolution - 1)
1206        };
1207        let opacity = tf.evaluate(t) as f32;
1208        rgba.extend_from_slice(&[opacity, opacity, opacity, 1.0]);
1209    }
1210    bytemuck::cast_slice(&rgba).to_vec()
1211}
1212
1213fn combined_clip_planes(params: &VolumeRenderParams) -> ([[f32; 4]; 6], u32) {
1214    let mut planes = params.clip_planes.clone();
1215    if let Some(bounds) = params.cropping_bounds {
1216        planes.extend([
1217            ClipPlane::from_point_and_normal(DVec3::new(bounds.min.x, 0.0, 0.0), DVec3::X),
1218            ClipPlane::from_point_and_normal(DVec3::new(bounds.max.x, 0.0, 0.0), DVec3::NEG_X),
1219            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.min.y, 0.0), DVec3::Y),
1220            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.max.y, 0.0), DVec3::NEG_Y),
1221            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.min.z), DVec3::Z),
1222            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.max.z), DVec3::NEG_Z),
1223        ]);
1224    }
1225
1226    let mut packed = [[0.0f32; 4]; 6];
1227    for (index, plane) in planes.iter().take(6).enumerate() {
1228        let eq = plane.equation;
1229        packed[index] = [eq.x as f32, eq.y as f32, eq.z as f32, eq.w as f32];
1230    }
1231    (packed, planes.len().min(6) as u32)
1232}
1233
1234fn thick_slab_mode_code(mode: ThickSlabMode) -> u32 {
1235    match mode {
1236        ThickSlabMode::Mip => 0,
1237        ThickSlabMode::MinIp => 1,
1238        ThickSlabMode::Mean => 2,
1239        _ => 0,
1240    }
1241}
1242
1243fn mat4_from_direction(direction: DMat3) -> DMat4 {
1244    DMat4::from_cols(
1245        direction.x_axis.extend(0.0),
1246        direction.y_axis.extend(0.0),
1247        direction.z_axis.extend(0.0),
1248        DVec4::W,
1249    )
1250}
1251
1252fn uniform_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
1253    wgpu::BindGroupLayoutEntry {
1254        binding,
1255        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
1256        ty: wgpu::BindingType::Buffer {
1257            ty: wgpu::BufferBindingType::Uniform,
1258            has_dynamic_offset: false,
1259            min_binding_size: None,
1260        },
1261        count: None,
1262    }
1263}
1264
1265fn texture_bgl_entry(
1266    binding: u32,
1267    view_dimension: wgpu::TextureViewDimension,
1268) -> wgpu::BindGroupLayoutEntry {
1269    wgpu::BindGroupLayoutEntry {
1270        binding,
1271        visibility: wgpu::ShaderStages::FRAGMENT,
1272        ty: wgpu::BindingType::Texture {
1273            sample_type: wgpu::TextureSampleType::Float { filterable: true },
1274            view_dimension,
1275            multisampled: false,
1276        },
1277        count: None,
1278    }
1279}
1280
1281fn texture_bgl_entry_nonfilterable(
1282    binding: u32,
1283    view_dimension: wgpu::TextureViewDimension,
1284) -> wgpu::BindGroupLayoutEntry {
1285    wgpu::BindGroupLayoutEntry {
1286        binding,
1287        visibility: wgpu::ShaderStages::FRAGMENT,
1288        ty: wgpu::BindingType::Texture {
1289            sample_type: wgpu::TextureSampleType::Float { filterable: false },
1290            view_dimension,
1291            multisampled: false,
1292        },
1293        count: None,
1294    }
1295}
1296
/// Bind-group-layout entry for a filterable 2D texture (fragment-only).
fn texture_bgl_entry_2d(binding: u32) -> wgpu::BindGroupLayoutEntry {
    texture_bgl_entry(binding, wgpu::TextureViewDimension::D2)
}
1300
1301fn sampler_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
1302    wgpu::BindGroupLayoutEntry {
1303        binding,
1304        visibility: wgpu::ShaderStages::FRAGMENT,
1305        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
1306        count: None,
1307    }
1308}
1309
1310fn sampler_bgl_entry_nonfiltering(binding: u32) -> wgpu::BindGroupLayoutEntry {
1311    wgpu::BindGroupLayoutEntry {
1312        binding,
1313        visibility: wgpu::ShaderStages::FRAGMENT,
1314        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
1315        count: None,
1316    }
1317}
1318
1319fn build_orientation_marker_image(
1320    width: u32,
1321    height: u32,
1322    camera: &Camera,
1323    labels: &OrientationLabels,
1324) -> Vec<u8> {
1325    let mut pixels = vec![0u8; (width as usize) * (height as usize) * 4];
1326    let center = DVec2::new(f64::from(width) * 0.5, f64::from(height) * 0.5);
1327    let radius = f64::from(width.min(height)) * 0.28;
1328    let view = camera.view_matrix();
1329
1330    let axes = [
1331        (DVec3::X, [255, 80, 80, 255], labels.right.as_str()),
1332        (-DVec3::X, [128, 40, 40, 220], labels.left.as_str()),
1333        (DVec3::Y, [80, 255, 80, 255], labels.anterior.as_str()),
1334        (-DVec3::Y, [40, 128, 40, 220], labels.posterior.as_str()),
1335        (DVec3::Z, [80, 160, 255, 255], labels.superior.as_str()),
1336        (-DVec3::Z, [40, 80, 128, 220], labels.inferior.as_str()),
1337    ];
1338
1339    for (axis, color, label) in axes {
1340        let projected = project_axis(view, axis);
1341        if projected.length_squared() < 1e-8 {
1342            continue;
1343        }
1344        let end = center + projected.normalize() * radius;
1345        draw_line(&mut pixels, width, height, center, end, color);
1346        draw_text(
1347            &mut pixels,
1348            width,
1349            height,
1350            end + projected.normalize() * 6.0,
1351            label,
1352            color,
1353        );
1354    }
1355
1356    draw_disc(
1357        &mut pixels,
1358        width,
1359        height,
1360        center,
1361        2.5,
1362        [255, 255, 255, 255],
1363    );
1364    pixels
1365}
1366
1367fn project_axis(view: DMat4, axis: DVec3) -> DVec2 {
1368    let camera_space = view.transform_vector3(axis);
1369    DVec2::new(camera_space.x, -camera_space.y)
1370}
1371
1372fn draw_line(pixels: &mut [u8], width: u32, height: u32, start: DVec2, end: DVec2, color: [u8; 4]) {
1373    let delta = end - start;
1374    let steps = delta.length().ceil().max(1.0) as u32;
1375    for step in 0..=steps {
1376        let t = f64::from(step) / f64::from(steps.max(1));
1377        let point = start + delta * t;
1378        alpha_plot(
1379            pixels,
1380            width,
1381            height,
1382            point.x.round() as i32,
1383            point.y.round() as i32,
1384            color,
1385        );
1386    }
1387}
1388
1389fn draw_disc(
1390    pixels: &mut [u8],
1391    width: u32,
1392    height: u32,
1393    center: DVec2,
1394    radius: f64,
1395    color: [u8; 4],
1396) {
1397    let min_x = (center.x - radius).floor() as i32;
1398    let max_x = (center.x + radius).ceil() as i32;
1399    let min_y = (center.y - radius).floor() as i32;
1400    let max_y = (center.y + radius).ceil() as i32;
1401
1402    for y in min_y..=max_y {
1403        for x in min_x..=max_x {
1404            let dx = f64::from(x) - center.x;
1405            let dy = f64::from(y) - center.y;
1406            if dx * dx + dy * dy <= radius * radius {
1407                alpha_plot(pixels, width, height, x, y, color);
1408            }
1409        }
1410    }
1411}
1412
1413fn draw_text(
1414    pixels: &mut [u8],
1415    width: u32,
1416    height: u32,
1417    position: DVec2,
1418    text: &str,
1419    color: [u8; 4],
1420) {
1421    let mut cursor_x = position.x.round() as i32;
1422    let cursor_y = position.y.round() as i32;
1423    for ch in text.chars() {
1424        draw_char(pixels, width, height, cursor_x, cursor_y, ch, color);
1425        cursor_x += 6;
1426    }
1427}
1428
1429fn draw_char(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, ch: char, color: [u8; 4]) {
1430    let glyph = glyph_rows(ch);
1431    for (row_index, row_bits) in glyph.iter().enumerate() {
1432        for col in 0..5 {
1433            if (row_bits >> (4 - col)) & 1 == 1 {
1434                alpha_plot(pixels, width, height, x + col, y + row_index as i32, color);
1435            }
1436        }
1437    }
1438}
1439
/// Composite `color` over the RGBA8 pixel at (`x`, `y`) with straight-alpha
/// "source over" blending; coordinates outside the image are ignored.
fn alpha_plot(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, color: [u8; 4]) {
    let in_bounds = (0..width as i32).contains(&x) && (0..height as i32).contains(&y);
    if !in_bounds {
        return;
    }
    let base = ((y as u32 * width + x as u32) * 4) as usize;
    let src_a = f32::from(color[3]) / 255.0;
    let dst_a = f32::from(pixels[base + 3]) / 255.0;
    let out_a = src_a + dst_a * (1.0 - src_a);

    // Un-premultiplied "over" for a single channel; fully transparent output
    // collapses to 0 to avoid dividing by (near-)zero coverage.
    let mix = |src: u8, dst: u8| -> u8 {
        if out_a <= f32::EPSILON {
            return 0;
        }
        (((f32::from(src) * src_a) + (f32::from(dst) * dst_a * (1.0 - src_a))) / out_a)
            .round()
            .clamp(0.0, 255.0) as u8
    };

    for channel in 0..3 {
        pixels[base + channel] = mix(color[channel], pixels[base + channel]);
    }
    pixels[base + 3] = (out_a * 255.0).round().clamp(0.0, 255.0) as u8;
}
1463
/// 5x7 bitmap rows for the orientation-label font (bit 4 = leftmost pixel).
///
/// Lowercase input is folded to uppercase; characters without a glyph render
/// as a question-mark-like fallback shape.
fn glyph_rows(ch: char) -> [u8; 7] {
    const FALLBACK: [u8; 7] = [
        0b11111, 0b00001, 0b00010, 0b00100, 0b00100, 0b00000, 0b00100,
    ];
    match ch.to_ascii_uppercase() {
        'A' => [
            0b01110, 0b10001, 0b10001, 0b11111, 0b10001, 0b10001, 0b10001,
        ],
        'I' => [
            0b11111, 0b00100, 0b00100, 0b00100, 0b00100, 0b00100, 0b11111,
        ],
        'L' => [
            0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b11111,
        ],
        'P' => [
            0b11110, 0b10001, 0b10001, 0b11110, 0b10000, 0b10000, 0b10000,
        ],
        'R' => [
            0b11110, 0b10001, 0b10001, 0b11110, 0b10100, 0b10010, 0b10001,
        ],
        'S' => [
            0b01111, 0b10000, 0b10000, 0b01110, 0b00001, 0b00001, 0b11110,
        ],
        ' ' => [0; 7],
        _ => FALLBACK,
    }
}
1490
#[cfg(test)]
mod tests {
    use super::*;

    // Spot-check the glyph bitmaps against known rows so accidental edits to
    // the font table are caught.
    #[test]
    fn glyph_table_contains_expected_rows() {
        assert_eq!(glyph_rows('R')[0], 0b11110);
        assert_eq!(glyph_rows('I')[6], 0b11111);
    }

    // These codes are written into the `slab_params` uniform consumed by the
    // shaders; pin them so the mapping cannot drift silently.
    #[test]
    fn thick_slab_mode_codes_are_stable() {
        assert_eq!(thick_slab_mode_code(ThickSlabMode::Mip), 0);
        assert_eq!(thick_slab_mode_code(ThickSlabMode::MinIp), 1);
        assert_eq!(thick_slab_mode_code(ThickSlabMode::Mean), 2);
    }
}
1508
#[cfg(all(test, feature = "snapshot-tests"))]
mod gpu_smoke_tests {
    use super::*;
    use std::sync::mpsc;

    use glam::{DMat3, DVec3, UVec3};
    use volren_core::{Volume, VolumeRenderParams};

    // Try to acquire a real GPU adapter and device. Returns `None` when the
    // host has no usable adapter so the (ignored) tests can bail gracefully.
    fn test_device() -> Option<(wgpu::Device, wgpu::Queue)> {
        pollster::block_on(async {
            let instance = wgpu::Instance::default();
            let adapter = instance
                .request_adapter(&wgpu::RequestAdapterOptions {
                    power_preference: wgpu::PowerPreference::LowPower,
                    compatible_surface: None,
                    force_fallback_adapter: false,
                })
                .await?;
            adapter
                .request_device(&wgpu::DeviceDescriptor::default(), None)
                .await
                .ok()
        })
    }

    // 16x16x16 u16 volume: all zeros except one bright voxel at the center.
    fn small_volume() -> DynVolume {
        let mut data = vec![0u16; 16 * 16 * 16];
        data[8 + 8 * 16 + 8 * 16 * 16] = 2048;
        Volume::from_data(
            data,
            UVec3::new(16, 16, 16),
            DVec3::ONE,
            DVec3::ZERO,
            DMat3::IDENTITY,
            1,
        )
        .expect("valid test volume")
        .into()
    }

    // 32x32x32 u16 volume containing a solid bright sphere of radius 9 voxels
    // centered in the grid.
    fn sphere_volume() -> DynVolume {
        let dims = UVec3::new(32, 32, 32);
        let center = DVec3::new(15.5, 15.5, 15.5);
        let radius = 9.0;
        let mut data = vec![0u16; (dims.x * dims.y * dims.z) as usize];

        for z in 0..dims.z {
            for y in 0..dims.y {
                for x in 0..dims.x {
                    let index = (z * dims.x * dims.y + y * dims.x + x) as usize;
                    let point = DVec3::new(f64::from(x), f64::from(y), f64::from(z));
                    data[index] = if (point - center).length() <= radius {
                        2048
                    } else {
                        0
                    };
                }
            }
        }

        Volume::from_data(data, dims, DVec3::ONE, DVec3::ZERO, DMat3::IDENTITY, 1)
            .expect("valid sphere volume")
            .into()
    }

    // Copy a render-target texture back to the CPU as tightly packed RGBA8.
    // Rows in the staging buffer are padded to a 256-byte multiple (wgpu's
    // copy-alignment requirement); the padding is stripped while copying
    // row by row into `pixels`.
    fn read_texture(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        texture: &wgpu::Texture,
        width: u32,
        height: u32,
    ) -> Vec<u8> {
        let unpadded_bytes_per_row = width * 4;
        let padded_bytes_per_row = unpadded_bytes_per_row.div_ceil(256) * 256;
        let buffer_size = u64::from(padded_bytes_per_row) * u64::from(height);
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_test_readback"),
            size: buffer_size,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });

        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        encoder.copy_texture_to_buffer(
            texture.as_image_copy(),
            wgpu::TexelCopyBufferInfo {
                buffer: &buffer,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    bytes_per_row: Some(padded_bytes_per_row),
                    rows_per_image: Some(height),
                },
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );
        queue.submit(std::iter::once(encoder.finish()));

        // Block synchronously until the copy completes and the buffer maps.
        let (sender, receiver) = mpsc::channel();
        buffer
            .slice(..)
            .map_async(wgpu::MapMode::Read, move |result| {
                let _ = sender.send(result);
            });
        let _ = device.poll(wgpu::MaintainBase::Wait);
        receiver.recv().expect("map callback").expect("map success");

        let mapped = buffer.slice(..).get_mapped_range();
        let mut pixels = vec![0u8; (unpadded_bytes_per_row * height) as usize];
        for row in 0..height as usize {
            let src_offset = row * padded_bytes_per_row as usize;
            let dst_offset = row * unpadded_bytes_per_row as usize;
            pixels[dst_offset..dst_offset + unpadded_bytes_per_row as usize]
                .copy_from_slice(&mapped[src_offset..src_offset + unpadded_bytes_per_row as usize]);
        }
        drop(mapped);
        buffer.unmap();
        pixels
    }

    // Position-sensitive checksum: each byte is weighted by (index + 1), so
    // identical bytes at shifted positions produce different sums.
    fn checksum(bytes: &[u8]) -> u64 {
        bytes.iter().enumerate().fold(0u64, |acc, (index, value)| {
            acc.wrapping_add((index as u64 + 1) * u64::from(*value))
        })
    }

    // Encode and submit a full raycast pass; passing means no wgpu
    // validation error was raised along the way.
    #[test]
    #[ignore = "requires a working GPU adapter"]
    fn render_volume_smoke_test() {
        let Some((device, queue)) = test_device() else {
            return;
        };
        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
        let volume = small_volume();
        renderer.set_volume(&volume, true);
        renderer
            .set_render_params(&VolumeRenderParams::default())
            .expect("render params upload");

        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 50.0), DVec3::ZERO, 30.0);
        let texture = renderer.create_render_target(64, 64);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        renderer
            .render_volume(
                &mut encoder,
                &view,
                &camera,
                &VolumeRenderParams::default(),
                Viewport::full(64, 64),
            )
            .expect("volume render");
        queue.submit(std::iter::once(encoder.finish()));
    }

    // Render the sphere, read the pixels back, and require a non-zero
    // checksum (the checksum is also printed for manual snapshot comparison).
    #[test]
    #[ignore = "requires a working GPU adapter"]
    fn render_sphere_snapshot_checksum() {
        let Some((device, queue)) = test_device() else {
            return;
        };
        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
        let volume = sphere_volume();
        renderer.set_volume(&volume, true);
        let params = VolumeRenderParams::default();
        renderer
            .set_render_params(&params)
            .expect("render params upload");

        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 60.0), DVec3::ZERO, 30.0);
        let texture = renderer.create_render_target(64, 64);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        renderer
            .render_volume(
                &mut encoder,
                &view,
                &camera,
                &params,
                Viewport::full(64, 64),
            )
            .expect("volume render");
        queue.submit(std::iter::once(encoder.finish()));

        let pixels = read_texture(&device, &queue, &texture, 64, 64);
        let image_checksum = checksum(&pixels);
        eprintln!("sphere checksum: {image_checksum}");
        assert!(image_checksum > 0, "rendered sphere should not be empty");
    }

    // Encode a reslice pass followed by the crosshair overlay into the same
    // target; passing means both pipelines encode without validation errors.
    #[test]
    #[ignore = "requires a working GPU adapter"]
    fn render_slice_and_crosshair_smoke_test() {
        let Some((device, queue)) = test_device() else {
            return;
        };
        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
        let volume = small_volume();
        renderer.set_volume(&volume, true);
        renderer
            .set_render_params(&VolumeRenderParams::default())
            .expect("render params upload");

        let texture = renderer.create_render_target(64, 64);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        renderer
            .render_slice(
                &mut encoder,
                &view,
                &SlicePlane::axial(0.0, 32.0),
                &WindowLevel::from_scalar_range(0.0, 2048.0),
                Viewport::full(64, 64),
                None,
            )
            .expect("slice render");
        renderer
            .render_crosshair(
                &mut encoder,
                &view,
                Viewport::full(64, 64),
                &CrosshairParams::default(),
            )
            .expect("crosshair render");
        queue.submit(std::iter::once(encoder.finish()));
    }
}