// volren_gpu/renderer.rs
1//! Main volume renderer: wgpu pipeline creation and render execution.
2
3use std::sync::Arc;
4
5use bytemuck::{Pod, Zeroable};
6use glam::{DMat3, DMat4, DVec2, DVec3, DVec4};
7use half::f16;
8use volren_core::{
9    camera::Camera,
10    render_params::{BlendMode, ClipPlane, VolumeRenderParams},
11    reslice::{SlicePlane, ThickSlabMode, ThickSlabParams},
12    transfer_function::{OpacityTransferFunction, TransferFunctionLut},
13    volume::{DynVolume, VolumeInfo},
14    window_level::WindowLevel,
15};
16
17use crate::{
18    texture::GpuVolumeTexture,
19    uniforms::{blend_mode as bm, VolumeUniforms},
20};
21
// The volume shader is assembled from several WGSL modules; `concat!` stitches
// them into a single source string at compile time, with newline separators so
// the last line of one file cannot fuse with the first line of the next.
const VOLUME_SHADER_SRC: &str = concat!(
    include_str!("shaders/common.wgsl"),
    "\n",
    include_str!("shaders/fullscreen_quad.wgsl"),
    "\n",
    include_str!("shaders/gradient.wgsl"),
    "\n",
    include_str!("shaders/shading.wgsl"),
    "\n",
    include_str!("shaders/volume_raycast.wgsl"),
);
// Single-file shader sources for the auxiliary passes.
const RESLICE_SHADER_SRC: &str = include_str!("shaders/reslice.wgsl");
const CROSSHAIR_SHADER_SRC: &str = include_str!("shaders/crosshair.wgsl");
const BLIT_SHADER_SRC: &str = include_str!("shaders/blit_rgba.wgsl");
36
/// Rectangular sub-region of the render target, in pixels.
#[derive(Debug, Clone, Copy)]
pub struct Viewport {
    /// Horizontal offset from the left edge.
    pub x: u32,
    /// Vertical offset from the top edge.
    pub y: u32,
    /// Width in pixels.
    pub width: u32,
    /// Height in pixels.
    pub height: u32,
}

impl Viewport {
    /// A viewport anchored at the origin that spans the whole render target.
    #[must_use]
    pub fn full(width: u32, height: u32) -> Self {
        Self {
            width,
            height,
            x: 0,
            y: 0,
        }
    }
}
62
/// Errors that can occur during rendering.
///
/// `Display` messages come from `thiserror`'s `#[error]` attributes, so these
/// variants render as human-readable strings suitable for logs and UI.
#[derive(Debug, thiserror::Error)]
pub enum RenderError {
    /// No volume has been uploaded yet.
    #[error("no volume data uploaded — call `set_volume()` first")]
    NoVolume,
    /// No transfer-function data has been uploaded yet.
    #[error("no render parameters uploaded — call `set_render_params()` first")]
    NoTransferFunction,
    /// Viewport has zero area.
    #[error("viewport has zero area")]
    ZeroViewport,
    /// A progressive slice update referenced a texture slice that does not exist.
    #[error("slice index {z_index} is out of bounds for depth {depth}")]
    SliceOutOfBounds {
        /// Slice index requested by the caller.
        z_index: u32,
        /// Texture depth currently allocated.
        depth: u32,
    },
    /// A progressive slice update had the wrong number of voxels.
    #[error("slice {z_index} has {actual} voxels, expected {expected}")]
    SliceLengthMismatch {
        /// Slice index requested by the caller.
        z_index: u32,
        /// Expected voxel count for one slice (width × height).
        expected: usize,
        /// Actual voxel count supplied.
        actual: usize,
    },
}
94
/// Parameters for rendering crosshair overlay lines on a 2D slice viewport.
#[derive(Debug, Clone)]
pub struct CrosshairParams {
    /// Normalised position in `[0, 1] × [0, 1]` on the slice.
    pub position: [f32; 2],
    /// Line colour for the horizontal line (RGBA, 0–1).
    pub horizontal_color: [f32; 4],
    /// Line colour for the vertical line (RGBA, 0–1).
    pub vertical_color: [f32; 4],
    /// Line thickness in pixels.
    pub thickness: f32,
}

impl Default for CrosshairParams {
    /// Centered crosshair, 1-pixel lines: red horizontal, green vertical.
    fn default() -> Self {
        CrosshairParams {
            position: [0.5; 2],
            horizontal_color: [1.0, 0.0, 0.0, 1.0],
            vertical_color: [0.0, 1.0, 0.0, 1.0],
            thickness: 1.0,
        }
    }
}
118
/// Patient orientation labels for the orientation marker.
#[derive(Debug, Clone)]
pub struct OrientationLabels {
    /// Label for the right direction (+X).
    pub right: String,
    /// Label for the left direction (−X).
    pub left: String,
    /// Label for the anterior direction (+Y).
    pub anterior: String,
    /// Label for the posterior direction (−Y).
    pub posterior: String,
    /// Label for the superior direction (+Z).
    pub superior: String,
    /// Label for the inferior direction (−Z).
    pub inferior: String,
}

impl Default for OrientationLabels {
    /// The conventional single-letter anatomical labels (R/L/A/P/S/I).
    fn default() -> Self {
        OrientationLabels {
            right: String::from("R"),
            left: String::from("L"),
            anterior: String::from("A"),
            posterior: String::from("P"),
            superior: String::from("S"),
            inferior: String::from("I"),
        }
    }
}
148
149#[derive(Debug, Clone, Copy)]
150struct VolumeMetadata {
151    world_to_volume: [[f32; 4]; 4],
152    volume_to_world: [[f32; 4]; 4],
153    dimensions: [f32; 3],
154    spacing: [f32; 3],
155    scalar_range: [f32; 2],
156}
157
158impl VolumeMetadata {
159    fn from_volume(volume: &DynVolume) -> Self {
160        let (scalar_min, scalar_max) = volume.scalar_range();
161        Self::from_parts(
162            volume.dimensions(),
163            volume.spacing(),
164            volume.origin(),
165            volume.direction(),
166            (scalar_min, scalar_max),
167        )
168    }
169
170    fn from_parts(
171        dimensions: glam::UVec3,
172        spacing: DVec3,
173        origin: DVec3,
174        direction: DMat3,
175        scalar_range: (f64, f64),
176    ) -> Self {
177        let dimensions_f64 = dimensions.as_dvec3();
178        let direction = mat4_from_direction(direction);
179        let scale = DVec3::new(
180            (dimensions_f64.x - 1.0).max(1.0) * spacing.x,
181            (dimensions_f64.y - 1.0).max(1.0) * spacing.y,
182            (dimensions_f64.z - 1.0).max(1.0) * spacing.z,
183        );
184        let volume_to_world =
185            DMat4::from_translation(origin) * direction * DMat4::from_scale(scale);
186        let world_to_volume = volume_to_world.inverse();
187
188        Self {
189            world_to_volume: world_to_volume.as_mat4().to_cols_array_2d(),
190            volume_to_world: volume_to_world.as_mat4().to_cols_array_2d(),
191            dimensions: [dimensions_f64.x as f32, dimensions_f64.y as f32, dimensions_f64.z as f32],
192            spacing: [spacing.x as f32, spacing.y as f32, spacing.z as f32],
193            scalar_range: [scalar_range.0 as f32, scalar_range.1 as f32],
194        }
195    }
196}
197
/// GPU uniform block for the reslice (MPR) pass.
///
/// Every field is a multiple of 16 bytes, which satisfies WGSL uniform-buffer
/// alignment; the layout must match the uniform struct declared in
/// `shaders/reslice.wgsl`. Field packing is done elsewhere
/// (`build_slice_uniforms`), so the exact use of each fourth component is not
/// visible here — presumably padding; confirm against the shader.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct SliceUniforms {
    world_to_volume: [[f32; 4]; 4],
    slice_origin: [f32; 4],
    slice_right: [f32; 4],
    slice_up: [f32; 4],
    slice_normal: [f32; 4],
    slice_extent: [f32; 4],
    window_level: [f32; 4],
    slab_params: [u32; 4],
}
210
/// GPU uniform block for the crosshair overlay pass.
///
/// Packing (see `render_crosshair`): `position` holds
/// `(x, y, thickness, 0)` and `viewport` holds `(width, height, 0, 0)`.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct CrosshairUniforms {
    position: [f32; 4],
    horizontal_color: [f32; 4],
    vertical_color: [f32; 4],
    viewport: [f32; 4],
}
219
/// A fully GPU-resident volume renderer.
///
/// The renderer stores the uploaded 3D texture plus the metadata needed for
/// raycasting and reslicing. Call [`VolumeRenderer::set_render_params`] after
/// uploading a volume to bake the active transfer functions.
pub struct VolumeRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,

    // Raycast pass: pipeline, the layout used to (re)build bind groups when
    // textures change, and the per-frame uniform buffer.
    volume_pipeline: wgpu::RenderPipeline,
    volume_bind_group_layout: wgpu::BindGroupLayout,
    volume_uniform_buffer: wgpu::Buffer,

    // Reslice (MPR) pass.
    slice_pipeline: wgpu::RenderPipeline,
    slice_bind_group_layout: wgpu::BindGroupLayout,
    slice_uniform_buffer: wgpu::Buffer,

    // Crosshair overlay pass; its bind group references only the uniform
    // buffer, so it is created once up front and never rebuilt.
    crosshair_pipeline: wgpu::RenderPipeline,
    crosshair_uniform_buffer: wgpu::Buffer,
    crosshair_bind_group: wgpu::BindGroup,

    // Textured-quad blit pass (used for LOD upscaling and RGBA8 overlays).
    blit_pipeline: wgpu::RenderPipeline,
    blit_bind_group_layout: wgpu::BindGroupLayout,
    blit_sampler: wgpu::Sampler,

    // Colour/opacity transfer-function LUT and gradient-opacity LUT textures.
    lut_texture: wgpu::Texture,
    lut_view: wgpu::TextureView,
    lut_sampler: wgpu::Sampler,
    gradient_lut_texture: wgpu::Texture,
    gradient_lut_view: wgpu::TextureView,
    gradient_lut_sampler: wgpu::Sampler,

    // Per-volume state: `None` until a volume is uploaded or allocated.
    volume_texture: Option<GpuVolumeTexture>,
    volume_bind_group: Option<wgpu::BindGroup>,
    slice_bind_group: Option<wgpu::BindGroup>,
    volume_metadata: Option<VolumeMetadata>,
    // Set once a transfer-function LUT has been uploaded.
    has_render_params: bool,

    output_format: wgpu::TextureFormat,
    // Last size passed to `resize`; (0, 0) until then.
    viewport_size: (u32, u32),
}
261
262impl VolumeRenderer {
263    /// Create a new renderer for the given device and output format.
264    #[must_use]
265    pub fn new(
266        device: &wgpu::Device,
267        queue: &wgpu::Queue,
268        output_format: wgpu::TextureFormat,
269    ) -> Self {
270        Self::from_arc(
271            Arc::new(device.clone()),
272            Arc::new(queue.clone()),
273            output_format,
274        )
275    }
276
    /// Create a renderer from `Arc`-wrapped device and queue.
    ///
    /// Compiles the four WGSL shader modules (raycast, reslice, crosshair,
    /// blit), builds their bind-group layouts and alpha-blended render
    /// pipelines, allocates the uniform buffers, and creates placeholder LUT
    /// textures. No volume data exists yet — upload via [`Self::set_volume`]
    /// or [`Self::allocate_volume`], then bake transfer functions with
    /// [`Self::set_render_params`].
    #[must_use]
    pub fn from_arc(
        device: Arc<wgpu::Device>,
        queue: Arc<wgpu::Queue>,
        output_format: wgpu::TextureFormat,
    ) -> Self {
        // One shader module per pass; the volume shader is the concatenated
        // multi-file source in `VOLUME_SHADER_SRC`.
        let volume_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_volume_shader"),
            source: wgpu::ShaderSource::Wgsl(VOLUME_SHADER_SRC.into()),
        });
        let slice_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_reslice_shader"),
            source: wgpu::ShaderSource::Wgsl(RESLICE_SHADER_SRC.into()),
        });
        let crosshair_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_crosshair_shader"),
            source: wgpu::ShaderSource::Wgsl(CROSSHAIR_SHADER_SRC.into()),
        });
        let blit_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_blit_shader"),
            source: wgpu::ShaderSource::Wgsl(BLIT_SHADER_SRC.into()),
        });

        // Layouts are kept on `self` so bind groups can be rebuilt whenever a
        // volume or LUT texture is replaced.
        let volume_bind_group_layout = Self::create_volume_bind_group_layout(&device);
        let slice_bind_group_layout = Self::create_slice_bind_group_layout(&device);
        let crosshair_bind_group_layout = Self::create_crosshair_bind_group_layout(&device);
        let blit_bind_group_layout = Self::create_blit_bind_group_layout(&device);

        // All four pipelines target `output_format` and alpha-blend over
        // whatever is already in the attachment.
        let volume_pipeline = Self::create_pipeline(
            &device,
            &volume_shader,
            &volume_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let slice_pipeline = Self::create_pipeline(
            &device,
            &slice_shader,
            &slice_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let crosshair_pipeline = Self::create_pipeline(
            &device,
            &crosshair_shader,
            &crosshair_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let blit_pipeline = Self::create_pipeline(
            &device,
            &blit_shader,
            &blit_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );

        // Uniform buffers are written per frame through `queue.write_buffer`,
        // hence UNIFORM | COPY_DST and no initial mapping.
        let volume_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_volume_uniforms"),
            size: std::mem::size_of::<VolumeUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let slice_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_slice_uniforms"),
            size: std::mem::size_of::<SliceUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let crosshair_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_crosshair_uniforms"),
            size: std::mem::size_of::<CrosshairUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // The crosshair pass binds only its uniform buffer, so its bind group
        // never needs rebuilding and can be created here once.
        let crosshair_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_crosshair_bind_group"),
            layout: &crosshair_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: crosshair_uniform_buffer.as_entire_binding(),
            }],
        });

        // Placeholder LUTs: 4096 entries for the colour/opacity transfer
        // function, 1024 for the gradient-opacity function. Real data arrives
        // via `set_transfer_function` / `set_render_params`.
        let (lut_texture, lut_view, lut_sampler) = Self::create_lut_texture(&device, 4096);
        let (gradient_lut_texture, gradient_lut_view, gradient_lut_sampler) =
            Self::create_lut_texture(&device, 1024);
        let blit_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("volren_blit_sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        Self {
            device,
            queue,
            volume_pipeline,
            volume_bind_group_layout,
            volume_uniform_buffer,
            slice_pipeline,
            slice_bind_group_layout,
            slice_uniform_buffer,
            crosshair_pipeline,
            crosshair_uniform_buffer,
            crosshair_bind_group,
            blit_pipeline,
            blit_bind_group_layout,
            blit_sampler,
            lut_texture,
            lut_view,
            lut_sampler,
            gradient_lut_texture,
            gradient_lut_view,
            gradient_lut_sampler,
            volume_texture: None,
            volume_bind_group: None,
            slice_bind_group: None,
            volume_metadata: None,
            has_render_params: false,
            output_format,
            viewport_size: (0, 0),
        }
    }
405
    /// The texture format this renderer outputs into.
    ///
    /// All pipelines and off-screen targets created by this renderer use this
    /// format, so externally supplied render targets must match it.
    #[must_use]
    pub fn output_format(&self) -> wgpu::TextureFormat {
        self.output_format
    }
411
412    /// Upload (or replace) volume data as a 3D GPU texture.
413    pub fn set_volume(&mut self, volume: &DynVolume, linear_interpolation: bool) {
414        self.volume_texture = Some(GpuVolumeTexture::upload(
415            &self.device,
416            &self.queue,
417            volume,
418            linear_interpolation,
419        ));
420        self.volume_metadata = Some(VolumeMetadata::from_volume(volume));
421        self.rebuild_bind_groups();
422    }
423
424    /// Allocate an empty 3D volume texture for progressive slice uploads.
425    pub fn allocate_volume(
426        &mut self,
427        dimensions: glam::UVec3,
428        spacing: DVec3,
429        origin: DVec3,
430        direction: DMat3,
431        scalar_range: (f64, f64),
432        linear_interpolation: bool,
433    ) {
434        self.volume_texture = Some(GpuVolumeTexture::allocate_empty(
435            &self.device,
436            dimensions,
437            linear_interpolation,
438        ));
439        self.volume_metadata = Some(VolumeMetadata::from_parts(
440            dimensions,
441            spacing,
442            origin,
443            direction,
444            scalar_range,
445        ));
446        self.rebuild_bind_groups();
447    }
448
449    /// Update one signed 16-bit volume slice inside an already allocated texture.
450    pub fn update_volume_slice_i16(
451        &mut self,
452        z_index: u32,
453        pixels: &[i16],
454        scalar_range: (f64, f64),
455    ) -> Result<(), RenderError> {
456        let texture = self.volume_texture.as_ref().ok_or(RenderError::NoVolume)?;
457        if z_index >= texture.dimensions.z {
458            return Err(RenderError::SliceOutOfBounds {
459                z_index,
460                depth: texture.dimensions.z,
461            });
462        }
463        let expected = (texture.dimensions.x * texture.dimensions.y) as usize;
464        if pixels.len() != expected {
465            return Err(RenderError::SliceLengthMismatch {
466                z_index,
467                expected,
468                actual: pixels.len(),
469            });
470        }
471        texture.update_i16_slice(&self.queue, z_index, pixels);
472        if let Some(metadata) = self.volume_metadata.as_mut() {
473            metadata.scalar_range = [scalar_range.0 as f32, scalar_range.1 as f32];
474        }
475        Ok(())
476    }
477
    /// Upload a baked transfer-function LUT to the GPU.
    ///
    /// Recreates the LUT texture at the LUT's native size, converts the f32
    /// RGBA entries to f16, uploads them, and rebuilds the bind groups so the
    /// pipelines sample the new texture.
    pub fn set_transfer_function(&mut self, lut: &TransferFunctionLut) {
        let (texture, view, sampler) = Self::create_lut_texture(&self.device, lut.lut_size());
        let f16_bytes = f32_slice_to_f16_bytes(lut.as_rgba_f32());
        self.queue.write_texture(
            texture.as_image_copy(),
            &f16_bytes,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // One row: lut_size texels × 4 channels × 2 bytes (f16) each.
                bytes_per_row: Some(lut.lut_size() * 4 * 2),
                rows_per_image: None,
            },
            // The LUT is a 1-row, lut_size-wide texture.
            wgpu::Extent3d {
                width: lut.lut_size(),
                height: 1,
                depth_or_array_layers: 1,
            },
        );
        self.lut_texture = texture;
        self.lut_view = view;
        self.lut_sampler = sampler;
        // A successful LUT upload is what arms the `render_volume` prerequisite.
        self.has_render_params = true;
        self.rebuild_bind_groups();
    }
502
503    /// Bake and upload transfer functions from the current render parameters.
504    ///
505    /// # Errors
506    /// Returns [`RenderError::NoVolume`] if no volume metadata has been uploaded yet.
507    pub fn set_render_params(&mut self, params: &VolumeRenderParams) -> Result<(), RenderError> {
508        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
509        let lut = TransferFunctionLut::bake(
510            &params.color_tf,
511            &params.opacity_tf,
512            f64::from(metadata.scalar_range[0]),
513            f64::from(metadata.scalar_range[1]),
514            4096,
515        );
516        self.set_transfer_function(&lut);
517        let gradient_tf = params
518            .gradient_opacity_tf
519            .clone()
520            .unwrap_or_else(opaque_unit_ramp);
521        self.upload_gradient_lut(&gradient_tf);
522        Ok(())
523    }
524
525    /// Handle viewport resize.
526    pub fn resize(&mut self, width: u32, height: u32) {
527        self.viewport_size = (width, height);
528    }
529
530    /// Create an off-screen render target texture.
531    #[must_use]
532    pub fn create_render_target(&self, width: u32, height: u32) -> wgpu::Texture {
533        self.device.create_texture(&wgpu::TextureDescriptor {
534            label: Some("volren_offscreen_target"),
535            size: wgpu::Extent3d {
536                width,
537                height,
538                depth_or_array_layers: 1,
539            },
540            mip_level_count: 1,
541            sample_count: 1,
542            dimension: wgpu::TextureDimension::D2,
543            format: self.output_format,
544            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
545                | wgpu::TextureUsages::TEXTURE_BINDING
546                | wgpu::TextureUsages::COPY_SRC,
547            view_formats: &[],
548        })
549    }
550
    /// Render the volume into the given color attachment.
    ///
    /// The caller owns the command encoder and submits it.
    ///
    /// # Errors
    /// Returns [`RenderError`] if prerequisites are missing or the viewport is invalid.
    pub fn render_volume(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        camera: &Camera,
        params: &VolumeRenderParams,
        viewport: Viewport,
    ) -> Result<(), RenderError> {
        // Prerequisites: volume metadata and bind group (from set_volume /
        // allocate_volume), a non-empty viewport, and a baked transfer
        // function (from set_render_params / set_transfer_function).
        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
        let bind_group = self
            .volume_bind_group
            .as_ref()
            .ok_or(RenderError::NoVolume)?;
        validate_viewport(viewport)?;
        if !self.has_render_params {
            return Err(RenderError::NoTransferFunction);
        }

        // Uniforms are rebuilt from camera + params every call and written
        // through the queue before the pass is encoded.
        let uniforms = self.build_uniforms(camera, metadata, params, viewport);
        self.queue.write_buffer(
            &self.volume_uniform_buffer,
            0,
            bytemuck::bytes_of(&uniforms),
        );

        // LoadOp::Load preserves existing target contents, so the volume is
        // alpha-composited over earlier passes rather than clearing them.
        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("volren_volume_pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        pass.set_pipeline(&self.volume_pipeline);
        pass.set_bind_group(0, bind_group, &[]);
        // Restrict rasterization to the requested sub-rectangle of the target.
        pass.set_viewport(
            viewport.x as f32,
            viewport.y as f32,
            viewport.width as f32,
            viewport.height as f32,
            0.0,
            1.0,
        );
        // Six vertices: a two-triangle fullscreen quad generated in the vertex
        // shader (see shaders/fullscreen_quad.wgsl).
        pass.draw(0..6, 0..1);
        Ok(())
    }
609
610    /// Render the volume into a newly-created off-screen texture.
611    ///
612    /// # Errors
613    /// Propagates the same errors as [`Self::render_volume`].
614    pub fn render_volume_to_texture(
615        &self,
616        encoder: &mut wgpu::CommandEncoder,
617        camera: &Camera,
618        params: &VolumeRenderParams,
619        width: u32,
620        height: u32,
621    ) -> Result<wgpu::Texture, RenderError> {
622        let texture = self.create_render_target(width, height);
623        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
624        self.render_volume(
625            encoder,
626            &view,
627            camera,
628            params,
629            Viewport::full(width, height),
630        )?;
631        Ok(texture)
632    }
633
634    /// Render the volume at reduced resolution and upscale the result into `target`.
635    ///
636    /// This is intended for interactive manipulation where responsiveness matters
637    /// more than final image quality. Passing `1` disables downsampling.
638    ///
639    /// # Errors
640    /// Propagates the same errors as [`Self::render_volume`].
641    pub fn render_volume_interactive(
642        &self,
643        encoder: &mut wgpu::CommandEncoder,
644        target: &wgpu::TextureView,
645        camera: &Camera,
646        params: &VolumeRenderParams,
647        viewport: Viewport,
648        downsample_factor: u32,
649    ) -> Result<(), RenderError> {
650        validate_viewport(viewport)?;
651        let factor = downsample_factor.max(1);
652        if factor == 1 {
653            return self.render_volume(encoder, target, camera, params, viewport);
654        }
655
656        let lod_width = (viewport.width / factor).max(1);
657        let lod_height = (viewport.height / factor).max(1);
658        let texture = self.create_render_target(lod_width, lod_height);
659        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
660
661        self.render_volume(
662            encoder,
663            &view,
664            camera,
665            params,
666            Viewport::full(lod_width, lod_height),
667        )?;
668        self.blit_texture_view(encoder, target, viewport, &view);
669        Ok(())
670    }
671
    /// Render a 2D reslice (MPR slice) into the given color attachment.
    ///
    /// Unlike [`Self::render_volume`], this pass does not require a baked
    /// transfer function — the window/level parameters drive the mapping.
    ///
    /// # Errors
    /// Returns [`RenderError::NoVolume`] when no volume has been uploaded.
    pub fn render_slice(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        slice_plane: &SlicePlane,
        window_level: &WindowLevel,
        viewport: Viewport,
        thick_slab: Option<&ThickSlabParams>,
    ) -> Result<(), RenderError> {
        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
        let bind_group = self
            .slice_bind_group
            .as_ref()
            .ok_or(RenderError::NoVolume)?;
        validate_viewport(viewport)?;

        // Uniforms are rebuilt per call and uploaded before the pass begins.
        let uniforms = self.build_slice_uniforms(metadata, slice_plane, window_level, thick_slab);
        self.queue
            .write_buffer(&self.slice_uniform_buffer, 0, bytemuck::bytes_of(&uniforms));

        // LoadOp::Load composites the slice over existing target contents.
        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("volren_slice_pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        pass.set_pipeline(&self.slice_pipeline);
        pass.set_bind_group(0, bind_group, &[]);
        // Restrict rasterization to the requested sub-rectangle of the target.
        pass.set_viewport(
            viewport.x as f32,
            viewport.y as f32,
            viewport.width as f32,
            viewport.height as f32,
            0.0,
            1.0,
        );
        // Six vertices: shader-generated fullscreen quad (two triangles).
        pass.draw(0..6, 0..1);
        Ok(())
    }
723
724    /// Render a slice into a newly-created off-screen texture.
725    ///
726    /// # Errors
727    /// Propagates the same errors as [`Self::render_slice`].
728    pub fn render_slice_to_texture(
729        &self,
730        encoder: &mut wgpu::CommandEncoder,
731        slice_plane: &SlicePlane,
732        window_level: &WindowLevel,
733        width: u32,
734        height: u32,
735        thick_slab: Option<&ThickSlabParams>,
736    ) -> Result<wgpu::Texture, RenderError> {
737        let texture = self.create_render_target(width, height);
738        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
739        self.render_slice(
740            encoder,
741            &view,
742            slice_plane,
743            window_level,
744            Viewport::full(width, height),
745            thick_slab,
746        )?;
747        Ok(texture)
748    }
749
750    /// Render crosshair overlay lines on a slice viewport.
751    ///
752    /// # Errors
753    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
754    pub fn render_crosshair(
755        &self,
756        encoder: &mut wgpu::CommandEncoder,
757        target: &wgpu::TextureView,
758        viewport: Viewport,
759        crosshair: &CrosshairParams,
760    ) -> Result<(), RenderError> {
761        validate_viewport(viewport)?;
762        let uniforms = CrosshairUniforms {
763            position: [
764                crosshair.position[0],
765                crosshair.position[1],
766                crosshair.thickness,
767                0.0,
768            ],
769            horizontal_color: crosshair.horizontal_color,
770            vertical_color: crosshair.vertical_color,
771            viewport: [viewport.width as f32, viewport.height as f32, 0.0, 0.0],
772        };
773        self.queue.write_buffer(
774            &self.crosshair_uniform_buffer,
775            0,
776            bytemuck::bytes_of(&uniforms),
777        );
778
779        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
780            label: Some("volren_crosshair_pass"),
781            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
782                view: target,
783                resolve_target: None,
784                ops: wgpu::Operations {
785                    load: wgpu::LoadOp::Load,
786                    store: wgpu::StoreOp::Store,
787                },
788            })],
789            depth_stencil_attachment: None,
790            timestamp_writes: None,
791            occlusion_query_set: None,
792        });
793        pass.set_pipeline(&self.crosshair_pipeline);
794        pass.set_bind_group(0, &self.crosshair_bind_group, &[]);
795        pass.set_viewport(
796            viewport.x as f32,
797            viewport.y as f32,
798            viewport.width as f32,
799            viewport.height as f32,
800            0.0,
801            1.0,
802        );
803        pass.draw(0..6, 0..1);
804        Ok(())
805    }
806
807    /// Render an orientation marker in the given viewport.
808    ///
809    /// The marker is generated on the CPU as a small RGBA image, then composited
810    /// over the target with a lightweight textured-quad pass.
811    ///
812    /// # Errors
813    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
814    pub fn render_orientation_marker(
815        &self,
816        encoder: &mut wgpu::CommandEncoder,
817        target: &wgpu::TextureView,
818        camera: &Camera,
819        viewport: Viewport,
820        labels: &OrientationLabels,
821    ) -> Result<(), RenderError> {
822        validate_viewport(viewport)?;
823        let width = viewport.width.max(1);
824        let height = viewport.height.max(1);
825        let image = build_orientation_marker_image(width, height, camera, labels);
826        self.blit_rgba8(encoder, target, viewport, width, height, &image);
827        Ok(())
828    }
829
830    fn build_uniforms(
831        &self,
832        camera: &Camera,
833        metadata: VolumeMetadata,
834        params: &VolumeRenderParams,
835        viewport: Viewport,
836    ) -> VolumeUniforms {
837        let aspect = f64::from(viewport.width) / f64::from(viewport.height.max(1));
838        let view = camera.view_matrix();
839        let proj = camera.projection_matrix(aspect);
840        let mvp = (proj * view).as_mat4();
841        let inv_mvp = (proj * view).inverse().as_mat4();
842
843        let blend_mode = match params.blend_mode {
844            BlendMode::Composite => bm::COMPOSITE,
845            BlendMode::MaximumIntensity => bm::MAXIMUM_INTENSITY,
846            BlendMode::MinimumIntensity => bm::MINIMUM_INTENSITY,
847            BlendMode::AverageIntensity => bm::AVERAGE_INTENSITY,
848            BlendMode::Additive => bm::ADDITIVE,
849            BlendMode::Isosurface { .. } => bm::ISOSURFACE,
850            _ => bm::COMPOSITE,
851        };
852        let (window_center, window_width) = params.window_level.map_or_else(
853            || {
854                let wl = WindowLevel::from_scalar_range(
855                    f64::from(metadata.scalar_range[0]),
856                    f64::from(metadata.scalar_range[1]),
857                );
858                (wl.center as f32, wl.width as f32)
859            },
860            |wl| (wl.center as f32, wl.width as f32),
861        );
862        let (shading_enabled, ambient, diffuse, specular, specular_power) =
863            if let Some(shading) = params.shading {
864                (
865                    1u32,
866                    shading.ambient,
867                    shading.diffuse,
868                    shading.specular,
869                    shading.specular_power,
870                )
871            } else {
872                (0u32, 0.0, 0.0, 0.0, 0.0)
873            };
874        let (clip_planes, num_clip_planes) = combined_clip_planes(params);
875        let iso_value = match params.blend_mode {
876            BlendMode::Isosurface { iso_value } => iso_value as f32,
877            _ => 0.0,
878        };
879        let camera_position = camera.position().as_vec3();
880
881        VolumeUniforms {
882            mvp: mvp.to_cols_array_2d(),
883            inv_mvp: inv_mvp.to_cols_array_2d(),
884            world_to_volume: metadata.world_to_volume,
885            volume_to_world: metadata.volume_to_world,
886            dimensions: [
887                metadata.dimensions[0],
888                metadata.dimensions[1],
889                metadata.dimensions[2],
890                0.0,
891            ],
892            spacing: [
893                metadata.spacing[0],
894                metadata.spacing[1],
895                metadata.spacing[2],
896                0.0,
897            ],
898            scalar_range: [
899                metadata.scalar_range[0],
900                metadata.scalar_range[1],
901                iso_value,
902                0.0,
903            ],
904            step_size: params.step_size_factor.max(1e-3),
905            opacity_correction: 1.0 / params.step_size_factor.max(1e-3),
906            blend_mode,
907            shading_enabled,
908            ambient,
909            diffuse,
910            specular,
911            specular_power,
912            light_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
913            camera_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
914            window_center,
915            window_width,
916            num_clip_planes,
917            _pad0: 0,
918            clip_planes,
919            background: params.background,
920        }
921    }
922
923    fn build_slice_uniforms(
924        &self,
925        metadata: VolumeMetadata,
926        slice_plane: &SlicePlane,
927        window_level: &WindowLevel,
928        thick_slab: Option<&ThickSlabParams>,
929    ) -> SliceUniforms {
930        let slab_mode = thick_slab.map_or(ThickSlabMode::Mip, |params| params.mode);
931        let (half_thickness, num_samples) = thick_slab.map_or((0.0f32, 1u32), |params| {
932            (params.half_thickness as f32, params.num_samples.max(1))
933        });
934
935        SliceUniforms {
936            world_to_volume: metadata.world_to_volume,
937            slice_origin: [
938                slice_plane.origin.x as f32,
939                slice_plane.origin.y as f32,
940                slice_plane.origin.z as f32,
941                0.0,
942            ],
943            slice_right: [
944                slice_plane.right.x as f32,
945                slice_plane.right.y as f32,
946                slice_plane.right.z as f32,
947                0.0,
948            ],
949            slice_up: [
950                slice_plane.up.x as f32,
951                slice_plane.up.y as f32,
952                slice_plane.up.z as f32,
953                0.0,
954            ],
955            slice_normal: [
956                slice_plane.normal().x as f32,
957                slice_plane.normal().y as f32,
958                slice_plane.normal().z as f32,
959                0.0,
960            ],
961            slice_extent: [
962                slice_plane.width as f32,
963                slice_plane.height as f32,
964                half_thickness,
965                0.0,
966            ],
967            window_level: [
968                window_level.center as f32,
969                window_level.width as f32,
970                0.0,
971                0.0,
972            ],
973            slab_params: [thick_slab_mode_code(slab_mode), num_samples, 0, 0],
974        }
975    }
976
977    fn upload_gradient_lut(&mut self, tf: &OpacityTransferFunction) {
978        let resolution = 1024u32;
979        let f32_bytes = bake_opacity_lut_bytes(tf, resolution);
980        let f32_slice: &[f32] = bytemuck::cast_slice(&f32_bytes);
981        let f16_bytes = f32_slice_to_f16_bytes(f32_slice);
982        let (texture, view, sampler) = Self::create_lut_texture(&self.device, resolution);
983        self.queue.write_texture(
984            texture.as_image_copy(),
985            &f16_bytes,
986            wgpu::TexelCopyBufferLayout {
987                offset: 0,
988                bytes_per_row: Some(resolution * 4 * 2),
989                rows_per_image: None,
990            },
991            wgpu::Extent3d {
992                width: resolution,
993                height: 1,
994                depth_or_array_layers: 1,
995            },
996        );
997        self.gradient_lut_texture = texture;
998        self.gradient_lut_view = view;
999        self.gradient_lut_sampler = sampler;
1000        self.rebuild_bind_groups();
1001    }
1002
    /// Recreate the volume and slice bind groups after any bound resource
    /// (volume texture, LUT textures, uniform buffers) has been replaced.
    ///
    /// Returns early when no volume texture has been uploaded yet, since
    /// both bind groups reference the volume texture and its sampler.
    fn rebuild_bind_groups(&mut self) {
        let Some(volume_texture) = self.volume_texture.as_ref() else {
            return;
        };

        // Binding indices must match `create_volume_bind_group_layout`:
        // 0 = uniforms, 1/2 = 3D volume texture + sampler,
        // 3/4 = primary LUT + sampler, 5/6 = gradient-opacity LUT + sampler.
        self.volume_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_volume_bind_group"),
            layout: &self.volume_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.volume_uniform_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: wgpu::BindingResource::TextureView(&self.lut_view),
                },
                wgpu::BindGroupEntry {
                    binding: 4,
                    resource: wgpu::BindingResource::Sampler(&self.lut_sampler),
                },
                wgpu::BindGroupEntry {
                    binding: 5,
                    resource: wgpu::BindingResource::TextureView(&self.gradient_lut_view),
                },
                wgpu::BindGroupEntry {
                    binding: 6,
                    resource: wgpu::BindingResource::Sampler(&self.gradient_lut_sampler),
                },
            ],
        }));

        // Binding indices must match `create_slice_bind_group_layout`:
        // 0 = slice uniforms, 1/2 = 3D volume texture + sampler.
        self.slice_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_slice_bind_group"),
            layout: &self.slice_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.slice_uniform_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
                },
            ],
        }));
    }
1062
1063    fn blit_rgba8(
1064        &self,
1065        encoder: &mut wgpu::CommandEncoder,
1066        target: &wgpu::TextureView,
1067        viewport: Viewport,
1068        width: u32,
1069        height: u32,
1070        rgba: &[u8],
1071    ) {
1072        let texture = self.device.create_texture(&wgpu::TextureDescriptor {
1073            label: Some("volren_blit_texture"),
1074            size: wgpu::Extent3d {
1075                width,
1076                height,
1077                depth_or_array_layers: 1,
1078            },
1079            mip_level_count: 1,
1080            sample_count: 1,
1081            dimension: wgpu::TextureDimension::D2,
1082            format: wgpu::TextureFormat::Rgba8Unorm,
1083            usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
1084            view_formats: &[],
1085        });
1086        self.queue.write_texture(
1087            texture.as_image_copy(),
1088            rgba,
1089            wgpu::TexelCopyBufferLayout {
1090                offset: 0,
1091                bytes_per_row: Some(width * 4),
1092                rows_per_image: Some(height),
1093            },
1094            wgpu::Extent3d {
1095                width,
1096                height,
1097                depth_or_array_layers: 1,
1098            },
1099        );
1100        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1101        self.blit_texture_view(encoder, target, viewport, &view);
1102    }
1103
1104    fn blit_texture_view(
1105        &self,
1106        encoder: &mut wgpu::CommandEncoder,
1107        target: &wgpu::TextureView,
1108        viewport: Viewport,
1109        source_view: &wgpu::TextureView,
1110    ) {
1111        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1112            label: Some("volren_blit_bind_group"),
1113            layout: &self.blit_bind_group_layout,
1114            entries: &[
1115                wgpu::BindGroupEntry {
1116                    binding: 0,
1117                    resource: wgpu::BindingResource::TextureView(source_view),
1118                },
1119                wgpu::BindGroupEntry {
1120                    binding: 1,
1121                    resource: wgpu::BindingResource::Sampler(&self.blit_sampler),
1122                },
1123            ],
1124        });
1125
1126        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1127            label: Some("volren_blit_pass"),
1128            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1129                view: target,
1130                resolve_target: None,
1131                ops: wgpu::Operations {
1132                    load: wgpu::LoadOp::Load,
1133                    store: wgpu::StoreOp::Store,
1134                },
1135            })],
1136            depth_stencil_attachment: None,
1137            timestamp_writes: None,
1138            occlusion_query_set: None,
1139        });
1140        pass.set_pipeline(&self.blit_pipeline);
1141        pass.set_bind_group(0, &bind_group, &[]);
1142        pass.set_viewport(
1143            viewport.x as f32,
1144            viewport.y as f32,
1145            viewport.width as f32,
1146            viewport.height as f32,
1147            0.0,
1148            1.0,
1149        );
1150        pass.draw(0..6, 0..1);
1151    }
1152
1153    fn create_volume_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1154        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1155            label: Some("volren_volume_bgl"),
1156            entries: &[
1157                uniform_bgl_entry(0),
1158                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
1159                sampler_bgl_entry(2),
1160                texture_bgl_entry(3, wgpu::TextureViewDimension::D1),
1161                sampler_bgl_entry(4),
1162                texture_bgl_entry(5, wgpu::TextureViewDimension::D1),
1163                sampler_bgl_entry(6),
1164            ],
1165        })
1166    }
1167
1168    fn create_slice_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1169        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1170            label: Some("volren_slice_bgl"),
1171            entries: &[
1172                uniform_bgl_entry(0),
1173                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
1174                sampler_bgl_entry(2),
1175            ],
1176        })
1177    }
1178
1179    fn create_crosshair_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1180        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1181            label: Some("volren_crosshair_bgl"),
1182            entries: &[uniform_bgl_entry(0)],
1183        })
1184    }
1185
1186    fn create_blit_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1187        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1188            label: Some("volren_blit_bgl"),
1189            entries: &[texture_bgl_entry_2d(0), sampler_bgl_entry(1)],
1190        })
1191    }
1192
1193    fn create_pipeline(
1194        device: &wgpu::Device,
1195        shader: &wgpu::ShaderModule,
1196        bind_group_layout: &wgpu::BindGroupLayout,
1197        output_format: wgpu::TextureFormat,
1198        blend: Option<wgpu::BlendState>,
1199    ) -> wgpu::RenderPipeline {
1200        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
1201            label: Some("volren_pipeline_layout"),
1202            bind_group_layouts: &[bind_group_layout],
1203            push_constant_ranges: &[],
1204        });
1205
1206        device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
1207            label: Some("volren_pipeline"),
1208            layout: Some(&layout),
1209            vertex: wgpu::VertexState {
1210                module: shader,
1211                entry_point: Some("vs_main"),
1212                buffers: &[],
1213                compilation_options: Default::default(),
1214            },
1215            fragment: Some(wgpu::FragmentState {
1216                module: shader,
1217                entry_point: Some("fs_main"),
1218                targets: &[Some(wgpu::ColorTargetState {
1219                    format: output_format,
1220                    blend,
1221                    write_mask: wgpu::ColorWrites::ALL,
1222                })],
1223                compilation_options: Default::default(),
1224            }),
1225            primitive: wgpu::PrimitiveState {
1226                topology: wgpu::PrimitiveTopology::TriangleList,
1227                ..Default::default()
1228            },
1229            depth_stencil: None,
1230            multisample: wgpu::MultisampleState::default(),
1231            multiview: None,
1232            cache: None,
1233        })
1234    }
1235
1236    fn create_lut_texture(
1237        device: &wgpu::Device,
1238        size: u32,
1239    ) -> (wgpu::Texture, wgpu::TextureView, wgpu::Sampler) {
1240        let texture = device.create_texture(&wgpu::TextureDescriptor {
1241            label: Some("volren_lut"),
1242            size: wgpu::Extent3d {
1243                width: size,
1244                height: 1,
1245                depth_or_array_layers: 1,
1246            },
1247            mip_level_count: 1,
1248            sample_count: 1,
1249            dimension: wgpu::TextureDimension::D1,
1250            format: wgpu::TextureFormat::Rgba16Float,
1251            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
1252            view_formats: &[],
1253        });
1254        let view = texture.create_view(&wgpu::TextureViewDescriptor {
1255            dimension: Some(wgpu::TextureViewDimension::D1),
1256            ..Default::default()
1257        });
1258        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
1259            label: Some("volren_lut_sampler"),
1260            address_mode_u: wgpu::AddressMode::ClampToEdge,
1261            mag_filter: wgpu::FilterMode::Linear,
1262            min_filter: wgpu::FilterMode::Linear,
1263            ..Default::default()
1264        });
1265        (texture, view, sampler)
1266    }
1267}
1268
1269fn validate_viewport(viewport: Viewport) -> Result<(), RenderError> {
1270    if viewport.width == 0 || viewport.height == 0 {
1271        Err(RenderError::ZeroViewport)
1272    } else {
1273        Ok(())
1274    }
1275}
1276
1277fn opaque_unit_ramp() -> OpacityTransferFunction {
1278    let mut tf = OpacityTransferFunction::new();
1279    tf.add_point(0.0, 1.0);
1280    tf.add_point(1.0, 1.0);
1281    tf
1282}
1283
1284fn bake_opacity_lut_bytes(tf: &OpacityTransferFunction, resolution: u32) -> Vec<u8> {
1285    let mut rgba = Vec::with_capacity((resolution * 4) as usize);
1286    for i in 0..resolution {
1287        let t = if resolution <= 1 {
1288            0.0
1289        } else {
1290            f64::from(i) / f64::from(resolution - 1)
1291        };
1292        let opacity = tf.evaluate(t) as f32;
1293        rgba.extend_from_slice(&[opacity, opacity, opacity, 1.0]);
1294    }
1295    bytemuck::cast_slice(&rgba).to_vec()
1296}
1297
1298/// Convert an f32 slice to packed f16 (little-endian) bytes for `Rgba16Float` upload.
1299fn f32_slice_to_f16_bytes(data: &[f32]) -> Vec<u8> {
1300    let mut bytes = Vec::with_capacity(data.len() * 2);
1301    for &val in data {
1302        bytes.extend_from_slice(&f16::from_f32(val).to_le_bytes());
1303    }
1304    bytes
1305}
1306
1307fn combined_clip_planes(params: &VolumeRenderParams) -> ([[f32; 4]; 6], u32) {
1308    let mut planes = params.clip_planes.clone();
1309    if let Some(bounds) = params.cropping_bounds {
1310        planes.extend([
1311            ClipPlane::from_point_and_normal(DVec3::new(bounds.min.x, 0.0, 0.0), DVec3::X),
1312            ClipPlane::from_point_and_normal(DVec3::new(bounds.max.x, 0.0, 0.0), DVec3::NEG_X),
1313            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.min.y, 0.0), DVec3::Y),
1314            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.max.y, 0.0), DVec3::NEG_Y),
1315            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.min.z), DVec3::Z),
1316            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.max.z), DVec3::NEG_Z),
1317        ]);
1318    }
1319
1320    let mut packed = [[0.0f32; 4]; 6];
1321    for (index, plane) in planes.iter().take(6).enumerate() {
1322        let eq = plane.equation;
1323        packed[index] = [eq.x as f32, eq.y as f32, eq.z as f32, eq.w as f32];
1324    }
1325    (packed, planes.len().min(6) as u32)
1326}
1327
1328fn thick_slab_mode_code(mode: ThickSlabMode) -> u32 {
1329    match mode {
1330        ThickSlabMode::Mip => 0,
1331        ThickSlabMode::MinIp => 1,
1332        ThickSlabMode::Mean => 2,
1333        _ => 0,
1334    }
1335}
1336
1337fn mat4_from_direction(direction: DMat3) -> DMat4 {
1338    DMat4::from_cols(
1339        direction.x_axis.extend(0.0),
1340        direction.y_axis.extend(0.0),
1341        direction.z_axis.extend(0.0),
1342        DVec4::W,
1343    )
1344}
1345
1346fn uniform_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
1347    wgpu::BindGroupLayoutEntry {
1348        binding,
1349        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
1350        ty: wgpu::BindingType::Buffer {
1351            ty: wgpu::BufferBindingType::Uniform,
1352            has_dynamic_offset: false,
1353            min_binding_size: None,
1354        },
1355        count: None,
1356    }
1357}
1358
1359fn texture_bgl_entry(
1360    binding: u32,
1361    view_dimension: wgpu::TextureViewDimension,
1362) -> wgpu::BindGroupLayoutEntry {
1363    wgpu::BindGroupLayoutEntry {
1364        binding,
1365        visibility: wgpu::ShaderStages::FRAGMENT,
1366        ty: wgpu::BindingType::Texture {
1367            sample_type: wgpu::TextureSampleType::Float { filterable: true },
1368            view_dimension,
1369            multisampled: false,
1370        },
1371        count: None,
1372    }
1373}
1374
/// Convenience wrapper: a 2D filterable-texture layout entry (used by the
/// blit bind group layout).
fn texture_bgl_entry_2d(binding: u32) -> wgpu::BindGroupLayoutEntry {
    texture_bgl_entry(binding, wgpu::TextureViewDimension::D2)
}
1378
1379fn sampler_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
1380    wgpu::BindGroupLayoutEntry {
1381        binding,
1382        visibility: wgpu::ShaderStages::FRAGMENT,
1383        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
1384        count: None,
1385    }
1386}
1387
1388fn build_orientation_marker_image(
1389    width: u32,
1390    height: u32,
1391    camera: &Camera,
1392    labels: &OrientationLabels,
1393) -> Vec<u8> {
1394    let mut pixels = vec![0u8; (width as usize) * (height as usize) * 4];
1395    let center = DVec2::new(f64::from(width) * 0.5, f64::from(height) * 0.5);
1396    let radius = f64::from(width.min(height)) * 0.28;
1397    let view = camera.view_matrix();
1398
1399    let axes = [
1400        (DVec3::X, [255, 80, 80, 255], labels.right.as_str()),
1401        (-DVec3::X, [128, 40, 40, 220], labels.left.as_str()),
1402        (DVec3::Y, [80, 255, 80, 255], labels.anterior.as_str()),
1403        (-DVec3::Y, [40, 128, 40, 220], labels.posterior.as_str()),
1404        (DVec3::Z, [80, 160, 255, 255], labels.superior.as_str()),
1405        (-DVec3::Z, [40, 80, 128, 220], labels.inferior.as_str()),
1406    ];
1407
1408    for (axis, color, label) in axes {
1409        let projected = project_axis(view, axis);
1410        if projected.length_squared() < 1e-8 {
1411            continue;
1412        }
1413        let end = center + projected.normalize() * radius;
1414        draw_line(&mut pixels, width, height, center, end, color);
1415        draw_text(
1416            &mut pixels,
1417            width,
1418            height,
1419            end + projected.normalize() * 6.0,
1420            label,
1421            color,
1422        );
1423    }
1424
1425    draw_disc(
1426        &mut pixels,
1427        width,
1428        height,
1429        center,
1430        2.5,
1431        [255, 255, 255, 255],
1432    );
1433    pixels
1434}
1435
1436fn project_axis(view: DMat4, axis: DVec3) -> DVec2 {
1437    let camera_space = view.transform_vector3(axis);
1438    DVec2::new(camera_space.x, -camera_space.y)
1439}
1440
1441fn draw_line(pixels: &mut [u8], width: u32, height: u32, start: DVec2, end: DVec2, color: [u8; 4]) {
1442    let delta = end - start;
1443    let steps = delta.length().ceil().max(1.0) as u32;
1444    for step in 0..=steps {
1445        let t = f64::from(step) / f64::from(steps.max(1));
1446        let point = start + delta * t;
1447        alpha_plot(
1448            pixels,
1449            width,
1450            height,
1451            point.x.round() as i32,
1452            point.y.round() as i32,
1453            color,
1454        );
1455    }
1456}
1457
1458fn draw_disc(
1459    pixels: &mut [u8],
1460    width: u32,
1461    height: u32,
1462    center: DVec2,
1463    radius: f64,
1464    color: [u8; 4],
1465) {
1466    let min_x = (center.x - radius).floor() as i32;
1467    let max_x = (center.x + radius).ceil() as i32;
1468    let min_y = (center.y - radius).floor() as i32;
1469    let max_y = (center.y + radius).ceil() as i32;
1470
1471    for y in min_y..=max_y {
1472        for x in min_x..=max_x {
1473            let dx = f64::from(x) - center.x;
1474            let dy = f64::from(y) - center.y;
1475            if dx * dx + dy * dy <= radius * radius {
1476                alpha_plot(pixels, width, height, x, y, color);
1477            }
1478        }
1479    }
1480}
1481
1482fn draw_text(
1483    pixels: &mut [u8],
1484    width: u32,
1485    height: u32,
1486    position: DVec2,
1487    text: &str,
1488    color: [u8; 4],
1489) {
1490    let mut cursor_x = position.x.round() as i32;
1491    let cursor_y = position.y.round() as i32;
1492    for ch in text.chars() {
1493        draw_char(pixels, width, height, cursor_x, cursor_y, ch, color);
1494        cursor_x += 6;
1495    }
1496}
1497
1498fn draw_char(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, ch: char, color: [u8; 4]) {
1499    let glyph = glyph_rows(ch);
1500    for (row_index, row_bits) in glyph.iter().enumerate() {
1501        for col in 0..5 {
1502            if (row_bits >> (4 - col)) & 1 == 1 {
1503                alpha_plot(pixels, width, height, x + col, y + row_index as i32, color);
1504            }
1505        }
1506    }
1507}
1508
/// Source-over composite a single RGBA8 pixel into `pixels` at (x, y).
///
/// Out-of-bounds coordinates are silently ignored. Blending is done in
/// straight (non-premultiplied) alpha with f32 intermediates.
fn alpha_plot(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, color: [u8; 4]) {
    let in_bounds = (0..width as i32).contains(&x) && (0..height as i32).contains(&y);
    if !in_bounds {
        return;
    }
    let index = ((y as u32 * width + x as u32) * 4) as usize;
    let src_a = f32::from(color[3]) / 255.0;
    let dst_a = f32::from(pixels[index + 3]) / 255.0;
    let out_a = src_a + dst_a * (1.0 - src_a);

    // Straight-alpha blend of one channel, un-premultiplied by out_a.
    let mix = |src: u8, dst: u8| -> u8 {
        if out_a <= f32::EPSILON {
            0
        } else {
            (((f32::from(src) * src_a) + (f32::from(dst) * dst_a * (1.0 - src_a))) / out_a)
                .round()
                .clamp(0.0, 255.0) as u8
        }
    };

    for channel in 0..3 {
        pixels[index + channel] = mix(color[channel], pixels[index + channel]);
    }
    pixels[index + 3] = (out_a * 255.0).round().clamp(0.0, 255.0) as u8;
}
1532
/// Look up the 5x7 bitmap glyph for an orientation-label character.
///
/// Each of the seven bytes encodes one row, bit 4 being the leftmost column.
/// Lowercase input is folded to uppercase; characters outside the tiny font
/// map to a question-mark-like fallback glyph.
fn glyph_rows(ch: char) -> [u8; 7] {
    const FALLBACK: [u8; 7] = [
        0b11111, 0b00001, 0b00010, 0b00100, 0b00100, 0b00000, 0b00100,
    ];
    match ch.to_ascii_uppercase() {
        'A' => [
            0b01110, 0b10001, 0b10001, 0b11111, 0b10001, 0b10001, 0b10001,
        ],
        'I' => [
            0b11111, 0b00100, 0b00100, 0b00100, 0b00100, 0b00100, 0b11111,
        ],
        'L' => [
            0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b11111,
        ],
        'P' => [
            0b11110, 0b10001, 0b10001, 0b11110, 0b10000, 0b10000, 0b10000,
        ],
        'R' => [
            0b11110, 0b10001, 0b10001, 0b11110, 0b10100, 0b10010, 0b10001,
        ],
        'S' => [
            0b01111, 0b10000, 0b10000, 0b01110, 0b00001, 0b00001, 0b11110,
        ],
        ' ' => [0; 7],
        _ => FALLBACK,
    }
}
1559
#[cfg(test)]
mod tests {
    use super::*;

    // CPU-only unit tests; GPU-dependent tests live in `gpu_smoke_tests`.

    #[test]
    fn glyph_table_contains_expected_rows() {
        assert_eq!(glyph_rows('R')[0], 0b11110);
        assert_eq!(glyph_rows('I')[6], 0b11111);
    }

    #[test]
    fn thick_slab_mode_codes_are_stable() {
        // The reslice shader decodes these integers; never renumber them.
        assert_eq!(thick_slab_mode_code(ThickSlabMode::Mip), 0);
        assert_eq!(thick_slab_mode_code(ThickSlabMode::MinIp), 1);
        assert_eq!(thick_slab_mode_code(ThickSlabMode::Mean), 2);
    }
}
1577
1578#[cfg(all(test, feature = "snapshot-tests"))]
1579mod gpu_smoke_tests {
1580    use super::*;
1581    use std::sync::mpsc;
1582
1583    use glam::{DMat3, DVec3, UVec3};
1584    use volren_core::{Volume, VolumeRenderParams};
1585
    /// Acquire a low-power GPU device and queue for smoke tests.
    ///
    /// Returns `None` when no adapter or device is available (e.g. headless
    /// CI), allowing callers to skip the test instead of failing.
    fn test_device() -> Option<(wgpu::Device, wgpu::Queue)> {
        pollster::block_on(async {
            let instance = wgpu::Instance::default();
            // `?` short-circuits the async block to `None` when no adapter
            // is found.
            let adapter = instance
                .request_adapter(&wgpu::RequestAdapterOptions {
                    power_preference: wgpu::PowerPreference::LowPower,
                    compatible_surface: None,
                    force_fallback_adapter: false,
                })
                .await?;
            adapter
                .request_device(&wgpu::DeviceDescriptor::default(), None)
                .await
                .ok()
        })
    }
1602
1603    fn small_volume() -> DynVolume {
1604        let mut data = vec![0u16; 16 * 16 * 16];
1605        data[8 + 8 * 16 + 8 * 16 * 16] = 2048;
1606        Volume::from_data(
1607            data,
1608            UVec3::new(16, 16, 16),
1609            DVec3::ONE,
1610            DVec3::ZERO,
1611            DMat3::IDENTITY,
1612            1,
1613        )
1614        .expect("valid test volume")
1615        .into()
1616    }
1617
1618    fn sphere_volume() -> DynVolume {
1619        let dims = UVec3::new(32, 32, 32);
1620        let center = DVec3::new(15.5, 15.5, 15.5);
1621        let radius = 9.0;
1622        let mut data = vec![0u16; (dims.x * dims.y * dims.z) as usize];
1623
1624        for z in 0..dims.z {
1625            for y in 0..dims.y {
1626                for x in 0..dims.x {
1627                    let index = (z * dims.x * dims.y + y * dims.x + x) as usize;
1628                    let point = DVec3::new(f64::from(x), f64::from(y), f64::from(z));
1629                    data[index] = if (point - center).length() <= radius {
1630                        2048
1631                    } else {
1632                        0
1633                    };
1634                }
1635            }
1636        }
1637
1638        Volume::from_data(data, dims, DVec3::ONE, DVec3::ZERO, DMat3::IDENTITY, 1)
1639            .expect("valid sphere volume")
1640            .into()
1641    }
1642
    /// Copy a GPU texture back to host memory as tightly packed RGBA8 bytes.
    ///
    /// wgpu requires texture-to-buffer copy rows to be aligned to 256 bytes,
    /// so the readback buffer is over-allocated per row and the padding is
    /// stripped while assembling the result.
    fn read_texture(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        texture: &wgpu::Texture,
        width: u32,
        height: u32,
    ) -> Vec<u8> {
        let unpadded_bytes_per_row = width * 4;
        // Round each row up to the required 256-byte copy alignment.
        let padded_bytes_per_row = unpadded_bytes_per_row.div_ceil(256) * 256;
        let buffer_size = u64::from(padded_bytes_per_row) * u64::from(height);
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_test_readback"),
            size: buffer_size,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });

        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        encoder.copy_texture_to_buffer(
            texture.as_image_copy(),
            wgpu::TexelCopyBufferInfo {
                buffer: &buffer,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    bytes_per_row: Some(padded_bytes_per_row),
                    rows_per_image: Some(height),
                },
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );
        queue.submit(std::iter::once(encoder.finish()));

        // Map the buffer and block until the callback fires, then verify the
        // mapping succeeded.
        let (sender, receiver) = mpsc::channel();
        buffer
            .slice(..)
            .map_async(wgpu::MapMode::Read, move |result| {
                let _ = sender.send(result);
            });
        let _ = device.poll(wgpu::MaintainBase::Wait);
        receiver.recv().expect("map callback").expect("map success");

        // Strip the per-row padding while copying into the packed result.
        let mapped = buffer.slice(..).get_mapped_range();
        let mut pixels = vec![0u8; (unpadded_bytes_per_row * height) as usize];
        for row in 0..height as usize {
            let src_offset = row * padded_bytes_per_row as usize;
            let dst_offset = row * unpadded_bytes_per_row as usize;
            pixels[dst_offset..dst_offset + unpadded_bytes_per_row as usize]
                .copy_from_slice(&mapped[src_offset..src_offset + unpadded_bytes_per_row as usize]);
        }
        // The view into the mapped range must be dropped before unmapping.
        drop(mapped);
        buffer.unmap();
        pixels
    }
1701
1702    fn checksum(bytes: &[u8]) -> u64 {
1703        bytes.iter().enumerate().fold(0u64, |acc, (index, value)| {
1704            acc.wrapping_add((index as u64 + 1) * u64::from(*value))
1705        })
1706    }
1707
1708    #[test]
1709    #[ignore = "requires a working GPU adapter"]
1710    fn render_volume_smoke_test() {
1711        let Some((device, queue)) = test_device() else {
1712            return;
1713        };
1714        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
1715        let volume = small_volume();
1716        renderer.set_volume(&volume, true);
1717        renderer
1718            .set_render_params(&VolumeRenderParams::default())
1719            .expect("render params upload");
1720
1721        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 50.0), DVec3::ZERO, 30.0);
1722        let texture = renderer.create_render_target(64, 64);
1723        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1724        let mut encoder =
1725            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1726        renderer
1727            .render_volume(
1728                &mut encoder,
1729                &view,
1730                &camera,
1731                &VolumeRenderParams::default(),
1732                Viewport::full(64, 64),
1733            )
1734            .expect("volume render");
1735        queue.submit(std::iter::once(encoder.finish()));
1736    }
1737
1738    #[test]
1739    #[ignore = "requires a working GPU adapter"]
1740    fn render_sphere_snapshot_checksum() {
1741        let Some((device, queue)) = test_device() else {
1742            return;
1743        };
1744        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
1745        let volume = sphere_volume();
1746        renderer.set_volume(&volume, true);
1747        let params = VolumeRenderParams::default();
1748        renderer
1749            .set_render_params(&params)
1750            .expect("render params upload");
1751
1752        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 60.0), DVec3::ZERO, 30.0);
1753        let texture = renderer.create_render_target(64, 64);
1754        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1755        let mut encoder =
1756            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1757        renderer
1758            .render_volume(
1759                &mut encoder,
1760                &view,
1761                &camera,
1762                &params,
1763                Viewport::full(64, 64),
1764            )
1765            .expect("volume render");
1766        queue.submit(std::iter::once(encoder.finish()));
1767
1768        let pixels = read_texture(&device, &queue, &texture, 64, 64);
1769        let image_checksum = checksum(&pixels);
1770        eprintln!("sphere checksum: {image_checksum}");
1771        assert!(image_checksum > 0, "rendered sphere should not be empty");
1772    }
1773
1774    #[test]
1775    #[ignore = "requires a working GPU adapter"]
1776    fn render_slice_and_crosshair_smoke_test() {
1777        let Some((device, queue)) = test_device() else {
1778            return;
1779        };
1780        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
1781        let volume = small_volume();
1782        renderer.set_volume(&volume, true);
1783        renderer
1784            .set_render_params(&VolumeRenderParams::default())
1785            .expect("render params upload");
1786
1787        let texture = renderer.create_render_target(64, 64);
1788        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1789        let mut encoder =
1790            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1791        renderer
1792            .render_slice(
1793                &mut encoder,
1794                &view,
1795                &SlicePlane::axial(0.0, 32.0),
1796                &WindowLevel::from_scalar_range(0.0, 2048.0),
1797                Viewport::full(64, 64),
1798                None,
1799            )
1800            .expect("slice render");
1801        renderer
1802            .render_crosshair(
1803                &mut encoder,
1804                &view,
1805                Viewport::full(64, 64),
1806                &CrosshairParams::default(),
1807            )
1808            .expect("crosshair render");
1809        queue.submit(std::iter::once(encoder.finish()));
1810    }
1811}