Skip to main content

volren_gpu/
renderer.rs

1//! Main volume renderer: wgpu pipeline creation and render execution.
2
3use std::sync::Arc;
4
5use bytemuck::{Pod, Zeroable};
6use glam::{DMat3, DMat4, DVec2, DVec3, DVec4};
7use half::f16;
8use volren_core::{
9    camera::Camera,
10    render_params::{BlendMode, ClipPlane, VolumeRenderParams},
11    reslice::{SlicePlane, ThickSlabMode, ThickSlabParams},
12    transfer_function::{OpacityTransferFunction, TransferFunctionLut},
13    volume::{DynVolume, VolumeInfo},
14    window_level::WindowLevel,
15};
16
17use crate::{
18    texture::GpuVolumeTexture,
19    uniforms::{blend_mode as bm, VolumeUniforms},
20};
21
// The volume raycasting shader is assembled from several WGSL snippets so the
// shared helpers (fullscreen-quad vertices, gradient estimation, shading) can
// be reused across files without a WGSL preprocessor.
const VOLUME_SHADER_SRC: &str = concat!(
    include_str!("shaders/common.wgsl"),
    "\n",
    include_str!("shaders/fullscreen_quad.wgsl"),
    "\n",
    include_str!("shaders/gradient.wgsl"),
    "\n",
    include_str!("shaders/shading.wgsl"),
    "\n",
    include_str!("shaders/volume_raycast.wgsl"),
);
// Single-file shaders for the 2D reslice (MPR), crosshair overlay, and
// RGBA-blit passes.
const RESLICE_SHADER_SRC: &str = include_str!("shaders/reslice.wgsl");
const CROSSHAIR_SHADER_SRC: &str = include_str!("shaders/crosshair.wgsl");
const BLIT_SHADER_SRC: &str = include_str!("shaders/blit_rgba.wgsl");
36
/// Rectangular sub-region of the render target, in pixels.
///
/// A default-constructed viewport has zero area and is rejected by the
/// renderer's viewport validation; use [`Viewport::full`] or fill the fields
/// explicitly.
// Plain POD value type: derive the full set of comparison/hash traits so
// callers can deduplicate, cache, or compare viewports directly.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash)]
pub struct Viewport {
    /// Horizontal offset from the left edge.
    pub x: u32,
    /// Vertical offset from the top edge.
    pub y: u32,
    /// Width in pixels.
    pub width: u32,
    /// Height in pixels.
    pub height: u32,
}

impl Viewport {
    /// Create a viewport covering the full render target.
    #[must_use]
    pub fn full(width: u32, height: u32) -> Self {
        Self {
            x: 0,
            y: 0,
            width,
            height,
        }
    }
}
62
/// Errors that can occur during rendering.
///
/// Every variant is recoverable: upload the missing resource or correct the
/// request, then call the render method again.
#[derive(Debug, thiserror::Error)]
pub enum RenderError {
    /// No volume has been uploaded yet.
    #[error("no volume data uploaded — call `set_volume()` first")]
    NoVolume,
    /// No transfer-function data has been uploaded yet.
    #[error("no render parameters uploaded — call `set_render_params()` first")]
    NoTransferFunction,
    /// Viewport has zero area.
    #[error("viewport has zero area")]
    ZeroViewport,
    /// A progressive slice update referenced a texture slice that does not exist.
    #[error("slice index {z_index} is out of bounds for depth {depth}")]
    SliceOutOfBounds {
        /// Slice index requested by the caller.
        z_index: u32,
        /// Texture depth currently allocated.
        depth: u32,
    },
    /// A progressive slice update had the wrong number of voxels.
    #[error("slice {z_index} has {actual} voxels, expected {expected}")]
    SliceLengthMismatch {
        /// Slice index requested by the caller.
        z_index: u32,
        /// Expected voxel count for one slice.
        expected: usize,
        /// Actual voxel count supplied.
        actual: usize,
    },
}
94
/// Parameters for rendering crosshair overlay lines on a 2D slice viewport.
#[derive(Debug, Clone)]
pub struct CrosshairParams {
    /// Normalised position in `[0, 1] × [0, 1]` on the slice.
    pub position: [f32; 2],
    /// Line colour for the horizontal line (RGBA, 0–1).
    pub horizontal_color: [f32; 4],
    /// Line colour for the vertical line (RGBA, 0–1).
    pub vertical_color: [f32; 4],
    /// Line thickness in pixels.
    pub thickness: f32,
}

impl Default for CrosshairParams {
    /// A one-pixel crosshair at the viewport centre: red horizontal line,
    /// green vertical line, fully opaque.
    fn default() -> Self {
        let red = [1.0, 0.0, 0.0, 1.0];
        let green = [0.0, 1.0, 0.0, 1.0];
        CrosshairParams {
            position: [0.5; 2],
            horizontal_color: red,
            vertical_color: green,
            thickness: 1.0,
        }
    }
}
118
/// Patient orientation labels for the orientation marker.
#[derive(Debug, Clone)]
pub struct OrientationLabels {
    /// Label for the right direction (+X).
    pub right: String,
    /// Label for the left direction (−X).
    pub left: String,
    /// Label for the anterior direction (+Y).
    pub anterior: String,
    /// Label for the posterior direction (−Y).
    pub posterior: String,
    /// Label for the superior direction (+Z).
    pub superior: String,
    /// Label for the inferior direction (−Z).
    pub inferior: String,
}

impl Default for OrientationLabels {
    /// Standard single-letter anatomical labels: R/L, A/P, S/I.
    fn default() -> Self {
        OrientationLabels {
            right: String::from("R"),
            left: String::from("L"),
            anterior: String::from("A"),
            posterior: String::from("P"),
            superior: String::from("S"),
            inferior: String::from("I"),
        }
    }
}
148
// CPU-side record of the per-volume constants needed when filling shader
// uniforms: coordinate transforms, grid shape, voxel spacing, and the scalar
// value range.
#[derive(Debug, Clone, Copy)]
struct VolumeMetadata {
    // World space → normalised volume space, column-major (inverse of below).
    world_to_volume: [[f32; 4]; 4],
    // Normalised volume space → world space, column-major.
    volume_to_world: [[f32; 4]; 4],
    // Voxel counts per axis, stored as f32 ready for uniform upload.
    dimensions: [f32; 3],
    // Physical voxel spacing per axis.
    spacing: [f32; 3],
    // [min, max] scalar value of the volume data.
    scalar_range: [f32; 2],
}

impl VolumeMetadata {
    // Extract geometry and scalar range from an uploaded volume.
    fn from_volume(volume: &DynVolume) -> Self {
        let (scalar_min, scalar_max) = volume.scalar_range();
        Self::from_parts(
            volume.dimensions(),
            volume.spacing(),
            volume.origin(),
            volume.direction(),
            (scalar_min, scalar_max),
        )
    }

    // Build metadata from raw geometry parts. All math is done in f64
    // (matching the glam `D*` types) and truncated to f32 only at the end,
    // when packing for the GPU.
    fn from_parts(
        dimensions: glam::UVec3,
        spacing: DVec3,
        origin: DVec3,
        direction: DMat3,
        scalar_range: (f64, f64),
    ) -> Self {
        let dimensions_f64 = dimensions.as_dvec3();
        let direction = mat4_from_direction(direction);
        // Physical extent per axis: (dim − 1) × spacing — assumes samples sit
        // at voxel centres (node-centred convention); TODO confirm against the
        // raycast shader. `.max(1.0)` keeps the scale positive for degenerate
        // single-voxel axes.
        let scale = DVec3::new(
            (dimensions_f64.x - 1.0).max(1.0) * spacing.x,
            (dimensions_f64.y - 1.0).max(1.0) * spacing.y,
            (dimensions_f64.z - 1.0).max(1.0) * spacing.z,
        );
        // Compose translate ∘ orient ∘ scale so unit-cube volume coordinates
        // map to world space; inverting gives the world → volume transform.
        let volume_to_world =
            DMat4::from_translation(origin) * direction * DMat4::from_scale(scale);
        let world_to_volume = volume_to_world.inverse();

        Self {
            world_to_volume: world_to_volume.as_mat4().to_cols_array_2d(),
            volume_to_world: volume_to_world.as_mat4().to_cols_array_2d(),
            dimensions: [
                dimensions_f64.x as f32,
                dimensions_f64.y as f32,
                dimensions_f64.z as f32,
            ],
            spacing: [spacing.x as f32, spacing.y as f32, spacing.z as f32],
            scalar_range: [scalar_range.0 as f32, scalar_range.1 as f32],
        }
    }
}
201
// Uniform block for the reslice (MPR) shader pass.
//
// `#[repr(C)]` plus `Pod`/`Zeroable` allow the struct to be written into the
// GPU buffer as raw bytes; every field is a 16-byte vec4/uvec4 so the layout
// satisfies WGSL uniform-buffer alignment without implicit padding.
// NOTE(review): field meaning and packing must mirror `reslice.wgsl`; the
// fourth components are presumably padding — confirm against the shader.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct SliceUniforms {
    world_to_volume: [[f32; 4]; 4],
    slice_origin: [f32; 4],
    slice_right: [f32; 4],
    slice_up: [f32; 4],
    slice_normal: [f32; 4],
    slice_extent: [f32; 4],
    window_level: [f32; 4],
    slab_params: [u32; 4],
}
214
// Uniform block for the crosshair overlay pass.
//
// Packed in `render_crosshair`: `position` carries [x, y, thickness, 0] and
// `viewport` carries [width, height, 0, 0]; all fields are vec4-sized so the
// layout meets WGSL uniform alignment rules.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
struct CrosshairUniforms {
    position: [f32; 4],
    horizontal_color: [f32; 4],
    vertical_color: [f32; 4],
    viewport: [f32; 4],
}
223
/// A fully GPU-resident volume renderer.
///
/// The renderer stores the uploaded 3D texture plus the metadata needed for
/// raycasting and reslicing. Call [`VolumeRenderer::set_render_params`] after
/// uploading a volume to bake the active transfer functions.
pub struct VolumeRenderer {
    // Shared GPU handles; also cloned into textures/buffers created later.
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,

    // Volume raycasting pass.
    volume_pipeline: wgpu::RenderPipeline,
    volume_bind_group_layout: wgpu::BindGroupLayout,
    volume_uniform_buffer: wgpu::Buffer,

    // 2D reslice (MPR) pass.
    slice_pipeline: wgpu::RenderPipeline,
    slice_bind_group_layout: wgpu::BindGroupLayout,
    slice_uniform_buffer: wgpu::Buffer,

    // Crosshair overlay pass. Its bind group only references the uniform
    // buffer, so it can be built once up front and never rebuilt.
    crosshair_pipeline: wgpu::RenderPipeline,
    crosshair_uniform_buffer: wgpu::Buffer,
    crosshair_bind_group: wgpu::BindGroup,

    // Textured-quad blit pass (used for interactive upscaling and for
    // compositing CPU-generated RGBA images such as the orientation marker).
    blit_pipeline: wgpu::RenderPipeline,
    blit_bind_group_layout: wgpu::BindGroupLayout,
    blit_sampler: wgpu::Sampler,

    // Transfer-function LUTs: colour+opacity, and gradient opacity. Replaced
    // wholesale when new LUTs are uploaded (sizes may change between bakes).
    lut_texture: wgpu::Texture,
    lut_view: wgpu::TextureView,
    lut_sampler: wgpu::Sampler,
    gradient_lut_texture: wgpu::Texture,
    gradient_lut_view: wgpu::TextureView,
    gradient_lut_sampler: wgpu::Sampler,

    // Per-volume state: `None` until `set_volume`/`allocate_volume` runs.
    volume_texture: Option<GpuVolumeTexture>,
    volume_bind_group: Option<wgpu::BindGroup>,
    slice_bind_group: Option<wgpu::BindGroup>,
    volume_metadata: Option<VolumeMetadata>,
    // Set by `set_transfer_function`; gates `render_volume`.
    has_render_params: bool,

    output_format: wgpu::TextureFormat,
    viewport_size: (u32, u32),
}
265
266impl VolumeRenderer {
267    /// Create a new renderer for the given device and output format.
268    #[must_use]
269    pub fn new(
270        device: &wgpu::Device,
271        queue: &wgpu::Queue,
272        output_format: wgpu::TextureFormat,
273    ) -> Self {
274        Self::from_arc(
275            Arc::new(device.clone()),
276            Arc::new(queue.clone()),
277            output_format,
278        )
279    }
280
    /// Create a renderer from `Arc`-wrapped device and queue.
    ///
    /// Builds all pipelines, uniform buffers, and placeholder LUT textures up
    /// front; the volume texture and its bind groups are created later, when
    /// a volume is uploaded (`volume_texture`/`volume_bind_group` start as
    /// `None`).
    #[must_use]
    pub fn from_arc(
        device: Arc<wgpu::Device>,
        queue: Arc<wgpu::Queue>,
        output_format: wgpu::TextureFormat,
    ) -> Self {
        // Compile the four WGSL shader modules.
        let volume_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_volume_shader"),
            source: wgpu::ShaderSource::Wgsl(VOLUME_SHADER_SRC.into()),
        });
        let slice_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_reslice_shader"),
            source: wgpu::ShaderSource::Wgsl(RESLICE_SHADER_SRC.into()),
        });
        let crosshair_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_crosshair_shader"),
            source: wgpu::ShaderSource::Wgsl(CROSSHAIR_SHADER_SRC.into()),
        });
        let blit_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("volren_blit_shader"),
            source: wgpu::ShaderSource::Wgsl(BLIT_SHADER_SRC.into()),
        });

        // Bind-group layouts are kept on `Self` so bind groups can be rebuilt
        // when the volume or LUT textures are replaced.
        let volume_bind_group_layout = Self::create_volume_bind_group_layout(&device);
        let slice_bind_group_layout = Self::create_slice_bind_group_layout(&device);
        let crosshair_bind_group_layout = Self::create_crosshair_bind_group_layout(&device);
        let blit_bind_group_layout = Self::create_blit_bind_group_layout(&device);

        // All four pipelines target `output_format` with standard alpha
        // blending, so passes can be layered onto the same attachment.
        let volume_pipeline = Self::create_pipeline(
            &device,
            &volume_shader,
            &volume_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let slice_pipeline = Self::create_pipeline(
            &device,
            &slice_shader,
            &slice_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let crosshair_pipeline = Self::create_pipeline(
            &device,
            &crosshair_shader,
            &crosshair_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );
        let blit_pipeline = Self::create_pipeline(
            &device,
            &blit_shader,
            &blit_bind_group_layout,
            output_format,
            Some(wgpu::BlendState::ALPHA_BLENDING),
        );

        // One uniform buffer per pass; contents are written with
        // `queue.write_buffer` immediately before each draw.
        let volume_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_volume_uniforms"),
            size: std::mem::size_of::<VolumeUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let slice_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_slice_uniforms"),
            size: std::mem::size_of::<SliceUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let crosshair_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("volren_crosshair_uniforms"),
            size: std::mem::size_of::<CrosshairUniforms>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // The crosshair bind group only references its uniform buffer, so it
        // can be created once here; it never needs rebuilding.
        let crosshair_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_crosshair_bind_group"),
            layout: &crosshair_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: crosshair_uniform_buffer.as_entire_binding(),
            }],
        });

        // Placeholder LUT textures until transfer functions are uploaded.
        // 4096 matches the bake resolution used in `set_render_params`.
        let (lut_texture, lut_view, lut_sampler) = Self::create_lut_texture(&device, 4096);
        let (gradient_lut_texture, gradient_lut_view, gradient_lut_sampler) =
            Self::create_lut_texture(&device, 1024);
        // Linear filtering for smooth upscaling of low-resolution renders.
        let blit_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("volren_blit_sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        Self {
            device,
            queue,
            volume_pipeline,
            volume_bind_group_layout,
            volume_uniform_buffer,
            slice_pipeline,
            slice_bind_group_layout,
            slice_uniform_buffer,
            crosshair_pipeline,
            crosshair_uniform_buffer,
            crosshair_bind_group,
            blit_pipeline,
            blit_bind_group_layout,
            blit_sampler,
            lut_texture,
            lut_view,
            lut_sampler,
            gradient_lut_texture,
            gradient_lut_view,
            gradient_lut_sampler,
            volume_texture: None,
            volume_bind_group: None,
            slice_bind_group: None,
            volume_metadata: None,
            has_render_params: false,
            output_format,
            viewport_size: (0, 0),
        }
    }
409
    /// The texture format this renderer outputs into.
    ///
    /// All pipelines are built against this format, so every target view
    /// passed to the `render_*` methods must use it; textures from
    /// [`Self::create_render_target`] already do.
    #[must_use]
    pub fn output_format(&self) -> wgpu::TextureFormat {
        self.output_format
    }
415
416    /// Upload (or replace) volume data as a 3D GPU texture.
417    pub fn set_volume(&mut self, volume: &DynVolume, linear_interpolation: bool) {
418        self.volume_texture = Some(GpuVolumeTexture::upload(
419            &self.device,
420            &self.queue,
421            volume,
422            linear_interpolation,
423        ));
424        self.volume_metadata = Some(VolumeMetadata::from_volume(volume));
425        self.rebuild_bind_groups();
426    }
427
428    /// Allocate an empty 3D volume texture for progressive slice uploads.
429    pub fn allocate_volume(
430        &mut self,
431        dimensions: glam::UVec3,
432        spacing: DVec3,
433        origin: DVec3,
434        direction: DMat3,
435        scalar_range: (f64, f64),
436        linear_interpolation: bool,
437    ) {
438        self.volume_texture = Some(GpuVolumeTexture::allocate_empty(
439            &self.device,
440            dimensions,
441            linear_interpolation,
442        ));
443        self.volume_metadata = Some(VolumeMetadata::from_parts(
444            dimensions,
445            spacing,
446            origin,
447            direction,
448            scalar_range,
449        ));
450        self.rebuild_bind_groups();
451    }
452
453    /// Update one signed 16-bit volume slice inside an already allocated texture.
454    pub fn update_volume_slice_i16(
455        &mut self,
456        z_index: u32,
457        pixels: &[i16],
458        scalar_range: (f64, f64),
459    ) -> Result<(), RenderError> {
460        let texture = self.volume_texture.as_ref().ok_or(RenderError::NoVolume)?;
461        if z_index >= texture.dimensions.z {
462            return Err(RenderError::SliceOutOfBounds {
463                z_index,
464                depth: texture.dimensions.z,
465            });
466        }
467        let expected = (texture.dimensions.x * texture.dimensions.y) as usize;
468        if pixels.len() != expected {
469            return Err(RenderError::SliceLengthMismatch {
470                z_index,
471                expected,
472                actual: pixels.len(),
473            });
474        }
475        texture.update_i16_slice(&self.queue, z_index, pixels);
476        if let Some(metadata) = self.volume_metadata.as_mut() {
477            metadata.scalar_range = [scalar_range.0 as f32, scalar_range.1 as f32];
478        }
479        Ok(())
480    }
481
    /// Upload a baked transfer-function LUT to the GPU.
    ///
    /// A fresh LUT texture is created each time because the LUT size may
    /// differ between bakes. Marks the renderer as ready to raycast
    /// (`has_render_params`) and rebuilds the bind groups that reference the
    /// LUT view.
    pub fn set_transfer_function(&mut self, lut: &TransferFunctionLut) {
        let (texture, view, sampler) = Self::create_lut_texture(&self.device, lut.lut_size());
        // Convert the f32 RGBA table to f16 texels (2 bytes per channel,
        // matching the bytes_per_row computation below).
        let f16_bytes = f32_slice_to_f16_bytes(lut.as_rgba_f32());
        self.queue.write_texture(
            texture.as_image_copy(),
            &f16_bytes,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // 4 channels × 2 bytes (f16) per texel.
                bytes_per_row: Some(lut.lut_size() * 4 * 2),
                rows_per_image: None,
            },
            // 1D-style LUT stored as a width × 1 texture.
            wgpu::Extent3d {
                width: lut.lut_size(),
                height: 1,
                depth_or_array_layers: 1,
            },
        );
        self.lut_texture = texture;
        self.lut_view = view;
        self.lut_sampler = sampler;
        self.has_render_params = true;
        // The LUT view changed, so bind groups referencing it are now stale.
        self.rebuild_bind_groups();
    }
506
507    /// Bake and upload transfer functions from the current render parameters.
508    ///
509    /// # Errors
510    /// Returns [`RenderError::NoVolume`] if no volume metadata has been uploaded yet.
511    pub fn set_render_params(&mut self, params: &VolumeRenderParams) -> Result<(), RenderError> {
512        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
513        let lut = TransferFunctionLut::bake(
514            &params.color_tf,
515            &params.opacity_tf,
516            f64::from(metadata.scalar_range[0]),
517            f64::from(metadata.scalar_range[1]),
518            4096,
519        );
520        self.set_transfer_function(&lut);
521        let gradient_tf = params
522            .gradient_opacity_tf
523            .clone()
524            .unwrap_or_else(opaque_unit_ramp);
525        self.upload_gradient_lut(&gradient_tf);
526        Ok(())
527    }
528
    /// Handle viewport resize.
    ///
    /// Only records the new size; no GPU resources are reallocated here.
    pub fn resize(&mut self, width: u32, height: u32) {
        self.viewport_size = (width, height);
    }
533
534    /// Create an off-screen render target texture.
535    #[must_use]
536    pub fn create_render_target(&self, width: u32, height: u32) -> wgpu::Texture {
537        self.device.create_texture(&wgpu::TextureDescriptor {
538            label: Some("volren_offscreen_target"),
539            size: wgpu::Extent3d {
540                width,
541                height,
542                depth_or_array_layers: 1,
543            },
544            mip_level_count: 1,
545            sample_count: 1,
546            dimension: wgpu::TextureDimension::D2,
547            format: self.output_format,
548            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
549                | wgpu::TextureUsages::TEXTURE_BINDING
550                | wgpu::TextureUsages::COPY_SRC,
551            view_formats: &[],
552        })
553    }
554
    /// Render the volume into the given color attachment.
    ///
    /// The caller owns the command encoder and submits it.
    ///
    /// The pass loads the existing target contents (`LoadOp::Load`) and draws
    /// only inside `viewport`, so multiple viewports can share one target.
    ///
    /// # Errors
    /// Returns [`RenderError`] if prerequisites are missing or the viewport is invalid.
    pub fn render_volume(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        camera: &Camera,
        params: &VolumeRenderParams,
        viewport: Viewport,
    ) -> Result<(), RenderError> {
        // Validate all prerequisites before touching any GPU state.
        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
        let bind_group = self
            .volume_bind_group
            .as_ref()
            .ok_or(RenderError::NoVolume)?;
        validate_viewport(viewport)?;
        if !self.has_render_params {
            return Err(RenderError::NoTransferFunction);
        }

        // Upload this frame's uniforms. `Queue::write_buffer` is ordered
        // before any later submission of this encoder on the same queue.
        let uniforms = self.build_uniforms(camera, metadata, params, viewport);
        self.queue.write_buffer(
            &self.volume_uniform_buffer,
            0,
            bytemuck::bytes_of(&uniforms),
        );

        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("volren_volume_pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                depth_slice: None,
                ops: wgpu::Operations {
                    // Preserve existing target contents and composite on top.
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
            multiview_mask: None,
        });
        pass.set_pipeline(&self.volume_pipeline);
        pass.set_bind_group(0, bind_group, &[]);
        pass.set_viewport(
            viewport.x as f32,
            viewport.y as f32,
            viewport.width as f32,
            viewport.height as f32,
            0.0,
            1.0,
        );
        // Six vertices: the two triangles of the fullscreen quad generated in
        // fullscreen_quad.wgsl, clipped to the viewport set above.
        pass.draw(0..6, 0..1);
        Ok(())
    }
615
616    /// Render the volume into a newly-created off-screen texture.
617    ///
618    /// # Errors
619    /// Propagates the same errors as [`Self::render_volume`].
620    pub fn render_volume_to_texture(
621        &self,
622        encoder: &mut wgpu::CommandEncoder,
623        camera: &Camera,
624        params: &VolumeRenderParams,
625        width: u32,
626        height: u32,
627    ) -> Result<wgpu::Texture, RenderError> {
628        let texture = self.create_render_target(width, height);
629        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
630        self.render_volume(
631            encoder,
632            &view,
633            camera,
634            params,
635            Viewport::full(width, height),
636        )?;
637        Ok(texture)
638    }
639
640    /// Render the volume at reduced resolution and upscale the result into `target`.
641    ///
642    /// This is intended for interactive manipulation where responsiveness matters
643    /// more than final image quality. Passing `1` disables downsampling.
644    ///
645    /// # Errors
646    /// Propagates the same errors as [`Self::render_volume`].
647    pub fn render_volume_interactive(
648        &self,
649        encoder: &mut wgpu::CommandEncoder,
650        target: &wgpu::TextureView,
651        camera: &Camera,
652        params: &VolumeRenderParams,
653        viewport: Viewport,
654        downsample_factor: u32,
655    ) -> Result<(), RenderError> {
656        validate_viewport(viewport)?;
657        let factor = downsample_factor.max(1);
658        if factor == 1 {
659            return self.render_volume(encoder, target, camera, params, viewport);
660        }
661
662        let lod_width = (viewport.width / factor).max(1);
663        let lod_height = (viewport.height / factor).max(1);
664        let texture = self.create_render_target(lod_width, lod_height);
665        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
666
667        self.render_volume(
668            encoder,
669            &view,
670            camera,
671            params,
672            Viewport::full(lod_width, lod_height),
673        )?;
674        self.blit_texture_view(encoder, target, viewport, &view);
675        Ok(())
676    }
677
    /// Render a 2D reslice (MPR slice) into the given color attachment.
    ///
    /// Unlike [`Self::render_volume`] there is no `has_render_params` gate:
    /// slice rendering is driven by `window_level` rather than the baked
    /// transfer-function LUT (see `reslice.wgsl`).
    ///
    /// # Errors
    /// Returns [`RenderError::NoVolume`] when no volume has been uploaded.
    pub fn render_slice(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        slice_plane: &SlicePlane,
        window_level: &WindowLevel,
        viewport: Viewport,
        thick_slab: Option<&ThickSlabParams>,
    ) -> Result<(), RenderError> {
        let metadata = self.volume_metadata.ok_or(RenderError::NoVolume)?;
        let bind_group = self
            .slice_bind_group
            .as_ref()
            .ok_or(RenderError::NoVolume)?;
        validate_viewport(viewport)?;

        // Upload this slice's uniforms before recording the pass.
        let uniforms = self.build_slice_uniforms(metadata, slice_plane, window_level, thick_slab);
        self.queue
            .write_buffer(&self.slice_uniform_buffer, 0, bytemuck::bytes_of(&uniforms));

        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("volren_slice_pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                depth_slice: None,
                ops: wgpu::Operations {
                    // Preserve existing target contents (other viewports).
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
            multiview_mask: None,
        });
        pass.set_pipeline(&self.slice_pipeline);
        pass.set_bind_group(0, bind_group, &[]);
        pass.set_viewport(
            viewport.x as f32,
            viewport.y as f32,
            viewport.width as f32,
            viewport.height as f32,
            0.0,
            1.0,
        );
        // Fullscreen quad (two triangles), clipped to the viewport.
        pass.draw(0..6, 0..1);
        Ok(())
    }
731
732    /// Render a slice into a newly-created off-screen texture.
733    ///
734    /// # Errors
735    /// Propagates the same errors as [`Self::render_slice`].
736    pub fn render_slice_to_texture(
737        &self,
738        encoder: &mut wgpu::CommandEncoder,
739        slice_plane: &SlicePlane,
740        window_level: &WindowLevel,
741        width: u32,
742        height: u32,
743        thick_slab: Option<&ThickSlabParams>,
744    ) -> Result<wgpu::Texture, RenderError> {
745        let texture = self.create_render_target(width, height);
746        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
747        self.render_slice(
748            encoder,
749            &view,
750            slice_plane,
751            window_level,
752            Viewport::full(width, height),
753            thick_slab,
754        )?;
755        Ok(texture)
756    }
757
    /// Render crosshair overlay lines on a slice viewport.
    ///
    /// # Errors
    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
    pub fn render_crosshair(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        viewport: Viewport,
        crosshair: &CrosshairParams,
    ) -> Result<(), RenderError> {
        validate_viewport(viewport)?;
        // Pack the uniforms: position.xy is the normalised crosshair centre
        // and position.z carries the line thickness, avoiding an extra
        // 16-byte-aligned field in the uniform block.
        let uniforms = CrosshairUniforms {
            position: [
                crosshair.position[0],
                crosshair.position[1],
                crosshair.thickness,
                0.0,
            ],
            horizontal_color: crosshair.horizontal_color,
            vertical_color: crosshair.vertical_color,
            // Only the viewport size is needed — placement within the target
            // is handled by `set_viewport` below.
            viewport: [viewport.width as f32, viewport.height as f32, 0.0, 0.0],
        };
        self.queue.write_buffer(
            &self.crosshair_uniform_buffer,
            0,
            bytemuck::bytes_of(&uniforms),
        );

        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("volren_crosshair_pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                depth_slice: None,
                ops: wgpu::Operations {
                    // Overlay pass: keep the slice image already in the target.
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
            multiview_mask: None,
        });
        pass.set_pipeline(&self.crosshair_pipeline);
        pass.set_bind_group(0, &self.crosshair_bind_group, &[]);
        pass.set_viewport(
            viewport.x as f32,
            viewport.y as f32,
            viewport.width as f32,
            viewport.height as f32,
            0.0,
            1.0,
        );
        // Fullscreen quad (two triangles), clipped to the viewport.
        pass.draw(0..6, 0..1);
        Ok(())
    }
816
817    /// Render an orientation marker in the given viewport.
818    ///
819    /// The marker is generated on the CPU as a small RGBA image, then composited
820    /// over the target with a lightweight textured-quad pass.
821    ///
822    /// # Errors
823    /// Returns [`RenderError::ZeroViewport`] for an empty viewport.
824    pub fn render_orientation_marker(
825        &self,
826        encoder: &mut wgpu::CommandEncoder,
827        target: &wgpu::TextureView,
828        camera: &Camera,
829        viewport: Viewport,
830        labels: &OrientationLabels,
831    ) -> Result<(), RenderError> {
832        validate_viewport(viewport)?;
833        let width = viewport.width.max(1);
834        let height = viewport.height.max(1);
835        let image = build_orientation_marker_image(width, height, camera, labels);
836        self.blit_rgba8(encoder, target, viewport, width, height, &image);
837        Ok(())
838    }
839
840    fn build_uniforms(
841        &self,
842        camera: &Camera,
843        metadata: VolumeMetadata,
844        params: &VolumeRenderParams,
845        viewport: Viewport,
846    ) -> VolumeUniforms {
847        let aspect = f64::from(viewport.width) / f64::from(viewport.height.max(1));
848        let view = camera.view_matrix();
849        let proj = camera.projection_matrix(aspect);
850        let mvp = (proj * view).as_mat4();
851        let inv_mvp = (proj * view).inverse().as_mat4();
852
853        let blend_mode = match params.blend_mode {
854            BlendMode::Composite => bm::COMPOSITE,
855            BlendMode::MaximumIntensity => bm::MAXIMUM_INTENSITY,
856            BlendMode::MinimumIntensity => bm::MINIMUM_INTENSITY,
857            BlendMode::AverageIntensity => bm::AVERAGE_INTENSITY,
858            BlendMode::Additive => bm::ADDITIVE,
859            BlendMode::Isosurface { .. } => bm::ISOSURFACE,
860            _ => bm::COMPOSITE,
861        };
862        let (window_center, window_width) = params.window_level.map_or_else(
863            || {
864                let wl = WindowLevel::from_scalar_range(
865                    f64::from(metadata.scalar_range[0]),
866                    f64::from(metadata.scalar_range[1]),
867                );
868                (wl.center as f32, wl.width as f32)
869            },
870            |wl| (wl.center as f32, wl.width as f32),
871        );
872        let (shading_enabled, ambient, diffuse, specular, specular_power) =
873            if let Some(shading) = params.shading {
874                (
875                    1u32,
876                    shading.ambient,
877                    shading.diffuse,
878                    shading.specular,
879                    shading.specular_power,
880                )
881            } else {
882                (0u32, 0.0, 0.0, 0.0, 0.0)
883            };
884        let (clip_planes, num_clip_planes) = combined_clip_planes(params);
885        let iso_value = match params.blend_mode {
886            BlendMode::Isosurface { iso_value } => iso_value as f32,
887            _ => 0.0,
888        };
889        let camera_position = camera.position().as_vec3();
890
891        VolumeUniforms {
892            mvp: mvp.to_cols_array_2d(),
893            inv_mvp: inv_mvp.to_cols_array_2d(),
894            world_to_volume: metadata.world_to_volume,
895            volume_to_world: metadata.volume_to_world,
896            dimensions: [
897                metadata.dimensions[0],
898                metadata.dimensions[1],
899                metadata.dimensions[2],
900                0.0,
901            ],
902            spacing: [
903                metadata.spacing[0],
904                metadata.spacing[1],
905                metadata.spacing[2],
906                0.0,
907            ],
908            scalar_range: [
909                metadata.scalar_range[0],
910                metadata.scalar_range[1],
911                iso_value,
912                0.0,
913            ],
914            step_size: params.step_size_factor.max(1e-3),
915            opacity_correction: 1.0 / params.step_size_factor.max(1e-3),
916            blend_mode,
917            shading_enabled,
918            ambient,
919            diffuse,
920            specular,
921            specular_power,
922            light_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
923            camera_position: [camera_position.x, camera_position.y, camera_position.z, 0.0],
924            window_center,
925            window_width,
926            num_clip_planes,
927            _pad0: 0,
928            clip_planes,
929            background: params.background,
930        }
931    }
932
933    fn build_slice_uniforms(
934        &self,
935        metadata: VolumeMetadata,
936        slice_plane: &SlicePlane,
937        window_level: &WindowLevel,
938        thick_slab: Option<&ThickSlabParams>,
939    ) -> SliceUniforms {
940        let slab_mode = thick_slab.map_or(ThickSlabMode::Mip, |params| params.mode);
941        let (half_thickness, num_samples) = thick_slab.map_or((0.0f32, 1u32), |params| {
942            (params.half_thickness as f32, params.num_samples.max(1))
943        });
944
945        SliceUniforms {
946            world_to_volume: metadata.world_to_volume,
947            slice_origin: [
948                slice_plane.origin.x as f32,
949                slice_plane.origin.y as f32,
950                slice_plane.origin.z as f32,
951                0.0,
952            ],
953            slice_right: [
954                slice_plane.right.x as f32,
955                slice_plane.right.y as f32,
956                slice_plane.right.z as f32,
957                0.0,
958            ],
959            slice_up: [
960                slice_plane.up.x as f32,
961                slice_plane.up.y as f32,
962                slice_plane.up.z as f32,
963                0.0,
964            ],
965            slice_normal: [
966                slice_plane.normal().x as f32,
967                slice_plane.normal().y as f32,
968                slice_plane.normal().z as f32,
969                0.0,
970            ],
971            slice_extent: [
972                slice_plane.width as f32,
973                slice_plane.height as f32,
974                half_thickness,
975                0.0,
976            ],
977            window_level: [
978                window_level.center as f32,
979                window_level.width as f32,
980                0.0,
981                0.0,
982            ],
983            slab_params: [thick_slab_mode_code(slab_mode), num_samples, 0, 0],
984        }
985    }
986
    /// Bake `tf` into a fresh 1D gradient-opacity LUT texture and rebind it.
    ///
    /// The opacity curve is sampled at a fixed resolution, converted from f32
    /// to f16 texels (the LUT texture is `Rgba16Float`), uploaded, and the
    /// bind groups are rebuilt so the new texture takes effect.
    fn upload_gradient_lut(&mut self, tf: &OpacityTransferFunction) {
        let resolution = 1024u32;
        // Bake as f32 RGBA bytes, then narrow to f16 for the Rgba16Float texture.
        let f32_bytes = bake_opacity_lut_bytes(tf, resolution);
        let f32_slice: &[f32] = bytemuck::cast_slice(&f32_bytes);
        let f16_bytes = f32_slice_to_f16_bytes(f32_slice);
        let (texture, view, sampler) = Self::create_lut_texture(&self.device, resolution);
        self.queue.write_texture(
            texture.as_image_copy(),
            &f16_bytes,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // 4 channels (RGBA) x 2 bytes per f16 texel component.
                bytes_per_row: Some(resolution * 4 * 2),
                rows_per_image: None,
            },
            wgpu::Extent3d {
                width: resolution,
                height: 1,
                depth_or_array_layers: 1,
            },
        );
        // Swap in the new resources, then rebuild bind groups that reference them.
        self.gradient_lut_texture = texture;
        self.gradient_lut_view = view;
        self.gradient_lut_sampler = sampler;
        self.rebuild_bind_groups();
    }
1012
1013    fn rebuild_bind_groups(&mut self) {
1014        let Some(volume_texture) = self.volume_texture.as_ref() else {
1015            return;
1016        };
1017
1018        self.volume_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1019            label: Some("volren_volume_bind_group"),
1020            layout: &self.volume_bind_group_layout,
1021            entries: &[
1022                wgpu::BindGroupEntry {
1023                    binding: 0,
1024                    resource: self.volume_uniform_buffer.as_entire_binding(),
1025                },
1026                wgpu::BindGroupEntry {
1027                    binding: 1,
1028                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
1029                },
1030                wgpu::BindGroupEntry {
1031                    binding: 2,
1032                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
1033                },
1034                wgpu::BindGroupEntry {
1035                    binding: 3,
1036                    resource: wgpu::BindingResource::TextureView(&self.lut_view),
1037                },
1038                wgpu::BindGroupEntry {
1039                    binding: 4,
1040                    resource: wgpu::BindingResource::Sampler(&self.lut_sampler),
1041                },
1042                wgpu::BindGroupEntry {
1043                    binding: 5,
1044                    resource: wgpu::BindingResource::TextureView(&self.gradient_lut_view),
1045                },
1046                wgpu::BindGroupEntry {
1047                    binding: 6,
1048                    resource: wgpu::BindingResource::Sampler(&self.gradient_lut_sampler),
1049                },
1050            ],
1051        }));
1052
1053        self.slice_bind_group = Some(self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1054            label: Some("volren_slice_bind_group"),
1055            layout: &self.slice_bind_group_layout,
1056            entries: &[
1057                wgpu::BindGroupEntry {
1058                    binding: 0,
1059                    resource: self.slice_uniform_buffer.as_entire_binding(),
1060                },
1061                wgpu::BindGroupEntry {
1062                    binding: 1,
1063                    resource: wgpu::BindingResource::TextureView(&volume_texture.view),
1064                },
1065                wgpu::BindGroupEntry {
1066                    binding: 2,
1067                    resource: wgpu::BindingResource::Sampler(&volume_texture.sampler),
1068                },
1069            ],
1070        }));
1071    }
1072
    /// Upload an RGBA8 CPU image into a transient texture and composite it
    /// over `target` within `viewport`.
    ///
    /// The texture is created per call; this is intended for small overlay
    /// images (e.g. the orientation marker), not per-frame bulk uploads.
    fn blit_rgba8(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        viewport: Viewport,
        width: u32,
        height: u32,
        rgba: &[u8],
    ) {
        let texture = self.device.create_texture(&wgpu::TextureDescriptor {
            label: Some("volren_blit_texture"),
            size: wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8Unorm,
            usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });
        self.queue.write_texture(
            texture.as_image_copy(),
            rgba,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // Tightly packed rows: 4 bytes per RGBA8 pixel.
                bytes_per_row: Some(width * 4),
                rows_per_image: Some(height),
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        self.blit_texture_view(encoder, target, viewport, &view);
    }
1113
    /// Draw `source_view` over `target`, restricted to `viewport`, using the
    /// blit pipeline.
    ///
    /// The pass loads the existing target contents (`LoadOp::Load`) so the
    /// blit composites on top of whatever was rendered before it.
    fn blit_texture_view(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        viewport: Viewport,
        source_view: &wgpu::TextureView,
    ) {
        // Bind group is transient: it references the caller's source view.
        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("volren_blit_bind_group"),
            layout: &self.blit_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(source_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&self.blit_sampler),
                },
            ],
        });

        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("volren_blit_pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                depth_slice: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
            multiview_mask: None,
        });
        pass.set_pipeline(&self.blit_pipeline);
        pass.set_bind_group(0, &bind_group, &[]);
        pass.set_viewport(
            viewport.x as f32,
            viewport.y as f32,
            viewport.width as f32,
            viewport.height as f32,
            0.0,
            1.0,
        );
        // Six vertices, no vertex buffer: the quad is generated in the shader.
        pass.draw(0..6, 0..1);
    }
1164
1165    fn create_volume_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1166        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1167            label: Some("volren_volume_bgl"),
1168            entries: &[
1169                uniform_bgl_entry(0),
1170                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
1171                sampler_bgl_entry(2),
1172                texture_bgl_entry(3, wgpu::TextureViewDimension::D1),
1173                sampler_bgl_entry(4),
1174                texture_bgl_entry(5, wgpu::TextureViewDimension::D1),
1175                sampler_bgl_entry(6),
1176            ],
1177        })
1178    }
1179
1180    fn create_slice_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1181        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1182            label: Some("volren_slice_bgl"),
1183            entries: &[
1184                uniform_bgl_entry(0),
1185                texture_bgl_entry(1, wgpu::TextureViewDimension::D3),
1186                sampler_bgl_entry(2),
1187            ],
1188        })
1189    }
1190
1191    fn create_crosshair_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1192        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1193            label: Some("volren_crosshair_bgl"),
1194            entries: &[uniform_bgl_entry(0)],
1195        })
1196    }
1197
1198    fn create_blit_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
1199        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
1200            label: Some("volren_blit_bgl"),
1201            entries: &[texture_bgl_entry_2d(0), sampler_bgl_entry(1)],
1202        })
1203    }
1204
    /// Build a fullscreen render pipeline from a shader module.
    ///
    /// All of the renderer's pipelines share this shape: one bind group,
    /// `vs_main`/`fs_main` entry points in the same module, no vertex buffers
    /// (geometry is generated in the vertex shader), triangle-list topology,
    /// and no depth/stencil. `blend` selects the color blend state (e.g.
    /// `None` for opaque passes, alpha blending for overlays).
    fn create_pipeline(
        device: &wgpu::Device,
        shader: &wgpu::ShaderModule,
        bind_group_layout: &wgpu::BindGroupLayout,
        output_format: wgpu::TextureFormat,
        blend: Option<wgpu::BlendState>,
    ) -> wgpu::RenderPipeline {
        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("volren_pipeline_layout"),
            bind_group_layouts: &[Some(bind_group_layout)],
            immediate_size: 0,
        });

        device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("volren_pipeline"),
            layout: Some(&layout),
            vertex: wgpu::VertexState {
                module: shader,
                entry_point: Some("vs_main"),
                // No vertex buffers: positions come from @builtin(vertex_index).
                buffers: &[],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format: output_format,
                    blend,
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                ..Default::default()
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
            multiview_mask: None,
            cache: None,
        })
    }
1247
1248    fn create_lut_texture(
1249        device: &wgpu::Device,
1250        size: u32,
1251    ) -> (wgpu::Texture, wgpu::TextureView, wgpu::Sampler) {
1252        let texture = device.create_texture(&wgpu::TextureDescriptor {
1253            label: Some("volren_lut"),
1254            size: wgpu::Extent3d {
1255                width: size,
1256                height: 1,
1257                depth_or_array_layers: 1,
1258            },
1259            mip_level_count: 1,
1260            sample_count: 1,
1261            dimension: wgpu::TextureDimension::D1,
1262            format: wgpu::TextureFormat::Rgba16Float,
1263            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
1264            view_formats: &[],
1265        });
1266        let view = texture.create_view(&wgpu::TextureViewDescriptor {
1267            dimension: Some(wgpu::TextureViewDimension::D1),
1268            ..Default::default()
1269        });
1270        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
1271            label: Some("volren_lut_sampler"),
1272            address_mode_u: wgpu::AddressMode::ClampToEdge,
1273            mag_filter: wgpu::FilterMode::Linear,
1274            min_filter: wgpu::FilterMode::Linear,
1275            ..Default::default()
1276        });
1277        (texture, view, sampler)
1278    }
1279}
1280
1281fn validate_viewport(viewport: Viewport) -> Result<(), RenderError> {
1282    if viewport.width == 0 || viewport.height == 0 {
1283        Err(RenderError::ZeroViewport)
1284    } else {
1285        Ok(())
1286    }
1287}
1288
1289fn opaque_unit_ramp() -> OpacityTransferFunction {
1290    let mut tf = OpacityTransferFunction::new();
1291    tf.add_point(0.0, 1.0);
1292    tf.add_point(1.0, 1.0);
1293    tf
1294}
1295
1296fn bake_opacity_lut_bytes(tf: &OpacityTransferFunction, resolution: u32) -> Vec<u8> {
1297    let mut rgba = Vec::with_capacity((resolution * 4) as usize);
1298    for i in 0..resolution {
1299        let t = if resolution <= 1 {
1300            0.0
1301        } else {
1302            f64::from(i) / f64::from(resolution - 1)
1303        };
1304        let opacity = tf.evaluate(t) as f32;
1305        rgba.extend_from_slice(&[opacity, opacity, opacity, 1.0]);
1306    }
1307    bytemuck::cast_slice(&rgba).to_vec()
1308}
1309
1310/// Convert an f32 slice to packed f16 (little-endian) bytes for `Rgba16Float` upload.
1311fn f32_slice_to_f16_bytes(data: &[f32]) -> Vec<u8> {
1312    let mut bytes = Vec::with_capacity(data.len() * 2);
1313    for &val in data {
1314        bytes.extend_from_slice(&f16::from_f32(val).to_le_bytes());
1315    }
1316    bytes
1317}
1318
1319fn combined_clip_planes(params: &VolumeRenderParams) -> ([[f32; 4]; 6], u32) {
1320    let mut planes = params.clip_planes.clone();
1321    if let Some(bounds) = params.cropping_bounds {
1322        planes.extend([
1323            ClipPlane::from_point_and_normal(DVec3::new(bounds.min.x, 0.0, 0.0), DVec3::X),
1324            ClipPlane::from_point_and_normal(DVec3::new(bounds.max.x, 0.0, 0.0), DVec3::NEG_X),
1325            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.min.y, 0.0), DVec3::Y),
1326            ClipPlane::from_point_and_normal(DVec3::new(0.0, bounds.max.y, 0.0), DVec3::NEG_Y),
1327            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.min.z), DVec3::Z),
1328            ClipPlane::from_point_and_normal(DVec3::new(0.0, 0.0, bounds.max.z), DVec3::NEG_Z),
1329        ]);
1330    }
1331
1332    let mut packed = [[0.0f32; 4]; 6];
1333    for (index, plane) in planes.iter().take(6).enumerate() {
1334        let eq = plane.equation;
1335        packed[index] = [eq.x as f32, eq.y as f32, eq.z as f32, eq.w as f32];
1336    }
1337    (packed, planes.len().min(6) as u32)
1338}
1339
1340fn thick_slab_mode_code(mode: ThickSlabMode) -> u32 {
1341    match mode {
1342        ThickSlabMode::Mip => 0,
1343        ThickSlabMode::MinIp => 1,
1344        ThickSlabMode::Mean => 2,
1345        _ => 0,
1346    }
1347}
1348
1349fn mat4_from_direction(direction: DMat3) -> DMat4 {
1350    DMat4::from_cols(
1351        direction.x_axis.extend(0.0),
1352        direction.y_axis.extend(0.0),
1353        direction.z_axis.extend(0.0),
1354        DVec4::W,
1355    )
1356}
1357
1358fn uniform_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
1359    wgpu::BindGroupLayoutEntry {
1360        binding,
1361        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
1362        ty: wgpu::BindingType::Buffer {
1363            ty: wgpu::BufferBindingType::Uniform,
1364            has_dynamic_offset: false,
1365            min_binding_size: None,
1366        },
1367        count: None,
1368    }
1369}
1370
1371fn texture_bgl_entry(
1372    binding: u32,
1373    view_dimension: wgpu::TextureViewDimension,
1374) -> wgpu::BindGroupLayoutEntry {
1375    wgpu::BindGroupLayoutEntry {
1376        binding,
1377        visibility: wgpu::ShaderStages::FRAGMENT,
1378        ty: wgpu::BindingType::Texture {
1379            sample_type: wgpu::TextureSampleType::Float { filterable: true },
1380            view_dimension,
1381            multisampled: false,
1382        },
1383        count: None,
1384    }
1385}
1386
/// Convenience wrapper for a 2D filterable-float texture layout entry.
fn texture_bgl_entry_2d(binding: u32) -> wgpu::BindGroupLayoutEntry {
    texture_bgl_entry(binding, wgpu::TextureViewDimension::D2)
}
1390
1391fn sampler_bgl_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
1392    wgpu::BindGroupLayoutEntry {
1393        binding,
1394        visibility: wgpu::ShaderStages::FRAGMENT,
1395        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
1396        count: None,
1397    }
1398}
1399
1400fn build_orientation_marker_image(
1401    width: u32,
1402    height: u32,
1403    camera: &Camera,
1404    labels: &OrientationLabels,
1405) -> Vec<u8> {
1406    let mut pixels = vec![0u8; (width as usize) * (height as usize) * 4];
1407    let center = DVec2::new(f64::from(width) * 0.5, f64::from(height) * 0.5);
1408    let radius = f64::from(width.min(height)) * 0.28;
1409    let view = camera.view_matrix();
1410
1411    let axes = [
1412        (DVec3::X, [255, 80, 80, 255], labels.right.as_str()),
1413        (-DVec3::X, [128, 40, 40, 220], labels.left.as_str()),
1414        (DVec3::Y, [80, 255, 80, 255], labels.anterior.as_str()),
1415        (-DVec3::Y, [40, 128, 40, 220], labels.posterior.as_str()),
1416        (DVec3::Z, [80, 160, 255, 255], labels.superior.as_str()),
1417        (-DVec3::Z, [40, 80, 128, 220], labels.inferior.as_str()),
1418    ];
1419
1420    for (axis, color, label) in axes {
1421        let projected = project_axis(view, axis);
1422        if projected.length_squared() < 1e-8 {
1423            continue;
1424        }
1425        let end = center + projected.normalize() * radius;
1426        draw_line(&mut pixels, width, height, center, end, color);
1427        draw_text(
1428            &mut pixels,
1429            width,
1430            height,
1431            end + projected.normalize() * 6.0,
1432            label,
1433            color,
1434        );
1435    }
1436
1437    draw_disc(
1438        &mut pixels,
1439        width,
1440        height,
1441        center,
1442        2.5,
1443        [255, 255, 255, 255],
1444    );
1445    pixels
1446}
1447
1448fn project_axis(view: DMat4, axis: DVec3) -> DVec2 {
1449    let camera_space = view.transform_vector3(axis);
1450    DVec2::new(camera_space.x, -camera_space.y)
1451}
1452
1453fn draw_line(pixels: &mut [u8], width: u32, height: u32, start: DVec2, end: DVec2, color: [u8; 4]) {
1454    let delta = end - start;
1455    let steps = delta.length().ceil().max(1.0) as u32;
1456    for step in 0..=steps {
1457        let t = f64::from(step) / f64::from(steps.max(1));
1458        let point = start + delta * t;
1459        alpha_plot(
1460            pixels,
1461            width,
1462            height,
1463            point.x.round() as i32,
1464            point.y.round() as i32,
1465            color,
1466        );
1467    }
1468}
1469
1470fn draw_disc(
1471    pixels: &mut [u8],
1472    width: u32,
1473    height: u32,
1474    center: DVec2,
1475    radius: f64,
1476    color: [u8; 4],
1477) {
1478    let min_x = (center.x - radius).floor() as i32;
1479    let max_x = (center.x + radius).ceil() as i32;
1480    let min_y = (center.y - radius).floor() as i32;
1481    let max_y = (center.y + radius).ceil() as i32;
1482
1483    for y in min_y..=max_y {
1484        for x in min_x..=max_x {
1485            let dx = f64::from(x) - center.x;
1486            let dy = f64::from(y) - center.y;
1487            if dx * dx + dy * dy <= radius * radius {
1488                alpha_plot(pixels, width, height, x, y, color);
1489            }
1490        }
1491    }
1492}
1493
1494fn draw_text(
1495    pixels: &mut [u8],
1496    width: u32,
1497    height: u32,
1498    position: DVec2,
1499    text: &str,
1500    color: [u8; 4],
1501) {
1502    let mut cursor_x = position.x.round() as i32;
1503    let cursor_y = position.y.round() as i32;
1504    for ch in text.chars() {
1505        draw_char(pixels, width, height, cursor_x, cursor_y, ch, color);
1506        cursor_x += 6;
1507    }
1508}
1509
1510fn draw_char(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, ch: char, color: [u8; 4]) {
1511    let glyph = glyph_rows(ch);
1512    for (row_index, row_bits) in glyph.iter().enumerate() {
1513        for col in 0..5 {
1514            if (row_bits >> (4 - col)) & 1 == 1 {
1515                alpha_plot(pixels, width, height, x + col, y + row_index as i32, color);
1516            }
1517        }
1518    }
1519}
1520
/// Source-over blend a single pixel into a straight-alpha RGBA8 buffer.
///
/// Out-of-bounds coordinates are silently ignored, so callers can draw
/// shapes that overhang the image edges.
fn alpha_plot(pixels: &mut [u8], width: u32, height: u32, x: i32, y: i32, color: [u8; 4]) {
    let in_bounds = (0..width as i32).contains(&x) && (0..height as i32).contains(&y);
    if !in_bounds {
        return;
    }
    let index = ((y as u32 * width + x as u32) * 4) as usize;
    let src_a = f32::from(color[3]) / 255.0;
    let dst_a = f32::from(pixels[index + 3]) / 255.0;
    // Porter-Duff "over" on straight (non-premultiplied) alpha.
    let out_a = src_a + dst_a * (1.0 - src_a);
    let blend_channel = |src: u8, dst: u8| -> u8 {
        if out_a <= f32::EPSILON {
            // Fully transparent result: the color channels are irrelevant.
            0
        } else {
            let numerator = f32::from(src) * src_a + f32::from(dst) * dst_a * (1.0 - src_a);
            (numerator / out_a).round().clamp(0.0, 255.0) as u8
        }
    };

    for channel in 0..3 {
        pixels[index + channel] = blend_channel(color[channel], pixels[index + channel]);
    }
    pixels[index + 3] = (out_a * 255.0).round().clamp(0.0, 255.0) as u8;
}
1544
/// 5x7 bitmap glyph for `ch`, row-major; bit 4 of each row is the leftmost
/// pixel.
///
/// Case-insensitive. Only the characters used by anatomical orientation
/// labels (A, I, L, P, R, S) and space are defined; any other character
/// renders as a fallback glyph.
fn glyph_rows(ch: char) -> [u8; 7] {
    const GLYPHS: [(char, [u8; 7]); 7] = [
        ('A', [0b01110, 0b10001, 0b10001, 0b11111, 0b10001, 0b10001, 0b10001]),
        ('I', [0b11111, 0b00100, 0b00100, 0b00100, 0b00100, 0b00100, 0b11111]),
        ('L', [0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b10000, 0b11111]),
        ('P', [0b11110, 0b10001, 0b10001, 0b11110, 0b10000, 0b10000, 0b10000]),
        ('R', [0b11110, 0b10001, 0b10001, 0b11110, 0b10100, 0b10010, 0b10001]),
        ('S', [0b01111, 0b10000, 0b10000, 0b01110, 0b00001, 0b00001, 0b11110]),
        (' ', [0, 0, 0, 0, 0, 0, 0]),
    ];
    const FALLBACK: [u8; 7] = [0b11111, 0b00001, 0b00010, 0b00100, 0b00100, 0b00000, 0b00100];

    let wanted = ch.to_ascii_uppercase();
    GLYPHS
        .iter()
        .find(|(glyph_ch, _)| *glyph_ch == wanted)
        .map_or(FALLBACK, |(_, rows)| *rows)
}
1571
#[cfg(test)]
mod tests {
    use super::*;

    // CPU-only unit tests; GPU-dependent checks live in `gpu_smoke_tests`.

    #[test]
    fn glyph_table_contains_expected_rows() {
        assert_eq!(glyph_rows('R')[0], 0b11110);
        assert_eq!(glyph_rows('I')[6], 0b11111);
    }

    // These codes are packed into `SliceUniforms::slab_params` and presumably
    // consumed by the reslice shader, so pin them against accidental change.
    #[test]
    fn thick_slab_mode_codes_are_stable() {
        assert_eq!(thick_slab_mode_code(ThickSlabMode::Mip), 0);
        assert_eq!(thick_slab_mode_code(ThickSlabMode::MinIp), 1);
        assert_eq!(thick_slab_mode_code(ThickSlabMode::Mean), 2);
    }
}
1589
1590#[cfg(all(test, feature = "snapshot-tests"))]
1591mod gpu_smoke_tests {
1592    use super::*;
1593    use std::sync::mpsc;
1594
1595    use glam::{DMat3, DVec3, UVec3};
1596    use volren_core::{Volume, VolumeRenderParams};
1597
1598    fn test_device() -> Option<(wgpu::Device, wgpu::Queue)> {
1599        pollster::block_on(async {
1600            let instance = wgpu::Instance::default();
1601            let adapter = instance
1602                .request_adapter(&wgpu::RequestAdapterOptions {
1603                    power_preference: wgpu::PowerPreference::LowPower,
1604                    compatible_surface: None,
1605                    force_fallback_adapter: false,
1606                })
1607                .await?;
1608            adapter
1609                .request_device(&wgpu::DeviceDescriptor::default(), None)
1610                .await
1611                .ok()
1612        })
1613    }
1614
1615    fn small_volume() -> DynVolume {
1616        let mut data = vec![0u16; 16 * 16 * 16];
1617        data[8 + 8 * 16 + 8 * 16 * 16] = 2048;
1618        Volume::from_data(
1619            data,
1620            UVec3::new(16, 16, 16),
1621            DVec3::ONE,
1622            DVec3::ZERO,
1623            DMat3::IDENTITY,
1624            1,
1625        )
1626        .expect("valid test volume")
1627        .into()
1628    }
1629
1630    fn sphere_volume() -> DynVolume {
1631        let dims = UVec3::new(32, 32, 32);
1632        let center = DVec3::new(15.5, 15.5, 15.5);
1633        let radius = 9.0;
1634        let mut data = vec![0u16; (dims.x * dims.y * dims.z) as usize];
1635
1636        for z in 0..dims.z {
1637            for y in 0..dims.y {
1638                for x in 0..dims.x {
1639                    let index = (z * dims.x * dims.y + y * dims.x + x) as usize;
1640                    let point = DVec3::new(f64::from(x), f64::from(y), f64::from(z));
1641                    data[index] = if (point - center).length() <= radius {
1642                        2048
1643                    } else {
1644                        0
1645                    };
1646                }
1647            }
1648        }
1649
1650        Volume::from_data(data, dims, DVec3::ONE, DVec3::ZERO, DMat3::IDENTITY, 1)
1651            .expect("valid sphere volume")
1652            .into()
1653    }
1654
1655    fn read_texture(
1656        device: &wgpu::Device,
1657        queue: &wgpu::Queue,
1658        texture: &wgpu::Texture,
1659        width: u32,
1660        height: u32,
1661    ) -> Vec<u8> {
1662        let unpadded_bytes_per_row = width * 4;
1663        let padded_bytes_per_row = unpadded_bytes_per_row.div_ceil(256) * 256;
1664        let buffer_size = u64::from(padded_bytes_per_row) * u64::from(height);
1665        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
1666            label: Some("volren_test_readback"),
1667            size: buffer_size,
1668            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
1669            mapped_at_creation: false,
1670        });
1671
1672        let mut encoder =
1673            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1674        encoder.copy_texture_to_buffer(
1675            texture.as_image_copy(),
1676            wgpu::TexelCopyBufferInfo {
1677                buffer: &buffer,
1678                layout: wgpu::TexelCopyBufferLayout {
1679                    offset: 0,
1680                    bytes_per_row: Some(padded_bytes_per_row),
1681                    rows_per_image: Some(height),
1682                },
1683            },
1684            wgpu::Extent3d {
1685                width,
1686                height,
1687                depth_or_array_layers: 1,
1688            },
1689        );
1690        queue.submit(std::iter::once(encoder.finish()));
1691
1692        let (sender, receiver) = mpsc::channel();
1693        buffer
1694            .slice(..)
1695            .map_async(wgpu::MapMode::Read, move |result| {
1696                let _ = sender.send(result);
1697            });
1698        let _ = device.poll(wgpu::MaintainBase::Wait);
1699        receiver.recv().expect("map callback").expect("map success");
1700
1701        let mapped = buffer.slice(..).get_mapped_range();
1702        let mut pixels = vec![0u8; (unpadded_bytes_per_row * height) as usize];
1703        for row in 0..height as usize {
1704            let src_offset = row * padded_bytes_per_row as usize;
1705            let dst_offset = row * unpadded_bytes_per_row as usize;
1706            pixels[dst_offset..dst_offset + unpadded_bytes_per_row as usize]
1707                .copy_from_slice(&mapped[src_offset..src_offset + unpadded_bytes_per_row as usize]);
1708        }
1709        drop(mapped);
1710        buffer.unmap();
1711        pixels
1712    }
1713
1714    fn checksum(bytes: &[u8]) -> u64 {
1715        bytes.iter().enumerate().fold(0u64, |acc, (index, value)| {
1716            acc.wrapping_add((index as u64 + 1) * u64::from(*value))
1717        })
1718    }
1719
1720    #[test]
1721    #[ignore = "requires a working GPU adapter"]
1722    fn render_volume_smoke_test() {
1723        let Some((device, queue)) = test_device() else {
1724            return;
1725        };
1726        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
1727        let volume = small_volume();
1728        renderer.set_volume(&volume, true);
1729        renderer
1730            .set_render_params(&VolumeRenderParams::default())
1731            .expect("render params upload");
1732
1733        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 50.0), DVec3::ZERO, 30.0);
1734        let texture = renderer.create_render_target(64, 64);
1735        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1736        let mut encoder =
1737            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1738        renderer
1739            .render_volume(
1740                &mut encoder,
1741                &view,
1742                &camera,
1743                &VolumeRenderParams::default(),
1744                Viewport::full(64, 64),
1745            )
1746            .expect("volume render");
1747        queue.submit(std::iter::once(encoder.finish()));
1748    }
1749
1750    #[test]
1751    #[ignore = "requires a working GPU adapter"]
1752    fn render_sphere_snapshot_checksum() {
1753        let Some((device, queue)) = test_device() else {
1754            return;
1755        };
1756        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
1757        let volume = sphere_volume();
1758        renderer.set_volume(&volume, true);
1759        let params = VolumeRenderParams::default();
1760        renderer
1761            .set_render_params(&params)
1762            .expect("render params upload");
1763
1764        let camera = Camera::new_perspective(DVec3::new(0.0, 0.0, 60.0), DVec3::ZERO, 30.0);
1765        let texture = renderer.create_render_target(64, 64);
1766        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1767        let mut encoder =
1768            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1769        renderer
1770            .render_volume(
1771                &mut encoder,
1772                &view,
1773                &camera,
1774                &params,
1775                Viewport::full(64, 64),
1776            )
1777            .expect("volume render");
1778        queue.submit(std::iter::once(encoder.finish()));
1779
1780        let pixels = read_texture(&device, &queue, &texture, 64, 64);
1781        let image_checksum = checksum(&pixels);
1782        eprintln!("sphere checksum: {image_checksum}");
1783        assert!(image_checksum > 0, "rendered sphere should not be empty");
1784    }
1785
1786    #[test]
1787    #[ignore = "requires a working GPU adapter"]
1788    fn render_slice_and_crosshair_smoke_test() {
1789        let Some((device, queue)) = test_device() else {
1790            return;
1791        };
1792        let mut renderer = VolumeRenderer::new(&device, &queue, wgpu::TextureFormat::Rgba8Unorm);
1793        let volume = small_volume();
1794        renderer.set_volume(&volume, true);
1795        renderer
1796            .set_render_params(&VolumeRenderParams::default())
1797            .expect("render params upload");
1798
1799        let texture = renderer.create_render_target(64, 64);
1800        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
1801        let mut encoder =
1802            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
1803        renderer
1804            .render_slice(
1805                &mut encoder,
1806                &view,
1807                &SlicePlane::axial(0.0, 32.0),
1808                &WindowLevel::from_scalar_range(0.0, 2048.0),
1809                Viewport::full(64, 64),
1810                None,
1811            )
1812            .expect("slice render");
1813        renderer
1814            .render_crosshair(
1815                &mut encoder,
1816                &view,
1817                Viewport::full(64, 64),
1818                &CrosshairParams::default(),
1819            )
1820            .expect("crosshair render");
1821        queue.submit(std::iter::once(encoder.finish()));
1822    }
1823}