dicom_pixeldata/
lib.rs

1#![allow(clippy::derive_partial_eq_without_eq)]
2//! This crate contains the DICOM pixel data handlers and is
3//! responsible for decoding various forms of native and compressed pixel data,
4//! such as JPEG lossless,
//! and converting it into more usable data structures.
6//!
7//! Once the pixel data is decoded,
8//! the decoded data can be converted to:
9//! - a vector of flat pixel data values;
10//! - a [multi-dimensional array](ndarray::Array), using [`ndarray`];
11//! - or a [dynamic image object](image::DynamicImage), using [`image`].
12//!
13//! This conversion includes
14//! eventual Modality and value of interest (VOI) transformations,
15//! either for further processing or presentation.
16//!
17//! # Encoding support
18//!
19//! The pixel data encodings supported by `dicom-pixeldata`
20//! are backed by the [`dicom-transfer-syntax-registry`][ts-registry] crate.
21//! By default, this crate will consider this set of image encoding implementations written in pure Rust:
22//!
23//! - `jpeg` for JPEG lossy and lossless encodings via `jpeg-decoder` and `jpeg-encoder`;
24//! - `rle` for RLE compressed pixel data;
25//! - `deflate` for deflated data set compression via `flate2`.
26//!
27//! See the [`dicom-transfer-syntax-registry` documentation][ts-registry]
28//! for an extended list of supported encodings and more details.
29//!
30//! Alternatively, this library has an integration with [GDCM bindings],
31//! which serves as a different backend.
32//! This allows for decoding pixel data
33//! in transfer syntaxes which are only supported by GDCM.
34//! This integration is behind the Cargo feature "gdcm",
35//! which requires CMake and a C++ compiler.
36//!
37//! [ts-registry]: https://docs.rs/dicom-transfer-syntax-registry
38//! [GDCM bindings]: https://crates.io/crates/gdcm-rs
39//!
40//! ```toml
41//! dicom-pixeldata = { version = "0.8", features = ["gdcm"] }
42//! ```
43//!
//! # Usage
//!
//! Decode a DICOM object's pixel data through the [`PixelDecoder`] trait,
//! then convert the resulting [`DecodedPixelData`]
//! using one of its conversion methods,
//! as shown in the examples below.
//!
47//! # WebAssembly support
48//!
49//! This library works in WebAssembly with the following two measures:
50//!  - Ensure that the "gdcm" feature is disabled.
//!    Some Cargo features referring to encodings
52//!    which depend on bindings to C or C++ libraries
53//!    might also need to be disabled.
54//!  - And either set up [`wasm-bindgen-rayon`][1]
55//!    or disable the `rayon` feature.
56//!
57//! [1]: https://crates.io/crates/wasm-bindgen-rayon
58//!
59//! # Examples
60//!
61//! To convert a DICOM object into a dynamic image
62//! (requires the `image` feature):
63//! ```no_run
64//! # use std::error::Error;
65//! use dicom_object::open_file;
66//! use dicom_pixeldata::PixelDecoder;
67//! # #[cfg(feature = "image")]
68//! # fn main() -> Result<(), Box<dyn Error>> {
69//! let obj = open_file("dicom.dcm")?;
70//! let image = obj.decode_pixel_data()?;
71//! let dynamic_image = image.to_dynamic_image(0)?;
72//! dynamic_image.save("out.png")?;
73//! # Ok(())
74//! # }
75//! # #[cfg(not(feature = "image"))]
76//! # fn main() {}
77//! ```
78//!
79//! To convert a DICOM object into an ndarray
80//! (requires the `ndarray` feature):
81//! ```no_run
82//! # use std::error::Error;
83//! use dicom_object::open_file;
84//! use dicom_pixeldata::PixelDecoder;
85//! # #[cfg(feature = "ndarray")]
86//! use ndarray::s;
87//! # #[cfg(feature = "ndarray")]
88//! # fn main() -> Result<(), Box<dyn Error>> {
89//! let obj = open_file("rgb_dicom.dcm")?;
90//! let pixel_data = obj.decode_pixel_data()?;
91//! let ndarray = pixel_data.to_ndarray::<u16>()?;
92//! let red_values = ndarray.slice(s![.., .., .., 0]);
93//! # Ok(())
94//! # }
95//! # #[cfg(not(feature = "ndarray"))]
96//! # fn main() {}
97//! ```
98//!
99//! In order to parameterize the conversion,
100//! pass a conversion options value to the `_with_options` variant methods.
101//!
102//! ```no_run
103//! # use std::error::Error;
104//! use dicom_object::open_file;
105//! use dicom_pixeldata::{ConvertOptions, PixelDecoder, VoiLutOption};
106//! # #[cfg(feature = "image")]
107//! # fn main() -> Result<(), Box<dyn Error>> {
108//! let obj = open_file("dicom.dcm")?;
109//! let image = obj.decode_pixel_data()?;
110//! let options = ConvertOptions::new()
111//!     .with_voi_lut(VoiLutOption::Normalize)
112//!     .force_8bit();
113//! let dynamic_image = image.to_dynamic_image_with_options(0, &options)?;
114//! # Ok(())
115//! # }
116//! # #[cfg(not(feature = "image"))]
117//! # fn main() {}
118//! ```
119//!
120//! See [`ConvertOptions`] for the options available,
121//! including the default behavior for each method.
122//!
123
124use attribute::VoiLut;
125use byteorder::{ByteOrder, NativeEndian};
126#[cfg(not(feature = "gdcm"))]
127use dicom_core::{DataDictionary, DicomValue};
128use dicom_encoding::adapters::DecodeError;
129#[cfg(not(feature = "gdcm"))]
130use dicom_encoding::transfer_syntax::TransferSyntaxIndex;
131#[cfg(not(feature = "gdcm"))]
132use dicom_encoding::Codec;
133#[cfg(not(feature = "gdcm"))]
134use dicom_object::{FileDicomObject, InMemDicomObject};
135#[cfg(not(feature = "gdcm"))]
136use dicom_transfer_syntax_registry::TransferSyntaxRegistry;
137#[cfg(feature = "image")]
138use image::{DynamicImage, ImageBuffer, Luma, Rgb};
139#[cfg(feature = "ndarray")]
140use ndarray::{Array, Ix3, Ix4};
141use num_traits::NumCast;
142#[cfg(feature = "rayon")]
143use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator};
144#[cfg(all(feature = "rayon", feature = "image"))]
145use rayon::slice::ParallelSliceMut;
146#[cfg(not(feature = "gdcm"))]
147use snafu::ensure;
148#[cfg(any(not(feature = "gdcm"), feature = "image"))]
149use snafu::OptionExt;
150use snafu::{Backtrace, ResultExt, Snafu};
151use std::borrow::Cow;
152#[cfg(not(feature = "gdcm"))]
153use std::iter::zip;
154
155#[cfg(feature = "image")]
156pub use image;
157#[cfg(feature = "ndarray")]
158pub use ndarray;
159
160mod attribute;
161mod lut;
162mod transcode;
163
164pub mod encapsulation;
165pub(crate) mod transform;
166
167// re-exports
168pub use attribute::{
169    AttributeName, PhotometricInterpretation, PixelRepresentation, PlanarConfiguration,
170};
171pub use lut::{CreateLutError, Lut};
172pub use transcode::{Error as TranscodeError, Result as TranscodeResult, Transcode};
173pub use transform::{Rescale, VoiLutFunction, WindowLevel, WindowLevelTransform};
174
175#[cfg(feature = "gdcm")]
176mod gdcm;
177
/// Error type for most pixel data related operations.
///
/// This is an opaque wrapper around the crate-private `InnerError`,
/// so that the concrete error variants can evolve
/// without breaking the public API.
#[derive(Debug, Snafu)]
pub struct Error(InnerError);
181
/// Inner error type
///
/// Kept private so that new variants can be added
/// without affecting the public [`Error`] type.
#[derive(Debug, Snafu)]
#[non_exhaustive]
enum InnerError {
    /// Failed to retrieve a required DICOM attribute
    #[snafu(transparent)]
    GetAttribute {
        #[snafu(backtrace)]
        source: attribute::GetAttributeError,
    },

    /// The _PixelData_ attribute had an unexpected value form
    #[snafu(display("PixelData attribute is not a primitive value or pixel sequence"))]
    InvalidPixelData { backtrace: Backtrace },

    /// _BitsAllocated_ had a value other than the supported ones
    #[snafu(display("Invalid BitsAllocated, must be 1, 8 or 16"))]
    InvalidBitsAllocated { backtrace: Backtrace },

    /// The photometric interpretation is not supported by the conversion
    #[cfg(any(feature = "image", feature = "gdcm"))]
    #[snafu(display("Unsupported PhotometricInterpretation `{pi}`"))]
    UnsupportedPhotometricInterpretation {
        pi: PhotometricInterpretation,
        backtrace: Backtrace,
    },

    /// _SamplesPerPixel_ had a value not supported by the conversion
    #[cfg(feature = "image")]
    #[snafu(display("Unsupported SamplesPerPixel `{spp}`"))]
    UnsupportedSamplesPerPixel { spp: u16, backtrace: Backtrace },

    /// Catch-all for other unsupported attribute values
    #[snafu(display("Unsupported {name} `{value}`"))]
    UnsupportedOther {
        name: &'static str,
        value: String,
        backtrace: Backtrace,
    },

    /// The transfer syntax UID was not found in the registry
    #[snafu(display("Unknown transfer syntax `{ts_uid}`"))]
    UnknownTransferSyntax {
        ts_uid: String,
        backtrace: Backtrace,
    },

    /// The transfer syntax is known but cannot be decoded
    #[snafu(display("Unsupported TransferSyntax `{ts}`"))]
    UnsupportedTransferSyntax { ts: String, backtrace: Backtrace },

    /// The sample buffer did not fit the expected image dimensions
    #[cfg(feature = "image")]
    #[snafu(display("Invalid buffer when constructing ImageBuffer"))]
    InvalidImageBuffer { backtrace: Backtrace },

    /// The pixel data dimensions did not form a valid array shape
    #[cfg(feature = "ndarray")]
    #[snafu(display("Invalid shape for ndarray"))]
    InvalidShape {
        source: ndarray::ShapeError,
        backtrace: Backtrace,
    },

    /// Could not create LUT for target data type
    CreateLut {
        source: lut::CreateLutError,
        backtrace: Backtrace,
    },

    /// The requested element type cannot represent the sample values
    #[snafu(display("Invalid data type for ndarray element"))]
    InvalidDataType { backtrace: Backtrace },

    /// Decoding of the (possibly compressed) pixel data failed
    #[snafu(display("Could not decode pixel data"))]
    DecodePixelData { source: DecodeError },

    /// The requested frame index exceeds the available frames
    #[snafu(display("Frame #{frame_number} is out of range"))]
    FrameOutOfRange {
        frame_number: u32,
        backtrace: Backtrace,
    },
    /// Per-frame VOI LUT function count did not match the frame count
    #[snafu(display("Value multiplicity of VOI LUT Function must match the number of frames. Expected `{nr_frames:?}`, found `{vm:?}`"))]
    LengthMismatchVoiLutFunction {
        vm: u32,
        nr_frames: u32,
        backtrace: Backtrace,
    },
    /// Rescale slope and intercept counts did not match
    #[snafu(display("Value multiplicity of Rescale Slope/Intercept must match. Found `{slope_vm:?}` (slope), `{intercept_vm:?}` (intercept)"))]
    LengthMismatchRescale {
        intercept_vm: u32,
        slope_vm: u32,
        backtrace: Backtrace,
    },
    /// Window center and width counts did not match
    #[snafu(display("Value multiplicity of Window Center/Width must match. Found `{wc_vm:?}` (center), `{ww_vm:?}` (width)"))]
    LengthMismatchWindowLevel {
        wc_vm: u32,
        ww_vm: u32,
        backtrace: Backtrace,
    },
    /// Per-frame VOI LUT count did not match the frame count
    #[snafu(display("Value multiplicity of VOI LUT must match the number of frames. Expected `{nr_frames:?}`, found `{vm:?}`"))]
    LengthMismatchVoiLut {
        vm: u32,
        nr_frames: u32,
        backtrace: Backtrace,
    },
}
278
/// Convenience result type for pixel data operations,
/// defaulting the error type to this crate's [`Error`].
pub type Result<T, E = Error> = std::result::Result<T, E>;
280
281impl From<attribute::GetAttributeError> for crate::Error {
282    fn from(source: attribute::GetAttributeError) -> Self {
283        Error(crate::InnerError::GetAttribute { source })
284    }
285}
286
/// Option set for converting decoded pixel data
/// into other common data structures,
/// such as a vector, an image, or a multidimensional array.
///
/// Each option listed affects the transformation in this order:
/// 1. The Modality LUT function (`modality_lut`)
///    is applied to the raw pixel data sample values.
///    This is usually an affine function based on the
///    _Rescale Slope_ and _Rescale Intercept_ attributes.
///    If this option is set to [`None`](ModalityLutOption::None),
///    the VOI LUT function is ignored.
/// 2. The VOI LUT function (`voi_lut`)
///    is applied to the rescaled values,
///    such as a window level.
/// 3. In the case of converting to an image,
///    the transformed values are extended or narrowed
///    to the range of the target bit depth (`bit_depth`).
#[derive(Debug, Default, Clone, PartialEq)]
#[non_exhaustive]
pub struct ConvertOptions {
    /// Modality LUT option
    pub modality_lut: ModalityLutOption,
    /// VOI LUT option
    pub voi_lut: VoiLutOption,
    /// Output image bit depth
    /// (only relevant when converting to an image)
    pub bit_depth: BitDepthOption,
}
314
315impl ConvertOptions {
316    pub fn new() -> Self {
317        Default::default()
318    }
319
320    /// Set the modality LUT option.
321    pub fn with_modality_lut(mut self, modality_lut: ModalityLutOption) -> Self {
322        self.modality_lut = modality_lut;
323        self
324    }
325
326    /// Set the VOI LUT option.
327    pub fn with_voi_lut(mut self, voi_lut: VoiLutOption) -> Self {
328        self.voi_lut = voi_lut;
329        self
330    }
331
332    /// Set the output bit depth option.
333    pub fn with_bit_depth(mut self, bit_depth: BitDepthOption) -> Self {
334        self.bit_depth = bit_depth;
335        self
336    }
337
338    /// Set the output bit depth option to force 8 bits.
339    ///
340    /// This is equivalent to `self.with_bit_depth(BitDepthOption::Force8Bit)`.
341    pub fn force_8bit(mut self) -> Self {
342        self.bit_depth = BitDepthOption::Force8Bit;
343        self
344    }
345
346    /// Set the output bit depth option to force 16 bits.
347    ///
348    /// This is equivalent to `self.with_bit_depth(BitDepthOption::Force16Bit)`.
349    pub fn force_16bit(mut self) -> Self {
350        self.bit_depth = BitDepthOption::Force16Bit;
351        self
352    }
353}
354
/// Modality LUT function specifier.
///
/// See also [`ConvertOptions`].
#[derive(Debug, Default, Clone, PartialEq)]
#[non_exhaustive]
pub enum ModalityLutOption {
    /// _Default behavior:_
    /// rescale the pixel data values
    /// as described in the decoded pixel data.
    #[default]
    Default,
    /// Rescale the pixel data values
    /// according to the given rescale parameters.
    Override(Rescale),
    /// Do not rescale nor transform the pixel data value samples.
    ///
    /// This also overrides any option to apply VOI LUT transformations
    /// in the decoded pixel data conversion methods.
    /// To assume the identity function for rescaling
    /// and apply the VOI LUT transformations as normal,
    /// use the `Override` variant instead.
    None,
}
378
/// VOI LUT function specifier.
///
/// Note that the VOI LUT function is only applied
/// alongside a modality LUT function.
///
/// See also [`ConvertOptions`].
#[derive(Debug, Default, Clone, PartialEq)]
#[non_exhaustive]
pub enum VoiLutOption {
    /// _Default behavior:_
    /// apply the first VOI LUT function transformation described in the pixel data
    /// only when converting to an image;
    /// no VOI LUT function is performed
    /// when converting to an ndarray or to bare pixel values.
    ///
    /// If both a VOI LUT table and window values are present in the pixel data,
    /// use the VOI LUT table.
    #[default]
    Default,
    /// Apply the first VOI LUT function transformation
    /// described in the pixel data,
    /// regardless of the conversion target.
    First,
    /// Apply a custom window level instead of the one described in the object.
    Custom(WindowLevel),
    /// Apply a custom window level and a custom function instead of the one described in the object.
    CustomWithFunction(WindowLevel, VoiLutFunction),
    /// Perform a min-max normalization instead,
    /// so that the lowest value is 0 and
    /// the highest value is the maximum value of the target type.
    Normalize,
    /// Do not apply any VOI LUT transformation.
    Identity,
}
412
/// Output image bit depth specifier.
///
/// Note that this is only applied
/// when converting to an image.
/// In the other cases,
/// output narrowing is already done by the caller
/// when specifying the intended output element type.
///
/// See also [`ConvertOptions`].
#[derive(Debug, Default, Copy, Clone, PartialEq)]
#[non_exhaustive]
pub enum BitDepthOption {
    /// _Default behavior:_
    /// infer the bit depth based on the input's number of bits per sample.
    #[default]
    Auto,
    /// Force the output image to have 8 bits per sample.
    Force8Bit,
    /// Force the output image to have 16 bits per sample.
    Force16Bit,
}
434
/// A blob of decoded pixel data.
///
/// This is the outcome of collecting a DICOM object's imaging-related attributes
/// into a decoded form
/// (see [`PixelDecoder`]).
/// The decoded pixel data samples will be stored as raw bytes in native form
/// without any LUT transformations applied.
/// Whether to apply such transformations
/// can be specified through one of the various `to_*` methods,
/// such as [`to_dynamic_image`](Self::to_dynamic_image)
/// and [`to_vec`](Self::to_vec).
#[derive(Debug, Clone)]
pub struct DecodedPixelData<'a> {
    /// the raw bytes of pixel data
    data: Cow<'a, [u8]>,
    /// the number of rows
    rows: u32,
    /// the number of columns
    cols: u32,
    /// the number of frames
    number_of_frames: u32,
    /// the photometric interpretation
    photometric_interpretation: PhotometricInterpretation,
    /// the number of samples per pixel
    samples_per_pixel: u16,
    /// the planar configuration: 0 for standard, 1 for channel-contiguous
    planar_configuration: PlanarConfiguration,
    /// the number of bits allocated, as a multiple of 8
    bits_allocated: u16,
    /// the number of bits stored
    bits_stored: u16,
    /// the high bit, usually `bits_stored - 1`
    high_bit: u16,
    /// the pixel representation: 0 for unsigned, 1 for signed
    pixel_representation: PixelRepresentation,
    /// Multiframe dicom objects can have rescale information, voi LUT and
    /// window level information once in the shared functional group sequence,
    /// or multiple times in the per-frame functional group sequence. This is a
    /// vector of intercepts and slopes, one for each frame.
    ///
    /// the pixel value rescale slope and intercept
    rescale: Vec<Rescale>,
    /// the VOI LUT function
    voi_lut_function: Option<Vec<VoiLutFunction>>,
    /// the window level specified via width and center
    window: Option<Vec<WindowLevel>>,
    /// the explicit VOI LUTs
    voi_lut_sequence: Option<Vec<VoiLut>>,

    /// Enforce frame functional groups VMs match `number_of_frames`
    enforce_frame_fg_vm_match: bool,
}
487
488impl DecodedPixelData<'_> {
489    // getter methods
490
    /// Retrieve a slice of all raw pixel data samples as bytes,
    /// irrespective of the expected size of each sample.
    ///
    /// See [`frame_data`](Self::frame_data)
    /// for retrieving the bytes of a single frame.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.data
    }
497
    /// Retrieve a copy of all raw pixel data samples
    /// as unsigned 16-bit integers.
    ///
    /// This is useful for retrieving pixel data
    /// with the _OW_ value representation.
    ///
    /// See [`data`](Self::data) for the raw bytes.
    #[inline]
    pub fn data_ow(&self) -> Vec<u16> {
        bytes_to_vec_u16(&self.data)
    }
507
508    /// Retrieve a slice of a frame's raw pixel data samples as bytes,
509    /// irrespective of the expected size of each sample.
510    pub fn frame_data(&self, frame: u32) -> Result<&[u8]> {
511        let bytes_per_sample = (self.bits_allocated as usize + 7) / 8;
512        let frame_length = self.rows as usize
513            * self.cols as usize
514            * self.samples_per_pixel as usize
515            * bytes_per_sample;
516        let frame_start = frame_length * frame as usize;
517        let frame_end = frame_start + frame_length;
518        if frame_end > (*self.data).len() {
519            FrameOutOfRangeSnafu {
520                frame_number: frame,
521            }
522            .fail()?
523        }
524
525        Ok(&self.data[frame_start..frame_end])
526    }
527
528    /// Retrieve a copy of a frame's raw pixel data samples
529    /// as unsigned 16-bit integers.
530    ///
531    /// This is useful for retrieving pixel data
532    /// with the _OW_ value representation.
533    pub fn frame_data_ow(&self, frame: u32) -> Result<Vec<u16>> {
534        let data = self.frame_data(frame)?;
535
536        Ok(bytes_to_vec_u16(data))
537    }
538
    /// Retrieves the number of rows of the pixel data
    /// (the height of each frame in pixels).
    #[inline]
    pub fn rows(&self) -> u32 {
        self.rows
    }
544
    /// Retrieves the number of columns of the pixel data
    /// (the width of each frame in pixels).
    #[inline]
    pub fn columns(&self) -> u32 {
        self.cols
    }
550
    /// Retrieves the photometric interpretation.
    ///
    /// See [`PhotometricInterpretation`] for the possible values.
    #[inline]
    pub fn photometric_interpretation(&self) -> &PhotometricInterpretation {
        &self.photometric_interpretation
    }
556
    /// Retrieves the planar configuration of the pixel data
    /// (0 for standard, 1 for channel-contiguous).
    ///
    /// The value returned is only meaningful for
    /// images with more than 1 sample per pixel.
    #[inline]
    pub fn planar_configuration(&self) -> PlanarConfiguration {
        self.planar_configuration
    }
565
    /// Retrieves the total number of frames
    /// in this piece of decoded pixel data.
    #[inline]
    pub fn number_of_frames(&self) -> u32 {
        self.number_of_frames
    }
572
    /// Retrieves the number of samples (color channels) per pixel.
    #[inline]
    pub fn samples_per_pixel(&self) -> u16 {
        self.samples_per_pixel
    }
578
    /// Retrieve the number of bits effectively used for each sample.
    ///
    /// This is never greater than
    /// [`bits_allocated`](Self::bits_allocated).
    #[inline]
    pub fn bits_stored(&self) -> u16 {
        self.bits_stored
    }
584
    /// Retrieve the number of bits allocated for each sample.
    #[inline]
    pub fn bits_allocated(&self) -> u16 {
        self.bits_allocated
    }
590
    /// Retrieve the high bit index of each sample,
    /// usually `bits_stored - 1`.
    #[inline]
    pub fn high_bit(&self) -> u16 {
        self.high_bit
    }
596
    /// Retrieve the pixel representation
    /// (0 for unsigned, 1 for signed samples).
    #[inline]
    pub fn pixel_representation(&self) -> PixelRepresentation {
        self.pixel_representation
    }
602
603    /// Retrieve object's rescale parameters.
604    #[inline]
605    pub fn rescale(&self) -> Result<&[Rescale]> {
606        match &self.rescale.len() {
607            0 => Ok(&[Rescale {
608                slope: 1.,
609                intercept: 0.,
610            }]),
611            1 => Ok(&self.rescale),
612            len => {
613                if *len == self.number_of_frames as usize {
614                    Ok(&self.rescale)
615                } else {
616                    if self.enforce_frame_fg_vm_match {
617                        LengthMismatchRescaleSnafu {
618                            slope_vm: *len as u32,
619                            intercept_vm: *len as u32,
620                        }
621                        .fail()?
622                    }
623                    tracing::warn!("Expected `{:?}` rescale parameters, found `{:?}`, using first value for all", self.number_of_frames, len);
624                    Ok(&self.rescale[0..1])
625                }
626            }
627        }
628    }
629
630    /// Retrieve the VOI LUT function defined by the object, if any.
631    #[inline]
632    pub fn voi_lut_function(&self) -> Result<Option<&[VoiLutFunction]>> {
633        if let Some(inner) = &self.voi_lut_function {
634            let res = match &inner.len() {
635                0 => Ok(None),
636                1 => Ok(Some(inner.as_slice())),
637                len => {
638                    if *len == self.number_of_frames as usize {
639                        Ok(Some(inner.as_slice()))
640                    } else {
641                        if self.enforce_frame_fg_vm_match {
642                            LengthMismatchVoiLutFunctionSnafu {
643                                vm: *len as u32,
644                                nr_frames: self.number_of_frames,
645                            }
646                            .fail()?
647                        }
648                        tracing::warn!("Expected `{:?}` VOI LUT functions, found `{:?}`, using first value for all", self.number_of_frames, len);
649                        Ok(Some(&inner[0..1]))
650                    }
651                }
652            };
653            res
654        } else {
655            Ok(None)
656        }
657    }
658
659    #[inline]
660    pub fn window(&self) -> Result<Option<&[WindowLevel]>> {
661        if let Some(inner) = &self.window {
662            let res = match &inner.len() {
663                0 => Ok(None),
664                1 => Ok(Some(inner.as_slice())),
665                len => {
666                    if *len == self.number_of_frames as usize {
667                        Ok(Some(inner.as_slice()))
668                    } else {
669                        if self.enforce_frame_fg_vm_match {
670                            LengthMismatchWindowLevelSnafu {
671                                ww_vm: *len as u32,
672                                wc_vm: *len as u32,
673                            }
674                            .fail()?
675                        }
676                        tracing::warn!("Expected `{:?}` Window Levels, found `{:?}`, using first value for all", self.number_of_frames, len);
677                        Ok(Some(&inner[0..1]))
678                    }
679                }
680            };
681            res
682        } else {
683            Ok(None)
684        }
685    }
686
687    /// Retrieve the VOI LUT sequence defined by the object, if any
688    pub fn voi_lut_sequence(&self) -> Result<Option<&[VoiLut]>> {
689        if let Some(inner) = &self.voi_lut_sequence {
690            match &inner.len() {
691                0 => Ok(None),
692                1 => Ok(Some(inner.as_slice())),
693                len => {
694                    if *len == self.number_of_frames as usize {
695                        Ok(Some(inner.as_slice()))
696                    } else {
697                        if self.enforce_frame_fg_vm_match {
698                            LengthMismatchVoiLutSnafu {
699                                vm: *len as u32,
700                                nr_frames: self.number_of_frames(),
701                            }
702                            .fail()?
703                        }
704                        tracing::warn!(
705                            "Expected `{:?}` VOI LUTs, found `{:?}`, using first value for all",
706                            self.number_of_frames,
707                            len
708                        );
709                        Ok(Some(&inner[0..1]))
710                    }
711                }
712            }
713        } else {
714            Ok(None)
715        }
716    }
717
718    // converter methods
719
720    /// Convert the decoded pixel data of a specific frame into a dynamic image.
721    ///
722    /// The default pixel data process pipeline
723    /// applies the Modality LUT function,
724    /// followed by the first VOI LUT transformation found in the object.
725    /// To change this behavior,
726    /// see [`to_dynamic_image_with_options`](Self::to_dynamic_image_with_options).
727    #[cfg(feature = "image")]
728    pub fn to_dynamic_image(&self, frame: u32) -> Result<DynamicImage> {
729        self.to_dynamic_image_with_options(frame, &ConvertOptions::default())
730    }
731
    /// Convert the decoded pixel data of a specific frame into a dynamic image.
    ///
    /// The `options` value allows you to specify
    /// which transformations should be done to the pixel data
    /// (primarily Modality LUT function and VOI LUT function).
    /// By default, both Modality and VOI LUT functions are applied
    /// according to the attributes of the given object.
    /// Note that certain options may be ignored
    /// if they do not apply.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use dicom_pixeldata::{ConvertOptions, DecodedPixelData, VoiLutOption, WindowLevel};
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// # let data: DecodedPixelData = unimplemented!();
    /// let options = ConvertOptions::new()
    ///     .with_voi_lut(VoiLutOption::Custom(WindowLevel {
    ///         center: -300.0,
    ///         width: 600.,
    ///     }));
    /// let img = data.to_dynamic_image_with_options(0, &options)?;
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "image")]
    pub fn to_dynamic_image_with_options(
        &self,
        frame: u32,
        options: &ConvertOptions,
    ) -> Result<DynamicImage> {
        match self.samples_per_pixel {
            // grayscale: full LUT pipeline is handled by the helper
            1 => self.build_monochrome_image(frame, options),
            3 => {
                // Modality LUT and VOI LUT
                // are currently ignored in this case

                // RGB, YBR_FULL or YBR_FULL_422 colors
                match self.bits_allocated {
                    8 => {
                        // channel-planar data is interleaved into per-pixel order first
                        let mut pixel_array = match self.planar_configuration {
                            PlanarConfiguration::Standard => self.frame_data(frame)?.to_vec(),
                            PlanarConfiguration::PixelFirst => interleave(self.frame_data(frame)?),
                        };

                        // Convert YBR_FULL or YBR_FULL_422 to RGB
                        let pixel_array = match &self.photometric_interpretation {
                            PhotometricInterpretation::Rgb => pixel_array,
                            PhotometricInterpretation::YbrFull
                            | PhotometricInterpretation::YbrFull422 => {
                                // in-place color space conversion
                                convert_colorspace_u8(&mut pixel_array);
                                pixel_array
                            }
                            pi => UnsupportedPhotometricInterpretationSnafu { pi: pi.clone() }
                                .fail()?,
                        };

                        self.rgb_image_with_extend(pixel_array, options.bit_depth)
                    }
                    16 => {
                        let mut pixel_array: Vec<u16> = match self.planar_configuration {
                            PlanarConfiguration::Standard => self.frame_data_ow(frame)?,
                            PlanarConfiguration::PixelFirst => {
                                // Would there be a way to avoid copying the data twice
                                // here (once in frame_data_ow and once in interleave)?
                                interleave(&(self.frame_data_ow(frame)?))
                            }
                        };

                        // Convert YBR_FULL or YBR_FULL_422 to RGB
                        let pixel_array = match &self.photometric_interpretation {
                            PhotometricInterpretation::Rgb => pixel_array,
                            PhotometricInterpretation::YbrFull
                            | PhotometricInterpretation::YbrFull422 => {
                                // in-place color space conversion
                                convert_colorspace_u16(&mut pixel_array);
                                pixel_array
                            }
                            pi => UnsupportedPhotometricInterpretationSnafu { pi: pi.clone() }
                                .fail()?,
                        };

                        self.rgb_image_with_narrow(pixel_array, options.bit_depth)
                    }
                    _ => InvalidBitsAllocatedSnafu.fail()?,
                }
            }
            spp => UnsupportedSamplesPerPixelSnafu { spp }.fail()?,
        }
    }
821
822    #[cfg(feature = "image")]
823    fn mono_image_with_narrow(
824        &self,
825        pixel_values: impl IntoIterator<Item = u16>,
826        bit_depth: BitDepthOption,
827    ) -> Result<DynamicImage> {
828        if bit_depth == BitDepthOption::Force8Bit {
829            // user requested 8 bits, narrow
830            let data: Vec<u8> = pixel_values.into_iter().map(|x| (x >> 8) as u8).collect();
831            let image_buffer: ImageBuffer<Luma<u8>, Vec<u8>> =
832                ImageBuffer::from_raw(self.cols, self.rows, data)
833                    .context(InvalidImageBufferSnafu)?;
834            Ok(DynamicImage::ImageLuma8(image_buffer))
835        } else {
836            let data: Vec<u16> = pixel_values.into_iter().collect();
837            let image_buffer: ImageBuffer<Luma<u16>, Vec<u16>> =
838                ImageBuffer::from_raw(self.cols, self.rows, data)
839                    .context(InvalidImageBufferSnafu)?;
840            Ok(DynamicImage::ImageLuma16(image_buffer))
841        }
842    }
843
844    #[cfg(all(feature = "image", feature = "rayon"))]
845    fn mono_image_with_narrow_par(
846        &self,
847        pixel_values: impl ParallelIterator<Item = u16>,
848        bit_depth: BitDepthOption,
849    ) -> Result<DynamicImage> {
850        if bit_depth == BitDepthOption::Force8Bit {
851            // user requested 8 bits, narrow
852            let data: Vec<u8> = pixel_values.map(|x| (x >> 8) as u8).collect();
853            let image_buffer: ImageBuffer<Luma<u8>, Vec<u8>> =
854                ImageBuffer::from_raw(self.cols, self.rows, data)
855                    .context(InvalidImageBufferSnafu)?;
856            Ok(DynamicImage::ImageLuma8(image_buffer))
857        } else {
858            let data: Vec<u16> = pixel_values.collect();
859            let image_buffer: ImageBuffer<Luma<u16>, Vec<u16>> =
860                ImageBuffer::from_raw(self.cols, self.rows, data)
861                    .context(InvalidImageBufferSnafu)?;
862            Ok(DynamicImage::ImageLuma16(image_buffer))
863        }
864    }
865
866    #[cfg(feature = "image")]
867    fn mono_image_with_extend(
868        &self,
869        pixel_values: impl IntoIterator<Item = u8>,
870        bit_depth: BitDepthOption,
871    ) -> Result<DynamicImage> {
872        if bit_depth == BitDepthOption::Force16Bit {
873            // user requested 16 bits, extend
874            let data = pixel_values
875                .into_iter()
876                .map(|x| x as u16)
877                .map(|x| (x << 8) + x)
878                .collect();
879            let image_buffer: ImageBuffer<Luma<u16>, Vec<u16>> =
880                ImageBuffer::from_raw(self.cols, self.rows, data)
881                    .context(InvalidImageBufferSnafu)?;
882            Ok(DynamicImage::ImageLuma16(image_buffer))
883        } else {
884            let data: Vec<u8> = pixel_values.into_iter().collect();
885            let image_buffer: ImageBuffer<Luma<u8>, Vec<u8>> =
886                ImageBuffer::from_raw(self.cols, self.rows, data)
887                    .context(InvalidImageBufferSnafu)?;
888            Ok(DynamicImage::ImageLuma8(image_buffer))
889        }
890    }
891
892    #[cfg(all(feature = "image", feature = "rayon"))]
893    fn mono_image_with_extend_par(
894        &self,
895        pixel_values: impl ParallelIterator<Item = u8>,
896        bit_depth: BitDepthOption,
897    ) -> Result<DynamicImage> {
898        if bit_depth == BitDepthOption::Force16Bit {
899            // user requested 16 bits, extend
900            let data = pixel_values
901                .map(|x| x as u16)
902                .map(|x| (x << 8) + x)
903                .collect();
904            let image_buffer: ImageBuffer<Luma<u16>, Vec<u16>> =
905                ImageBuffer::from_raw(self.cols, self.rows, data)
906                    .context(InvalidImageBufferSnafu)?;
907            Ok(DynamicImage::ImageLuma16(image_buffer))
908        } else {
909            let data: Vec<u8> = pixel_values.collect();
910            let image_buffer: ImageBuffer<Luma<u8>, Vec<u8>> =
911                ImageBuffer::from_raw(self.cols, self.rows, data)
912                    .context(InvalidImageBufferSnafu)?;
913            Ok(DynamicImage::ImageLuma8(image_buffer))
914        }
915    }
916
917    #[cfg(feature = "image")]
918    fn rgb_image_with_extend(
919        &self,
920        pixels: Vec<u8>,
921        bit_depth: BitDepthOption,
922    ) -> Result<DynamicImage> {
923        if bit_depth == BitDepthOption::Force16Bit {
924            // user requested 16 bits, extend
925            let data: Vec<u16> = pixels
926                .into_iter()
927                .map(|x| x as u16)
928                .map(|x| (x << 8) + x)
929                .collect();
930            let image_buffer: ImageBuffer<Rgb<u16>, Vec<u16>> =
931                ImageBuffer::from_raw(self.cols, self.rows, data)
932                    .context(InvalidImageBufferSnafu)?;
933            Ok(DynamicImage::ImageRgb16(image_buffer))
934        } else {
935            let image_buffer: ImageBuffer<Rgb<u8>, Vec<u8>> =
936                ImageBuffer::from_raw(self.cols, self.rows, pixels)
937                    .context(InvalidImageBufferSnafu)?;
938            Ok(DynamicImage::ImageRgb8(image_buffer))
939        }
940    }
941
942    #[cfg(feature = "image")]
943    fn rgb_image_with_narrow(
944        &self,
945        pixels: Vec<u16>,
946        bit_depth: BitDepthOption,
947    ) -> Result<DynamicImage> {
948        if bit_depth == BitDepthOption::Force8Bit {
949            // user requested 8 bits, narrow
950            let data: Vec<u8> = pixels.into_iter().map(|x| (x >> 8) as u8).collect();
951            let image_buffer: ImageBuffer<Rgb<u8>, Vec<u8>> =
952                ImageBuffer::from_raw(self.cols, self.rows, data)
953                    .context(InvalidImageBufferSnafu)?;
954            Ok(DynamicImage::ImageRgb8(image_buffer))
955        } else {
956            let image_buffer: ImageBuffer<Rgb<u16>, Vec<u16>> =
957                ImageBuffer::from_raw(self.cols, self.rows, pixels)
958                    .context(InvalidImageBufferSnafu)?;
959            Ok(DynamicImage::ImageRgb16(image_buffer))
960        }
961    }
962
    /// Build a [`DynamicImage`] for a single frame of monochrome pixel data,
    /// applying the Modality LUT and VOI LUT transformations
    /// selected in `options`, then inverting the result
    /// if the photometric interpretation is MONOCHROME1.
    #[cfg(feature = "image")]
    fn build_monochrome_image(&self, frame: u32, options: &ConvertOptions) -> Result<DynamicImage> {
        use transform::VoiLutTransform;

        // unpack the requested processing pipeline
        let ConvertOptions {
            modality_lut,
            voi_lut,
            bit_depth,
        } = options;

        // dispatch on the allocated sample size (bits)
        let mut image = match self.bits_allocated {
            8 => {
                let data = self.frame_data(frame)?;

                match modality_lut {
                    // simplest one, no transformations
                    ModalityLutOption::None => {
                        self.mono_image_with_extend(data.iter().copied(), *bit_depth)?
                    }
                    // other
                    ModalityLutOption::Default | ModalityLutOption::Override(..) => {
                        // resolve the rescale parameters for this frame:
                        // an explicit override wins; otherwise pick the
                        // per-frame entry when more than one is present
                        let rescale = {
                            let default = self.rescale()?;
                            if let ModalityLutOption::Override(rescale) = modality_lut {
                                *rescale
                            } else if default.len() > 1 {
                                default[frame as usize]
                            } else {
                                default[0]
                            }
                        };

                        let signed = self.pixel_representation == PixelRepresentation::Signed;

                        // build an 8-bit lookup table combining the rescale
                        // with the selected VOI transformation
                        let lut: Lut<u8> = match (voi_lut, self.window()?, self.voi_lut_sequence()?)
                        {
                            (VoiLutOption::Identity, _, _) => {
                                // NOTE(review): passes `false` for signedness here,
                                // unlike the 16-bit Identity arm which passes `signed`
                                // — confirm whether this asymmetry is intentional
                                Lut::new_rescale(8, false, rescale).context(CreateLutSnafu)?
                            }
                            // a VOI LUT sequence is present: apply its per-frame
                            // (or sole) item after the rescale
                            (
                                VoiLutOption::Default | VoiLutOption::First,
                                _,
                                Some(voi_lut_sequence),
                            ) => Lut::new_rescale_and_lut(
                                8,
                                signed,
                                rescale,
                                VoiLutTransform::new(
                                    if voi_lut_sequence.len() > 1 {
                                        &voi_lut_sequence[frame as usize]
                                    } else {
                                        &voi_lut_sequence[0]
                                    },
                                    8,
                                ),
                            )
                            .context(CreateLutSnafu)?,
                            // no VOI LUT sequence, but a window level is present:
                            // apply window/level with the object's VOI LUT function
                            // (linear when unspecified)
                            (VoiLutOption::Default | VoiLutOption::First, Some(window), _) => {
                                Lut::new_rescale_and_window(
                                    8,
                                    signed,
                                    rescale,
                                    WindowLevelTransform::new(
                                        match self.voi_lut_function()? {
                                            Some(lut) => {
                                                if lut.len() > 1 {
                                                    lut[frame as usize]
                                                } else {
                                                    lut[0]
                                                }
                                            }
                                            None => VoiLutFunction::Linear,
                                        },
                                        if window.len() > 1 {
                                            window[frame as usize]
                                        } else {
                                            window[0]
                                        },
                                    ),
                                )
                                .context(CreateLutSnafu)?
                            }
                            // neither available: fall back to min-max normalization
                            (VoiLutOption::Default | VoiLutOption::First, None, None) => {
                                tracing::warn!(
                                    "Could find neither VOI LUT nor window level for object"
                                );
                                Lut::new_rescale_and_normalize(
                                    8,
                                    signed,
                                    rescale,
                                    data.iter().copied(),
                                )
                                .context(CreateLutSnafu)?
                            }
                            // caller-supplied window level overrides object attributes
                            (VoiLutOption::Custom(window), _, _) => Lut::new_rescale_and_window(
                                8,
                                signed,
                                rescale,
                                WindowLevelTransform::new(
                                    match self.voi_lut_function()? {
                                        Some(lut) => {
                                            if lut.len() > 1 {
                                                lut[frame as usize]
                                            } else {
                                                lut[0]
                                            }
                                        }
                                        None => VoiLutFunction::Linear,
                                    },
                                    *window,
                                ),
                            )
                            .context(CreateLutSnafu)?,
                            // caller supplied both window level and VOI LUT function
                            (VoiLutOption::CustomWithFunction(window, function), _, _) => {
                                Lut::new_rescale_and_window(
                                    8,
                                    signed,
                                    rescale,
                                    WindowLevelTransform::new(*function, *window),
                                )
                                .context(CreateLutSnafu)?
                            }
                            // explicit request for min-max normalization
                            (VoiLutOption::Normalize, _, _) => Lut::new_rescale_and_normalize(
                                8,
                                signed,
                                rescale,
                                data.iter().copied(),
                            )
                            .context(CreateLutSnafu)?,
                        };

                        // apply the LUT to every sample
                        // (in parallel when the `rayon` feature is enabled)
                        #[cfg(feature = "rayon")]
                        {
                            let pixel_values = lut.map_par_iter(data.par_iter().copied());
                            self.mono_image_with_extend_par(pixel_values, *bit_depth)?
                        }
                        #[cfg(not(feature = "rayon"))]
                        {
                            let pixel_values = lut.map_iter(data.iter().copied());
                            self.mono_image_with_extend(pixel_values, *bit_depth)?
                        }
                    }
                }
            }
            16 => {
                match modality_lut {
                    // only take pixel representation,
                    // convert to image only after shifting values
                    // to an unsigned scale
                    ModalityLutOption::None => {
                        // locate this frame's byte range in the native data
                        // (2 bytes per sample)
                        let frame_length = self.rows as usize
                            * self.cols as usize
                            * 2
                            * self.samples_per_pixel as usize;
                        let frame_start = frame_length * frame as usize;
                        let frame_end = frame_start + frame_length;
                        if frame_end > (*self.data).len() {
                            FrameOutOfRangeSnafu {
                                frame_number: frame,
                            }
                            .fail()?
                        }

                        let buffer = match self.pixel_representation {
                            // Unsigned 16-bit representation
                            PixelRepresentation::Unsigned => {
                                bytes_to_vec_u16(&self.data[frame_start..frame_end])
                            }
                            // Signed 16-bit representation
                            PixelRepresentation::Signed => {
                                let mut signed_buffer = vec![0; frame_length / 2];
                                NativeEndian::read_i16_into(
                                    &self.data[frame_start..frame_end],
                                    &mut signed_buffer,
                                );
                                // Convert buffer to unsigned by shifting
                                convert_i16_to_u16(&signed_buffer)
                            }
                        };

                        self.mono_image_with_narrow(buffer.into_iter(), *bit_depth)?
                    }

                    ModalityLutOption::Default | ModalityLutOption::Override(..) => {
                        // resolve the rescale parameters for this frame
                        // (same precedence as in the 8-bit branch)
                        let rescale = {
                            let default = self.rescale()?;
                            if let ModalityLutOption::Override(rescale) = modality_lut {
                                *rescale
                            } else if default.len() > 1 {
                                // NOTE(review): indexes the `rescale` field directly,
                                // while the 8-bit branch indexes the local `default`
                                // — presumably equivalent if `rescale()` returns
                                // that same field; confirm
                                self.rescale[frame as usize]
                            } else {
                                default[0]
                            }
                        };

                        // fetch pixel data as a slice of u16 values,
                        // irrespective of pixel signedness
                        // (that is handled by the LUT)
                        let signed = self.pixel_representation == PixelRepresentation::Signed;
                        // Note: samples are not read as `i16` even if signed,
                        // because the LUT takes care of interpreting them properly.

                        let samples = self.frame_data_ow(frame)?;

                        // use 16-bit precision to prevent possible loss of precision in image
                        let lut: Lut<u16> =
                            match (voi_lut, self.window()?, self.voi_lut_sequence()?) {
                                (VoiLutOption::Identity, _, _) => {
                                    Lut::new_rescale(self.bits_stored, signed, rescale)
                                }
                                // a VOI LUT sequence is present:
                                // apply its per-frame (or sole) item
                                (
                                    VoiLutOption::Default | VoiLutOption::First,
                                    _,
                                    Some(voi_lut_sequence),
                                ) => Lut::new_rescale_and_lut(
                                    self.bits_stored,
                                    signed,
                                    rescale,
                                    VoiLutTransform::new(
                                        if voi_lut_sequence.len() > 1 {
                                            &voi_lut_sequence[frame as usize]
                                        } else {
                                            &voi_lut_sequence[0]
                                        },
                                        self.bits_stored,
                                    ),
                                ),
                                // window level is present: apply window/level
                                // with the object's VOI LUT function
                                (VoiLutOption::Default | VoiLutOption::First, Some(window), _) => {
                                    Lut::new_rescale_and_window(
                                        self.bits_stored,
                                        signed,
                                        rescale,
                                        WindowLevelTransform::new(
                                            match self.voi_lut_function()? {
                                                Some(lut) => {
                                                    if lut.len() > 1 {
                                                        lut[frame as usize]
                                                    } else {
                                                        lut[0]
                                                    }
                                                }
                                                None => VoiLutFunction::Linear,
                                            },
                                            if window.len() > 1 {
                                                window[frame as usize]
                                            } else {
                                                window[0]
                                            },
                                        ),
                                    )
                                }
                                // neither available: fall back to min-max normalization
                                (VoiLutOption::Default | VoiLutOption::First, None, None) => {
                                    tracing::warn!(
                                        "Could find neither VOI LUT nor window level for object"
                                    );

                                    Lut::new_rescale_and_normalize(
                                        self.bits_stored,
                                        signed,
                                        rescale,
                                        samples.iter().copied(),
                                    )
                                }
                                // caller-supplied window level overrides object attributes
                                (VoiLutOption::Custom(window), _, _) => {
                                    Lut::new_rescale_and_window(
                                        self.bits_stored,
                                        signed,
                                        rescale,
                                        WindowLevelTransform::new(
                                            match self.voi_lut_function()? {
                                                Some(lut) => {
                                                    if lut.len() > 1 {
                                                        lut[frame as usize]
                                                    } else {
                                                        lut[0]
                                                    }
                                                }
                                                None => VoiLutFunction::Linear,
                                            },
                                            *window,
                                        ),
                                    )
                                }
                                // caller supplied both window level and VOI LUT function
                                (VoiLutOption::CustomWithFunction(window, function), _, _) => {
                                    Lut::new_rescale_and_window(
                                        self.bits_stored,
                                        signed,
                                        rescale,
                                        WindowLevelTransform::new(*function, *window),
                                    )
                                }
                                // explicit request for min-max normalization
                                (VoiLutOption::Normalize, _, _) => Lut::new_rescale_and_normalize(
                                    self.bits_stored,
                                    signed,
                                    rescale,
                                    samples.iter().copied(),
                                ),
                            }
                            .context(CreateLutSnafu)?;

                        // apply the LUT to every sample
                        // (in parallel when the `rayon` feature is enabled)
                        #[cfg(feature = "rayon")]
                        {
                            let pixel_values = lut.map_par_iter(samples.par_iter().copied());
                            self.mono_image_with_narrow_par(pixel_values, *bit_depth)?
                        }
                        #[cfg(not(feature = "rayon"))]
                        {
                            let pixel_values = lut.map_iter(samples.iter().copied());
                            self.mono_image_with_narrow(pixel_values, *bit_depth)?
                        }
                    }
                }
            }
            1 => {
                // 1 bit allocated: assumes `frame_data` already yields
                // one byte per pixel — TODO confirm against its implementation
                let data = self.frame_data(frame)?;
                self.mono_image_with_extend(data.iter().copied(), *bit_depth)?
            }
            _ => InvalidBitsAllocatedSnafu.fail()?,
        };
        // Convert MONOCHROME1 => MONOCHROME2
        if self.photometric_interpretation == PhotometricInterpretation::Monochrome1 {
            image.invert();
        }
        Ok(image)
    }
1288
1289    /// Convert all of the decoded pixel data into a vector of flat pixels
1290    /// of a given type `T`.
1291    ///
1292    /// The values are provided in standard order and layout:
1293    /// pixels first, then columns, then rows, then frames.
1294    ///
1295    /// The underlying pixel data type is extracted based on
1296    /// the bits allocated and pixel representation,
1297    /// which is then converted to the requested type.
1298    /// Photometric interpretation is ignored.
1299    ///
1300    /// The default pixel data process pipeline
1301    /// applies only the Modality LUT function.
1302    /// To change this behavior,
1303    /// see [`to_vec_with_options`](Self::to_vec_with_options).
1304    ///
1305    /// # Example
1306    ///
1307    /// ```no_run
1308    /// # use dicom_pixeldata::{ConvertOptions, DecodedPixelData, VoiLutOption, WindowLevel};
1309    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
1310    /// # let data: DecodedPixelData = unimplemented!();
1311    /// // get the pixels of all frames as 32-bit modality values
1312    /// let all_pixels: Vec<f32> = data.to_vec()?;
1313    /// # Ok(())
1314    /// # }
1315    /// ```
1316    pub fn to_vec<T>(&self) -> Result<Vec<T>>
1317    where
1318        T: NumCast + Send + Sync + Copy + 'static,
1319    {
1320        let mut res: Vec<T> = Vec::new();
1321        for frame in 0..self.number_of_frames {
1322            let frame_data: Vec<T> =
1323                self.convert_pixel_slice(self.frame_data(frame)?, frame, &Default::default())?;
1324            res.extend(frame_data)
1325        }
1326        Ok(res)
1327    }
1328
1329    /// Convert all of the decoded pixel data into a vector of flat pixels
1330    /// of a given type `T`.
1331    ///
1332    /// The values are provided in standard order and layout:
1333    /// pixel first, then column, then row, with frames traversed last.
1334    ///
1335    /// The underlying pixel data type is extracted based on
1336    /// the bits allocated and pixel representation,
1337    /// which is then converted to the requested type.
1338    /// Photometric interpretation is ignored.
1339    ///
1340    /// The `options` value allows you to specify
1341    /// which transformations should be done to the pixel data
1342    /// (primarily Modality LUT function and VOI LUT function).
1343    /// By default, only the Modality LUT function is applied.
1344    pub fn to_vec_with_options<T>(&self, options: &ConvertOptions) -> Result<Vec<T>>
1345    where
1346        T: NumCast + Send + Sync + Copy + 'static,
1347    {
1348        let mut res: Vec<T> = Vec::new();
1349        for frame in 0..self.number_of_frames {
1350            let frame_data: Vec<T> =
1351                self.convert_pixel_slice(self.frame_data(frame)?, frame, options)?;
1352            res.extend(frame_data)
1353        }
1354        Ok(res)
1355    }
1356
1357    /// Convert the decoded pixel data of a frame
1358    /// into a vector of flat pixels of a given type `T`.
1359    ///
1360    /// The values are provided in standard order and layout:
1361    /// pixels first, then columns, then rows.
1362    ///
1363    /// The underlying pixel data type is extracted based on
1364    /// the bits allocated and pixel representation,
1365    /// which is then converted to the requested type.
1366    /// Photometric interpretation is ignored.
1367    ///
1368    /// The default pixel data process pipeline
1369    /// applies only the Modality LUT function.
1370    /// To change this behavior,
1371    /// see [`to_vec_frame_with_options`](Self::to_vec_frame_with_options).
1372    pub fn to_vec_frame<T>(&self, frame: u32) -> Result<Vec<T>>
1373    where
1374        T: NumCast + Send + Sync + Copy + 'static,
1375    {
1376        self.convert_pixel_slice(self.frame_data(frame)?, frame, &Default::default())
1377    }
1378
1379    /// Convert the decoded pixel data of a frame
1380    /// into a vector of flat pixels of a given type `T`.
1381    ///
1382    /// The values are provided in standard order and layout:
1383    /// pixels first, then columns, then rows.
1384    ///
1385    /// The underlying pixel data type is extracted based on
1386    /// the bits allocated and pixel representation,
1387    /// which is then converted to the requested type.
1388    /// Photometric interpretation is considered
1389    /// to identify whether rescaling should be applied.
1390    /// The pixel values are not inverted
1391    /// if photometric interpretation is `MONOCHROME1`.
1392    ///
1393    /// The `options` value allows you to specify
1394    /// which transformations should be done to the pixel data
1395    /// (primarily Modality LUT function and VOI LUT function).
1396    /// By default, only the Modality LUT function is applied
1397    /// according to the attributes of the given object.
1398    /// Note that certain options may be ignored
1399    /// if they do not apply.
1400    ///
1401    /// # Example
1402    ///
1403    /// ```no_run
1404    /// # use dicom_pixeldata::{ConvertOptions, DecodedPixelData, VoiLutOption, WindowLevel};
1405    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
1406    /// # let data: DecodedPixelData = unimplemented!();
1407    /// let options = ConvertOptions::new()
1408    ///     .with_voi_lut(VoiLutOption::Custom(WindowLevel {
1409    ///         center: -300.0,
1410    ///         width: 600.,
1411    ///     }));
1412    /// // get the pixels of the first frame with 8 bits per channel
1413    /// let first_frame_pixels: Vec<u8> = data.to_vec_frame_with_options(0, &options)?;
1414    /// # Ok(())
1415    /// # }
1416    /// ```
1417    pub fn to_vec_frame_with_options<T>(
1418        &self,
1419        frame: u32,
1420        options: &ConvertOptions,
1421    ) -> Result<Vec<T>>
1422    where
1423        T: NumCast + Send + Sync + Copy + 'static,
1424    {
1425        self.convert_pixel_slice(self.frame_data(frame)?, frame, options)
1426    }
1427
1428    fn convert_pixel_slice<T>(
1429        &self,
1430        data: &[u8],
1431        frame: u32,
1432        options: &ConvertOptions,
1433    ) -> Result<Vec<T>>
1434    where
1435        T: NumCast + Send + Sync + Copy + 'static,
1436    {
1437        let ConvertOptions {
1438            modality_lut,
1439            voi_lut,
1440            bit_depth: _,
1441        } = options;
1442
1443        if self.samples_per_pixel > 1 && self.planar_configuration != PlanarConfiguration::Standard
1444        {
1445            // TODO #129
1446            return UnsupportedOtherSnafu {
1447                name: "PlanarConfiguration",
1448                value: self.planar_configuration.to_string(),
1449            }
1450            .fail()?;
1451        }
1452
1453        match self.bits_allocated {
1454            8 => {
1455                match modality_lut {
1456                    ModalityLutOption::Default | ModalityLutOption::Override(_)
1457                        if self.photometric_interpretation.is_monochrome() =>
1458                    {
1459                        let rescale = {
1460                            let default = self.rescale()?;
1461                            if let ModalityLutOption::Override(rescale) = modality_lut {
1462                                *rescale
1463                            } else if default.len() > 1 {
1464                                default[frame as usize]
1465                            } else {
1466                                default[0]
1467                            }
1468                        };
1469                        let signed = self.pixel_representation == PixelRepresentation::Signed;
1470
1471                        let lut: Lut<T> = match (voi_lut, self.window()?) {
1472                            (VoiLutOption::Default | VoiLutOption::Identity, _) => {
1473                                Lut::new_rescale(8, signed, rescale)
1474                            }
1475                            (VoiLutOption::First, Some(window)) => Lut::new_rescale_and_window(
1476                                8,
1477                                signed,
1478                                rescale,
1479                                WindowLevelTransform::new(
1480                                    match self.voi_lut_function()? {
1481                                        Some(lut) => {
1482                                            if lut.len() > 1 {
1483                                                lut[frame as usize]
1484                                            } else {
1485                                                lut[0]
1486                                            }
1487                                        }
1488                                        None => VoiLutFunction::Linear,
1489                                    },
1490                                    if window.len() > 1 {
1491                                        window[frame as usize]
1492                                    } else {
1493                                        window[0]
1494                                    },
1495                                ),
1496                            ),
1497                            (VoiLutOption::First, None) => {
1498                                tracing::warn!("Could not find window level for object");
1499                                Lut::new_rescale(8, signed, rescale)
1500                            }
1501                            (VoiLutOption::Custom(window), _) => Lut::new_rescale_and_window(
1502                                8,
1503                                signed,
1504                                rescale,
1505                                WindowLevelTransform::new(
1506                                    match self.voi_lut_function()? {
1507                                        Some(lut) => {
1508                                            if lut.len() > 1 {
1509                                                lut[frame as usize]
1510                                            } else {
1511                                                lut[0]
1512                                            }
1513                                        }
1514                                        None => VoiLutFunction::Linear,
1515                                    },
1516                                    *window,
1517                                ),
1518                            ),
1519                            (VoiLutOption::CustomWithFunction(window, function), _) => {
1520                                Lut::new_rescale_and_window(
1521                                    8,
1522                                    signed,
1523                                    rescale,
1524                                    WindowLevelTransform::new(*function, *window),
1525                                )
1526                            }
1527                            (VoiLutOption::Normalize, _) => Lut::new_rescale_and_normalize(
1528                                8,
1529                                signed,
1530                                rescale,
1531                                data.iter().copied(),
1532                            ),
1533                        }
1534                        .context(CreateLutSnafu)?;
1535
1536                        #[cfg(feature = "rayon")]
1537                        let out = lut.map_par_iter(data.par_iter().copied()).collect();
1538
1539                        #[cfg(not(feature = "rayon"))]
1540                        let out = lut.map_iter(data.iter().copied()).collect();
1541
1542                        Ok(out)
1543                    }
1544                    _ => {
1545                        #[cfg(feature = "rayon")]
1546                        // 1-channel Grayscale image
1547                        let converted: Result<Vec<T>, _> = data
1548                            .par_iter()
1549                            .map(|v| T::from(*v).ok_or(snafu::NoneError))
1550                            .collect();
1551                        #[cfg(not(feature = "rayon"))]
1552                        // 1-channel Grayscale image
1553                        let converted: Result<Vec<T>, _> = data
1554                            .iter()
1555                            .map(|v| T::from(*v).ok_or(snafu::NoneError))
1556                            .collect();
1557                        converted.context(InvalidDataTypeSnafu).map_err(Error::from)
1558                    }
1559                }
1560            }
1561            16 => {
1562                match modality_lut {
1563                    ModalityLutOption::Default | ModalityLutOption::Override(_)
1564                        if self.photometric_interpretation.is_monochrome() =>
1565                    {
1566                        let samples = bytes_to_vec_u16(data);
1567
1568                        let rescale = {
1569                            let default = self.rescale()?;
1570                            if let ModalityLutOption::Override(rescale) = modality_lut {
1571                                *rescale
1572                            } else if default.len() > 1 {
1573                                default[frame as usize]
1574                            } else {
1575                                default[0]
1576                            }
1577                        };
1578
1579                        let signed = self.pixel_representation == PixelRepresentation::Signed;
1580
1581                        let lut: Lut<T> = match (voi_lut, self.window()?) {
1582                            (VoiLutOption::Default | VoiLutOption::Identity, _) => {
1583                                Lut::new_rescale(self.bits_stored, signed, rescale)
1584                            }
1585                            (VoiLutOption::First, Some(window)) => Lut::new_rescale_and_window(
1586                                self.bits_stored,
1587                                signed,
1588                                rescale,
1589                                WindowLevelTransform::new(
1590                                    match self.voi_lut_function()? {
1591                                        Some(lut) => {
1592                                            if lut.len() > 1 {
1593                                                lut[frame as usize]
1594                                            } else {
1595                                                lut[0]
1596                                            }
1597                                        }
1598                                        None => VoiLutFunction::Linear,
1599                                    },
1600                                    if window.len() > 1 {
1601                                        window[frame as usize]
1602                                    } else {
1603                                        window[0]
1604                                    },
1605                                ),
1606                            ),
1607                            (VoiLutOption::First, None) => {
1608                                tracing::warn!("Could not find window level for object");
1609                                Lut::new_rescale_and_normalize(
1610                                    self.bits_stored,
1611                                    signed,
1612                                    rescale,
1613                                    samples.iter().copied(),
1614                                )
1615                            }
1616                            (VoiLutOption::Custom(window), _) => Lut::new_rescale_and_window(
1617                                self.bits_stored,
1618                                signed,
1619                                rescale,
1620                                WindowLevelTransform::new(
1621                                    match self.voi_lut_function()? {
1622                                        Some(lut) => {
1623                                            if lut.len() > 1 {
1624                                                lut[frame as usize]
1625                                            } else {
1626                                                lut[0]
1627                                            }
1628                                        }
1629                                        None => VoiLutFunction::Linear,
1630                                    },
1631                                    *window,
1632                                ),
1633                            ),
1634                            (VoiLutOption::CustomWithFunction(window, function), _) => {
1635                                Lut::new_rescale_and_window(
1636                                    self.bits_stored,
1637                                    signed,
1638                                    rescale,
1639                                    WindowLevelTransform::new(*function, *window),
1640                                )
1641                            }
1642                            (VoiLutOption::Normalize, _) => Lut::new_rescale_and_normalize(
1643                                self.bits_stored,
1644                                signed,
1645                                rescale,
1646                                samples.iter().copied(),
1647                            ),
1648                        }
1649                        .context(CreateLutSnafu)?;
1650
1651                        #[cfg(feature = "rayon")]
1652                        {
1653                            Ok(lut.map_par_iter(samples.into_par_iter()).collect())
1654                        }
1655
1656                        #[cfg(not(feature = "rayon"))]
1657                        {
1658                            Ok(lut.map_iter(samples.into_iter()).collect())
1659                        }
1660                    }
1661                    _ => {
1662                        // no transformations
1663                        match self.pixel_representation {
1664                            // Unsigned 16 bit representation
1665                            PixelRepresentation::Unsigned => {
1666                                let dest = bytes_to_vec_u16(data);
1667
1668                                #[cfg(feature = "rayon")]
1669                                let converted: Result<Vec<T>, _> = dest
1670                                    .par_iter()
1671                                    .map(|v| T::from(*v).ok_or(snafu::NoneError))
1672                                    .collect();
1673                                #[cfg(not(feature = "rayon"))]
1674                                let converted: Result<Vec<T>, _> = dest
1675                                    .iter()
1676                                    .map(|v| T::from(*v).ok_or(snafu::NoneError))
1677                                    .collect();
1678                                converted.context(InvalidDataTypeSnafu).map_err(Error::from)
1679                            }
1680                            // Signed 16 bit 2s complement representation
1681                            PixelRepresentation::Signed => {
1682                                let mut signed_buffer = vec![0; data.len() / 2];
1683                                NativeEndian::read_i16_into(data, &mut signed_buffer);
1684
1685                                #[cfg(feature = "rayon")]
1686                                let converted: Result<Vec<T>, _> = signed_buffer
1687                                    .par_iter()
1688                                    .map(|v| T::from(*v).ok_or(snafu::NoneError))
1689                                    .collect();
1690                                #[cfg(not(feature = "rayon"))]
1691                                let converted: Result<Vec<T>, _> = signed_buffer
1692                                    .iter()
1693                                    .map(|v| T::from(*v).ok_or(snafu::NoneError))
1694                                    .collect();
1695                                converted.context(InvalidDataTypeSnafu).map_err(Error::from)
1696                            }
1697                        }
1698                    }
1699                }
1700            }
1701            _ => InvalidBitsAllocatedSnafu.fail()?,
1702        }
1703    }
1704
1705    /// Convert all of the decoded pixel data
1706    /// into a four dimensional array of a given type `T`.
1707    ///
1708    /// The underlying pixel data type is extracted based on
1709    /// the bits allocated and pixel representation,
1710    /// which is then converted to the requested type.
1711    /// Photometric interpretation is considered
1712    /// to identify whether rescaling should be applied.
1713    /// The pixel values are not inverted
1714    /// if photometric interpretation is `MONOCHROME1`.
1715    ///
1716    /// The shape of the array will be `[N, R, C, S]`,
1717    /// where `N` is the number of frames,
1718    /// `R` is the number of rows,
1719    /// `C` is the number of columns,
1720    /// and `S` is the number of samples per pixel.
1721    ///
1722    /// The default pixel data process pipeline
1723    /// applies only the Modality LUT function described in the object,
1724    /// To change this behavior,
1725    /// see [`to_ndarray_with_options`](Self::to_ndarray_with_options).
1726    #[cfg(feature = "ndarray")]
1727    pub fn to_ndarray<T>(&self) -> Result<Array<T, Ix4>>
1728    where
1729        T: 'static,
1730        T: NumCast,
1731        T: Copy,
1732        T: Send + Sync,
1733    {
1734        self.to_ndarray_with_options(&Default::default())
1735    }
1736
1737    /// Convert all of the decoded pixel data
1738    /// into a four dimensional array of a given type `T`.
1739    ///
1740    /// The underlying pixel data type is extracted based on
1741    /// the bits allocated and pixel representation,
1742    /// which is then converted to the requested type.
1743    /// Photometric interpretation is considered
1744    /// to identify whether rescaling should be applied.
1745    /// The pixel values are not inverted
1746    /// if photometric interpretation is `MONOCHROME1`.
1747    ///
1748    /// The shape of the array will be `[N, R, C, S]`,
1749    /// where `N` is the number of frames,
1750    /// `R` is the number of rows,
1751    /// `C` is the number of columns,
1752    /// and `S` is the number of samples per pixel.
1753    ///
1754    /// The `options` value allows you to specify
1755    /// which transformations should be done to the pixel data
1756    /// (primarily Modality LUT function and VOI LUT function).
1757    /// By default,
1758    /// only the Modality LUT function described in the object is applied.
1759    /// Note that certain options may be ignored
1760    /// if they do not apply.
1761    #[cfg(feature = "ndarray")]
1762    pub fn to_ndarray_with_options<T>(&self, options: &ConvertOptions) -> Result<Array<T, Ix4>>
1763    where
1764        T: 'static,
1765        T: NumCast,
1766        T: Copy,
1767        T: Send + Sync,
1768    {
1769        // Array shape is NumberOfFrames x Rows x Cols x SamplesPerPixel
1770        let shape = [
1771            self.number_of_frames as usize,
1772            self.rows as usize,
1773            self.cols as usize,
1774            self.samples_per_pixel as usize,
1775        ];
1776
1777        let converted = self.to_vec_with_options::<T>(options)?;
1778        Array::from_shape_vec(shape, converted)
1779            .context(InvalidShapeSnafu)
1780            .map_err(Error::from)
1781    }
1782
1783    /// Convert the decoded pixel data of a single frame
1784    /// into a three dimensional array of a given type `T`.
1785    ///
1786    /// The underlying pixel data type is extracted based on
1787    /// the bits allocated and pixel representation,
1788    /// which is then converted to the requested type.
1789    /// Photometric interpretation is considered
1790    /// to identify whether rescaling should be applied.
1791    /// The pixel values are not inverted
1792    /// if photometric interpretation is `MONOCHROME1`.
1793    ///
1794    /// The shape of the array will be `[R, C, S]`,
1795    /// where `R` is the number of rows,
1796    /// `C` is the number of columns,
1797    /// and `S` is the number of samples per pixel.
1798    ///
1799    /// The default pixel data process pipeline
1800    /// applies only the Modality LUT function described in the object,
1801    /// To change this behavior,
1802    /// see [`to_ndarray_frame_with_options`](Self::to_ndarray_frame_with_options).
1803    #[cfg(feature = "ndarray")]
1804    pub fn to_ndarray_frame<T>(&self, frame: u32) -> Result<Array<T, Ix3>>
1805    where
1806        T: 'static,
1807        T: NumCast,
1808        T: Copy,
1809        T: Send + Sync,
1810    {
1811        self.to_ndarray_frame_with_options(frame, &Default::default())
1812    }
1813
1814    /// Convert the decoded pixel data of a single frame
1815    /// into a three dimensional array of a given type `T`.
1816    ///
1817    /// The underlying pixel data type is extracted based on
1818    /// the bits allocated and pixel representation,
1819    /// which is then converted to the requested type.
1820    /// Photometric interpretation is considered
1821    /// to identify whether rescaling should be applied.
1822    /// The pixel values are not inverted
1823    /// if photometric interpretation is `MONOCHROME1`.
1824    ///
1825    /// The shape of the array will be `[R, C, S]`,
1826    /// where `R` is the number of rows,
1827    /// `C` is the number of columns,
1828    /// and `S` is the number of samples per pixel.
1829    ///
1830    /// The `options` value allows you to specify
1831    /// which transformations should be done to the pixel data
1832    /// (primarily Modality LUT function and VOI LUT function).
1833    /// By default,
1834    /// only the Modality LUT function described in the object is applied.
1835    /// Note that certain options may be ignored
1836    /// if they do not apply.
1837    #[cfg(feature = "ndarray")]
1838    pub fn to_ndarray_frame_with_options<T>(
1839        &self,
1840        frame: u32,
1841        options: &ConvertOptions,
1842    ) -> Result<Array<T, Ix3>>
1843    where
1844        T: 'static,
1845        T: NumCast,
1846        T: Copy,
1847        T: Send + Sync,
1848    {
1849        // Array shape is Rows x Cols x SamplesPerPixel
1850        let shape = [
1851            self.rows as usize,
1852            self.cols as usize,
1853            self.samples_per_pixel as usize,
1854        ];
1855
1856        let converted = self.to_vec_frame_with_options::<T>(frame, options)?;
1857        Array::from_shape_vec(shape, converted)
1858            .context(InvalidShapeSnafu)
1859            .map_err(Error::from)
1860    }
1861
    /// Obtain a version of the decoded pixel data
    /// that is independent from the original DICOM object,
    /// by making copies of any necessary data.
    ///
    /// This is useful when you only need the imaging data,
    /// or when you want a composition of the object and decoded pixel data
    /// within the same value type.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use dicom_object::open_file;
    /// # use dicom_pixeldata::{DecodedPixelData, PixelDecoder};
    /// # type Error = Box<dyn std::error::Error>;
    /// fn get_pixeldata_only(path: &str) -> Result<DecodedPixelData<'static>, Error> {
    ///     let obj = open_file(path)?;
    ///     let pixeldata = obj.decode_pixel_data()?;
    ///     // can freely return from function
    ///     Ok(pixeldata.to_owned())
    /// }
    /// ```
    pub fn to_owned(&self) -> DecodedPixelData<'static> {
        DecodedPixelData {
            // copy the pixel data bytes into an owned buffer,
            // detaching the result from the source object's lifetime
            data: Cow::Owned(self.data.to_vec()),
            bits_allocated: self.bits_allocated,
            bits_stored: self.bits_stored,
            high_bit: self.high_bit,
            pixel_representation: self.pixel_representation,
            photometric_interpretation: self.photometric_interpretation.clone(),
            planar_configuration: self.planar_configuration,
            number_of_frames: self.number_of_frames,
            rows: self.rows,
            cols: self.cols,
            samples_per_pixel: self.samples_per_pixel,
            rescale: self.rescale.to_vec(),
            voi_lut_function: self.voi_lut_function.clone(),
            window: self.window.clone(),
            voi_lut_sequence: self.voi_lut_sequence.clone(),
            enforce_frame_fg_vm_match: self.enforce_frame_fg_vm_match,
        }
    }
1903}
1904
/// Reinterpret a byte buffer as a vector of native-endian `u16` values.
///
/// # Panics
///
/// Panics if the buffer length is not a multiple of two,
/// since each 16-bit sample occupies exactly two bytes.
/// (The previous implementation also panicked in this case,
/// inside the byte-order conversion routine, but without a clear message.)
fn bytes_to_vec_u16(data: &[u8]) -> Vec<u16> {
    assert!(
        data.len() % 2 == 0,
        "byte buffer length must be even to reinterpret as 16-bit samples"
    );
    // `chunks_exact(2)` guarantees every chunk has exactly two bytes,
    // so the indexing below cannot fail
    data.chunks_exact(2)
        .map(|pair| u16::from_ne_bytes([pair[0], pair[1]]))
        .collect()
}
1911
// Convert a u8 pixel array from YBR_FULL or YBR_FULL_422 to RGB, in place.
// Every 3-sample pixel (Y, Cb, Cr) is replaced with an (R, G, B) value.
#[cfg(feature = "image")]
fn convert_colorspace_u8(data: &mut [u8]) {
    #[cfg(feature = "rayon")]
    let pixels = data.par_chunks_mut(3);
    #[cfg(not(feature = "rayon"))]
    let pixels = data.chunks_mut(3);

    // Matrix multiplication taken from
    // https://github.com/pydicom/pydicom/blob/f36517e10/pydicom/pixel_data_handlers/util.py#L576
    pixels.for_each(|px| {
        // luminance, plus chroma components shifted to be zero-centered
        let y = px[0] as f32;
        let cb = px[1] as f32 - 128.0;
        let cr = px[2] as f32 - 128.0;

        // the +0.5 bias followed by `floor` rounds to the nearest integer
        let red = (y + 1.402 * cr) + 0.5;
        let green = (y + (0.114 * 1.772 / 0.587) * cb + (-0.299 * 1.402 / 0.587) * cr) + 0.5;
        let blue = (y + 1.772 * cb) + 0.5;

        px[0] = red.floor().clamp(0.0, u8::MAX as f32) as u8;
        px[1] = green.floor().clamp(0.0, u8::MAX as f32) as u8;
        px[2] = blue.floor().clamp(0.0, u8::MAX as f32) as u8;
    });
}
1943
// Turn planar sample data (all of component 0, then all of component 1,
// then all of component 2) into interleaved per-pixel triplets.
#[cfg(feature = "image")]
fn interleave<T: Copy>(data: &[T]) -> Vec<T> {
    debug_assert_eq!(data.len() % 3, 0);
    let plane_len = data.len() / 3;
    let mut interleaved = Vec::with_capacity(data.len());
    for k in 0..plane_len {
        interleaved.push(data[k]);
        interleaved.push(data[plane_len + k]);
        interleaved.push(data[2 * plane_len + k]);
    }
    interleaved
}
1957
// Convert a u16 pixel array from YBR_FULL or YBR_FULL_422 to RGB, in place.
// Every 3-sample pixel (Y, Cb, Cr) is replaced with an (R, G, B) value.
#[cfg(feature = "image")]
fn convert_colorspace_u16(data: &mut [u16]) {
    #[cfg(feature = "rayon")]
    let pixels = data.par_chunks_mut(3);
    #[cfg(not(feature = "rayon"))]
    let pixels = data.chunks_mut(3);

    // Matrix multiplication taken from
    // https://github.com/pydicom/pydicom/blob/f36517e10/pydicom/pixel_data_handlers/util.py#L576
    pixels.for_each(|px| {
        // luminance, plus chroma components shifted to be zero-centered
        let y = px[0] as f32;
        let cb = px[1] as f32 - 32768.0;
        let cr = px[2] as f32 - 32768.0;

        // the +0.5 bias followed by `floor` rounds to the nearest integer
        let red = (y + 1.402 * cr) + 0.5;
        let green = (y + (0.114 * 1.772 / 0.587) * cb + (-0.299 * 1.402 / 0.587) * cr) + 0.5;
        let blue = (y + 1.772 * cb) + 0.5;

        px[0] = red.floor().clamp(0.0, u16::MAX as f32) as u16;
        px[1] = green.floor().clamp(0.0, u16::MAX as f32) as u16;
        px[2] = blue.floor().clamp(0.0, u16::MAX as f32) as u16;
    });
}
1989
/// Convert the i16 samples by shifting them up into the u16 range,
/// thus maintaining the order between sample values.
#[cfg(feature = "image")]
fn convert_i16_to_u16(samples: &[i16]) -> Vec<u16> {
    #[cfg(feature = "rayon")]
    let iter = samples.par_iter();
    #[cfg(not(feature = "rayon"))]
    let iter = samples.iter();
    // adding 0x8000 maps i16::MIN..=i16::MAX onto 0..=u16::MAX
    iter.map(|&sample| (i32::from(sample) + 0x8000) as u16)
        .collect()
}
2000
/// Trait for objects which can be decoded into
/// blobs of easily consumable pixel data.
///
/// This is the main trait which extends the capability of DICOM objects
/// (such as [`DefaultDicomObject`](dicom_object::DefaultDicomObject) from [`dicom_object`])
/// with a pathway to retrieve the imaging data.
///
/// See examples of use in the [root crate documentation](crate).
pub trait PixelDecoder {
    /// Decode the full pixel data in this object,
    /// yielding a base set of imaging properties
    /// and pixel data in native form.
    ///
    /// The resulting pixel data will be tied to
    /// the original object's lifetime.
    /// In the event that the pixel data is in an encapsulated form,
    /// new byte buffers are allocated for holding their native form.
    fn decode_pixel_data(&self) -> Result<DecodedPixelData<'_>>;

    /// Decode the pixel data of a single frame in this object,
    /// yielding a base set of imaging properties
    /// and pixel data in native form.
    ///
    /// The resulting pixel data will be tied to
    /// the original object's lifetime.
    /// In the event that the pixel data is in an encapsulated form,
    /// new byte buffers are allocated for holding their native form.
    /// The number of frames recorded will be always 1,
    /// and the existence of other frames is ignored.
    /// When calling single frame retrieval methods afterwards,
    /// such as [`to_vec_frame`](DecodedPixelData::to_vec_frame),
    /// assume the intended frame number to be `0`.
    ///
    /// ---
    ///
    /// The default implementation decodes the full pixel data
    /// and then provides a crop containing only the frame of interest.
    /// Implementers are advised to write their own implementation for efficiency.
    fn decode_pixel_data_frame(&self, frame: u32) -> Result<DecodedPixelData<'_>> {
        let mut px = self.decode_pixel_data()?;

        // calculate frame offset and size:
        // bytes per sample (bits allocated rounded up to whole bytes)
        // times samples per pixel, rows, and columns
        let frame_size = ((px.bits_allocated + 7) / 8) as usize
            * px.samples_per_pixel as usize
            * px.rows as usize
            * px.cols as usize;
        let frame_offset = frame_size * frame as usize;

        // crop to frame, reslicing in place when the data is borrowed
        // NOTE(review): the slicing below panics if `frame` is out of range
        // for the decoded data — confirm that callers validate the frame number
        match &mut px.data {
            Cow::Owned(data) => *data = data[frame_offset..frame_offset + frame_size].to_vec(),
            Cow::Borrowed(data) => {
                *data = &data[frame_offset..frame_offset + frame_size];
            }
        }

        // reset number of frames, since only the selected frame is kept
        px.number_of_frames = 1;

        Ok(px)
    }
}
2063
/// Aggregator of key properties for imaging data,
/// without the pixel data proper.
///
/// Currently kept private,
/// might become part of the public API in the future.
#[derive(Debug)]
#[cfg(not(feature = "gdcm"))]
pub(crate) struct ImagingProperties {
    /// number of columns in each frame
    pub(crate) cols: u16,
    /// number of rows in each frame
    pub(crate) rows: u16,
    /// number of samples (channels) per pixel
    pub(crate) samples_per_pixel: u16,
    /// number of bits allocated per sample
    pub(crate) bits_allocated: u16,
    /// number of bits effectively used per sample
    pub(crate) bits_stored: u16,
    /// position of the highest meaningful bit in each sample
    pub(crate) high_bit: u16,
    /// whether sample values are interpreted as signed or unsigned
    pub(crate) pixel_representation: PixelRepresentation,
    /// the planar configuration of the samples
    pub(crate) planar_configuration: PlanarConfiguration,
    /// interpretation of the pixel samples (e.g. monochrome or RGB)
    pub(crate) photometric_interpretation: PhotometricInterpretation,
    /// rescale intercept value(s); always the same length as `rescale_slope`
    pub(crate) rescale_intercept: Vec<f64>,
    /// rescale slope value(s); always the same length as `rescale_intercept`
    pub(crate) rescale_slope: Vec<f64>,
    /// total number of frames in the pixel data
    pub(crate) number_of_frames: u32,
    /// VOI LUT function descriptors, when present in the object
    pub(crate) voi_lut_function: Option<Vec<VoiLutFunction>>,
    /// window level (center/width) pairs, when present in the object
    pub(crate) window: Option<Vec<WindowLevel>>,
    /// VOI LUT sequence items, when present in the object
    pub(crate) voi_lut_sequence: Option<Vec<VoiLut>>,
}
2088
2089#[cfg(not(feature = "gdcm"))]
2090impl ImagingProperties {
2091    fn from_obj<D>(obj: &FileDicomObject<InMemDicomObject<D>>) -> Result<Self>
2092    where
2093        D: Clone + DataDictionary,
2094    {
2095        use attribute::*;
2096        use std::convert::TryFrom;
2097
2098        let cols = cols(obj)?;
2099        let rows = rows(obj)?;
2100        let photometric_interpretation = photometric_interpretation(obj)?;
2101        let samples_per_pixel = samples_per_pixel(obj)?;
2102        let planar_configuration = planar_configuration(obj)?;
2103        let bits_allocated = bits_allocated(obj)?;
2104        let bits_stored = bits_stored(obj)?;
2105        let high_bit = high_bit(obj)?;
2106        let pixel_representation = pixel_representation(obj)?;
2107        let rescale_intercept = rescale_intercept(obj);
2108        let rescale_slope = rescale_slope(obj);
2109        let number_of_frames = number_of_frames(obj)?;
2110        let voi_lut_function = voi_lut_function(obj)?;
2111        let voi_lut_function: Option<Vec<VoiLutFunction>> = voi_lut_function.and_then(|fns| {
2112            fns.iter()
2113                .map(|v| VoiLutFunction::try_from((*v).as_str()).ok())
2114                .collect()
2115        });
2116        let voi_lut_sequence = voi_lut_sequence(obj);
2117
2118        ensure!(
2119            rescale_intercept.len() == rescale_slope.len(),
2120            LengthMismatchRescaleSnafu {
2121                slope_vm: rescale_slope.len() as u32,
2122                intercept_vm: rescale_intercept.len() as u32,
2123            }
2124        );
2125
2126        let window = if let Some(wcs) = window_center(obj) {
2127            let width = window_width(obj);
2128            if let Some(wws) = width {
2129                ensure!(
2130                    wcs.len() == wws.len(),
2131                    LengthMismatchWindowLevelSnafu {
2132                        wc_vm: wcs.len() as u32,
2133                        ww_vm: wws.len() as u32,
2134                    }
2135                );
2136                Some(
2137                    zip(wcs, wws)
2138                        .map(|(wc, ww)| WindowLevel {
2139                            center: wc,
2140                            width: ww,
2141                        })
2142                        .collect(),
2143                )
2144            } else {
2145                None
2146            }
2147        } else {
2148            None
2149        };
2150
2151        Ok(Self {
2152            cols,
2153            rows,
2154            samples_per_pixel,
2155            bits_allocated,
2156            bits_stored,
2157            high_bit,
2158            pixel_representation,
2159            planar_configuration,
2160            photometric_interpretation,
2161            rescale_intercept,
2162            rescale_slope,
2163            number_of_frames,
2164            voi_lut_function,
2165            window,
2166            voi_lut_sequence,
2167        })
2168    }
2169}
2170
#[cfg(not(feature = "gdcm"))]
impl<D> PixelDecoder for FileDicomObject<InMemDicomObject<D>>
where
    D: DataDictionary + Clone,
{
    /// Decode the pixel data of every frame in the object.
    ///
    /// Encapsulated pixel data is decoded through the decoder registered
    /// for the object's transfer syntax when one is available;
    /// otherwise the native bytes (or concatenated fragments) are returned.
    fn decode_pixel_data(&self) -> Result<DecodedPixelData<'_>> {
        let pixel_data = attribute::pixel_data(self)?;

        // gather the imaging attributes needed to interpret the samples
        let ImagingProperties {
            cols,
            rows,
            samples_per_pixel,
            bits_allocated,
            bits_stored,
            high_bit,
            pixel_representation,
            planar_configuration,
            photometric_interpretation,
            rescale_intercept,
            rescale_slope,
            number_of_frames,
            voi_lut_function,
            window,
            voi_lut_sequence,
        } = ImagingProperties::from_obj(self)?;

        let transfer_syntax = &self.meta().transfer_syntax;
        let ts = TransferSyntaxRegistry
            .get(transfer_syntax)
            .with_context(|| UnknownTransferSyntaxSnafu {
                ts_uid: transfer_syntax,
            })?;

        // bail out early if this build cannot fully decode the transfer syntax
        if !ts.can_decode_all() {
            return UnsupportedTransferSyntaxSnafu {
                ts: transfer_syntax,
            }
            .fail()?;
        }

        // pair each rescale intercept with its slope
        // (`zip` truncates to the shorter of the two lists)
        let rescale = zip(&rescale_intercept, &rescale_slope)
            .map(|(intercept, slope)| Rescale {
                intercept: *intercept,
                slope: *slope,
            })
            .collect();

        // Try decoding it using a registered pixel data decoder
        if let Codec::EncapsulatedPixelData(Some(decoder), _) = ts.codec() {
            let mut data: Vec<u8> = Vec::new();
            (*decoder)
                .decode(self, &mut data)
                .context(DecodePixelDataSnafu)?;

            // pixels are already interpreted,
            // set new photometric interpretation if necessary
            let new_pi = match samples_per_pixel {
                3 => PhotometricInterpretation::Rgb,
                _ => photometric_interpretation,
            };

            return Ok(DecodedPixelData {
                data: Cow::from(data),
                cols: cols.into(),
                rows: rows.into(),
                number_of_frames,
                photometric_interpretation: new_pi,
                samples_per_pixel,
                // decoder output uses the standard (interleaved) sample layout
                planar_configuration: PlanarConfiguration::Standard,
                bits_allocated,
                bits_stored,
                high_bit,
                pixel_representation,
                rescale,
                voi_lut_function,
                window,
                voi_lut_sequence,
                enforce_frame_fg_vm_match: false,
            });
        }

        let decoded_pixel_data = match pixel_data.value() {
            DicomValue::PixelSequence(v) => {
                // Return all fragments concatenated
                // (should only happen for Encapsulated Uncompressed)
                v.fragments().iter().flatten().copied().collect()
            }
            DicomValue::Primitive(p) => {
                // Non-encoded, just return the pixel data for all frames
                let data = p.to_bytes();

                if bits_allocated == 1 {
                    // Expand 1-bit samples to 0/255 bytes for all frames
                    let frame_pixels = (rows as usize) * (cols as usize);
                    let frame_samples = frame_pixels * (samples_per_pixel as usize);
                    // NOTE(review): integer division drops a trailing partial
                    // byte when the sample count is not a multiple of 8 —
                    // confirm whether such inputs can reach this path
                    let frame_size = frame_samples / 8;
                    let frame_size_all = frame_size * (number_of_frames as usize);

                    // NOTE(review): on failure this reports the total byte
                    // count in the `frame_number` field, which makes the
                    // resulting error message misleading
                    let frame_data = data.get(0..frame_size_all).context(FrameOutOfRangeSnafu {
                        frame_number: frame_size_all as u32,
                    })?;
                    // Map every bit in each byte to a separate byte of either 0 or 255,
                    // unpacking the least significant bit first
                    frame_data
                        .iter()
                        .flat_map(|&byte| (0..8).map(move |bit| ((byte >> bit) & 1) * 255))
                        .take(frame_pixels * number_of_frames as usize)
                        .collect()
                } else {
                    data.to_vec()
                }
            }
            DicomValue::Sequence(..) => InvalidPixelDataSnafu.fail()?,
        };
        Ok(DecodedPixelData {
            data: Cow::from(decoded_pixel_data),
            cols: cols.into(),
            rows: rows.into(),
            number_of_frames,
            photometric_interpretation,
            samples_per_pixel,
            planar_configuration,
            bits_allocated,
            bits_stored,
            high_bit,
            pixel_representation,
            rescale,
            voi_lut_function,
            window,
            voi_lut_sequence,
            enforce_frame_fg_vm_match: false,
        })
    }

    /// Decode the pixel data of a single frame (0-based index `frame`).
    ///
    /// The returned `DecodedPixelData` always reports a single frame,
    /// with the rescale, window, and VOI LUT function lists narrowed to
    /// the entry for the requested frame
    /// (falling back to the first entry when there is no per-frame value).
    fn decode_pixel_data_frame(&self, frame: u32) -> Result<DecodedPixelData<'_>> {
        let pixel_data = attribute::pixel_data(self)?;

        // gather the imaging attributes needed to interpret the samples
        let ImagingProperties {
            cols,
            rows,
            samples_per_pixel,
            bits_allocated,
            bits_stored,
            high_bit,
            pixel_representation,
            planar_configuration,
            photometric_interpretation,
            rescale_intercept,
            rescale_slope,
            number_of_frames,
            voi_lut_function,
            window,
            voi_lut_sequence,
        } = ImagingProperties::from_obj(self)?;

        let transfer_syntax = &self.meta().transfer_syntax;
        let ts = TransferSyntaxRegistry
            .get(transfer_syntax)
            .with_context(|| UnknownTransferSyntaxSnafu {
                ts_uid: transfer_syntax,
            })?;

        // bail out early if this build cannot fully decode the transfer syntax
        if !ts.can_decode_all() {
            return UnsupportedTransferSyntaxSnafu {
                ts: transfer_syntax,
            }
            .fail()?;
        }

        let rescale_data = zip(&rescale_intercept, &rescale_slope)
            .map(|(intercept, slope)| Rescale {
                intercept: *intercept,
                slope: *slope,
            })
            .collect::<Vec<Rescale>>();

        // select the rescale entry for this frame,
        // falling back to the first entry (or to an empty list)
        let rescale = rescale_data
            .get(frame as usize)
            .or(rescale_data.first())
            .copied()
            .map(|inner| vec![inner])
            .unwrap_or_default();

        // same narrowing for the VOI window of this frame
        let window = window.and_then(|inner| {
            inner
                .get(frame as usize)
                .or(inner.first())
                .copied()
                .map(|el| vec![el])
        });

        // and for the VOI LUT function
        let voi_lut_function = voi_lut_function.and_then(|inner| {
            inner
                .get(frame as usize)
                .or(inner.first())
                .copied()
                .map(|el| vec![el])
        });

        // Try decoding it using a registered pixel data decoder
        if let Codec::EncapsulatedPixelData(Some(decoder), _) = ts.codec() {
            let mut data: Vec<u8> = Vec::new();
            (*decoder)
                .decode_frame(self, frame, &mut data)
                .context(DecodePixelDataSnafu)?;

            // pixels are already interpreted,
            // set new photometric interpretation if necessary
            let new_pi = match samples_per_pixel {
                3 => PhotometricInterpretation::Rgb,
                _ => photometric_interpretation,
            };

            return Ok(DecodedPixelData {
                data: Cow::from(data),
                cols: cols.into(),
                rows: rows.into(),
                // single decoded frame
                number_of_frames: 1,
                photometric_interpretation: new_pi,
                samples_per_pixel,
                planar_configuration: PlanarConfiguration::Standard,
                bits_allocated,
                bits_stored,
                high_bit,
                pixel_representation,
                rescale,
                voi_lut_function,
                window,
                voi_lut_sequence,
                enforce_frame_fg_vm_match: false,
            });
        }

        let decoded_pixel_data = match pixel_data.value() {
            DicomValue::PixelSequence(v) => {
                let fragments = v.fragments();
                if number_of_frames as usize == fragments.len() {
                    // one fragment per frame: return a single fragment
                    fragments[frame as usize].to_vec()
                } else {
                    // frames spanning multiple fragments are
                    // not supported, return an error
                    InvalidPixelDataSnafu.fail()?
                }
            }
            DicomValue::Primitive(p) => {
                // Non-encoded, just return the pixel data for a single frame
                let frame_pixels = (rows as usize) * (cols as usize);
                let frame_samples = frame_pixels * (samples_per_pixel as usize);
                let frame_size = if bits_allocated == 1 {
                    // NOTE(review): truncates a trailing partial byte when the
                    // sample count is not a multiple of 8 — confirm inputs
                    frame_samples / 8
                } else {
                    // whole bytes per sample, rounding the bit depth up
                    frame_samples * ((bits_allocated as usize + 7) / 8)
                };
                let frame_offset = frame_size * (frame as usize);

                let data = p.to_bytes();

                let frame_data = data.get(frame_offset..frame_offset + frame_size).context(
                    FrameOutOfRangeSnafu {
                        frame_number: frame,
                    },
                )?;

                let pixel_data = if bits_allocated == 1 {
                    // Map every bit in each byte to a separate byte of either 0 or 255,
                    // unpacking the least significant bit first
                    frame_data
                        .iter()
                        .flat_map(|&byte| (0..8).map(move |bit| ((byte >> bit) & 1) * 255))
                        .take(frame_pixels)
                        .collect()
                } else {
                    frame_data.to_vec()
                };

                pixel_data
            }
            DicomValue::Sequence(..) => InvalidPixelDataSnafu.fail()?,
        };

        Ok(DecodedPixelData {
            data: Cow::from(decoded_pixel_data),
            cols: cols.into(),
            rows: rows.into(),
            number_of_frames: 1,
            photometric_interpretation,
            samples_per_pixel,
            planar_configuration,
            bits_allocated,
            bits_stored,
            high_bit,
            pixel_representation,
            rescale,
            voi_lut_function,
            window,
            voi_lut_sequence,
            enforce_frame_fg_vm_match: false,
        })
    }
}
2469
2470#[cfg(test)]
2471mod tests {
2472    use super::*;
2473    use dicom_object::open_file;
2474
2475    fn is_send_and_sync<T>()
2476    where
2477        T: Send + Sync,
2478    {
2479    }
2480
    /// `Error` must be `Send + Sync` so it can cross thread boundaries
    /// and compose with other error-handling machinery.
    #[test]
    fn error_is_send_and_sync() {
        is_send_and_sync::<Error>();
    }
2485
2486    #[test]
2487    fn test_to_vec_rgb() {
2488        let test_file = dicom_test_files::path("pydicom/SC_rgb_16bit.dcm").unwrap();
2489        let obj = open_file(test_file).unwrap();
2490        let decoded = obj.decode_pixel_data().unwrap();
2491
2492        let rows = decoded.rows();
2493
2494        let values = decoded.to_vec::<u16>().unwrap();
2495        assert_eq!(values.len(), 30000);
2496
2497        // 50, 80, 1
2498        assert_eq!(values[50 * rows as usize * 3 + 80 * 3 + 1], 32896);
2499    }
2500
    /// Decoding into an ndarray yields shape `[frames, rows, columns, samples]`.
    #[test]
    #[cfg(feature = "ndarray")]
    fn test_to_ndarray_rgb() {
        let test_file = dicom_test_files::path("pydicom/SC_rgb_16bit.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        let ndarray = obj
            .decode_pixel_data()
            .unwrap()
            .to_ndarray::<u16>()
            .unwrap();
        assert_eq!(ndarray.shape(), &[1, 100, 100, 3]);
        assert_eq!(ndarray.len(), 30000);
        // row 50, column 80, sample 1 — validated against the source image
        assert_eq!(ndarray[[0, 50, 80, 1]], 32896);
    }
2515
    /// to_ndarray fails if the target type cannot represent the transformed values
    #[cfg(feature = "ndarray")]
    #[test]
    fn test_to_ndarray_error() {
        let test_file = dicom_test_files::path("pydicom/CT_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        // either error variant is acceptable: the failure may surface while
        // converting sample values or while building the lookup table
        assert!(matches!(
            obj.decode_pixel_data().unwrap().to_ndarray::<u8>(),
            Err(Error(InnerError::InvalidDataType { .. }))
                | Err(Error(InnerError::CreateLut { .. }))
        ));
    }
2528
    /// conversion to ndarray in 16-bit
    /// retains the original data of a 16-bit image
    #[cfg(feature = "ndarray")]
    #[test]
    fn test_to_ndarray_16bit() {
        let test_file = dicom_test_files::path("pydicom/CT_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();

        let decoded = obj.decode_pixel_data().unwrap();
        // disable the modality LUT so the stored values pass through untouched
        let options = ConvertOptions::new().with_modality_lut(ModalityLutOption::None);
        let ndarray = decoded.to_ndarray_with_options::<u16>(&options).unwrap();

        assert_eq!(ndarray.shape(), &[1, 128, 128, 1]);

        // sample value retrieved from the original image file
        assert_eq!(ndarray[[0, 127, 127, 0]], 0x038D);
    }
2546
    /// conversion of a 16-bit image to a vector of 16-bit processed pixel values
    /// takes advantage of the output's full spectrum
    #[test]
    fn test_to_vec_16bit_to_window() {
        let test_file = dicom_test_files::path("pydicom/CT_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();

        let decoded = obj.decode_pixel_data().unwrap();
        // apply the modality LUT and the first VOI window
        let options = ConvertOptions::new()
            .with_modality_lut(ModalityLutOption::Default)
            .with_voi_lut(VoiLutOption::First);
        let values = decoded.to_vec_with_options::<u16>(&options).unwrap();

        assert_eq!(values.len(), 128 * 128);

        // values are in the full spectrum

        let max = values.iter().max().unwrap();
        let min = values.iter().min().unwrap();

        assert_eq!(*max, 0xFFFF, "maximum in window should be 65535");
        assert_eq!(*min, 0, "minimum in window should be 0");
    }
2570
    /// Rescale Slope and Intercept from the data set are carried into the result.
    #[test]
    fn test_correct_ri_extracted() {
        // Rescale Slope and Intercept exist for this scan
        let test_file = dicom_test_files::path("pydicom/CT_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        let pixel_data = obj.decode_pixel_data().unwrap();
        assert_eq!(pixel_data.rescale().unwrap()[0], Rescale::new(1., -1024.));
    }
2579
    /// A missing Rescale Intercept falls back to the identity rescale (1, 0).
    #[test]
    fn test_correct_rescale_extracted_without_element() {
        // RescaleIntercept does not exist for this scan
        let test_file = dicom_test_files::path("pydicom/MR_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        let pixel_data = obj.decode_pixel_data().unwrap();
        assert_eq!(pixel_data.rescale().unwrap()[0], Rescale::new(1., 0.));
    }
2588
    /// All imaging properties of a 16-bit monochrome scan are reported faithfully.
    #[test]
    fn test_general_properties_from_16bit() {
        let test_file = dicom_test_files::path("pydicom/CT_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        let pixel_data = obj.decode_pixel_data().unwrap();

        assert_eq!(pixel_data.columns(), 128, "Unexpected Columns");
        assert_eq!(pixel_data.rows(), 128, "Unexpected Rows");
        assert_eq!(
            pixel_data.number_of_frames(),
            1,
            "Unexpected Number of Frames"
        );
        assert_eq!(
            pixel_data.photometric_interpretation(),
            &PhotometricInterpretation::Monochrome2,
            "Unexpected Photometric Interpretation"
        );
        assert_eq!(
            pixel_data.samples_per_pixel(),
            1,
            "Unexpected Samples per Pixel"
        );
        assert_eq!(pixel_data.bits_allocated(), 16, "Unexpected Bits Allocated");
        assert_eq!(pixel_data.bits_stored(), 16, "Unexpected Bits Stored");
        assert_eq!(pixel_data.high_bit(), 15, "Unexpected High Bit");
        assert_eq!(
            pixel_data.pixel_representation(),
            PixelRepresentation::Signed
        );
    }
2620
    /// Forcing the output bit depth of a 16-bit monochrome image
    /// yields the requested luma variant.
    #[cfg(feature = "image")]
    #[test]
    fn test_force_bit_depth_from_16bit() {
        let test_file = dicom_test_files::path("pydicom/CT_small.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        let pixel_data = obj.decode_pixel_data().unwrap();

        // original image has 16 bits stored
        {
            let image = pixel_data
                .to_dynamic_image(0)
                .expect("Failed to convert to image");

            assert!(image.as_luma16().is_some());
        }

        // force to 16 bits
        {
            let options = ConvertOptions::new().force_16bit();
            let image = pixel_data
                .to_dynamic_image_with_options(0, &options)
                .expect("Failed to convert to image");

            assert!(image.as_luma16().is_some());
        }

        // force to 8 bits
        {
            let options = ConvertOptions::new().force_8bit();
            let image = pixel_data
                .to_dynamic_image_with_options(0, &options)
                .expect("Failed to convert to image");

            assert!(image.as_luma8().is_some());
        }
    }
2657
    /// Forcing the output bit depth of an RGB image
    /// yields the requested RGB variant.
    #[cfg(feature = "image")]
    #[test]
    fn test_force_bit_depth_from_rgb() {
        let test_file = dicom_test_files::path("pydicom/color-px.dcm").unwrap();
        let obj = open_file(test_file).unwrap();
        let pixel_data = obj.decode_pixel_data().unwrap();

        // original image is RGB with 8 bits per sample
        {
            let image = pixel_data
                .to_dynamic_image(0)
                .expect("Failed to convert to image");

            assert!(image.as_rgb8().is_some());
        }

        // force to 16 bits
        {
            let options = ConvertOptions::new().force_16bit();
            let image = pixel_data
                .to_dynamic_image_with_options(0, &options)
                .expect("Failed to convert to image");

            assert!(image.as_rgb16().is_some());
        }

        // force to 8 bits
        {
            let options = ConvertOptions::new().force_8bit();
            let image = pixel_data
                .to_dynamic_image_with_options(0, &options)
                .expect("Failed to convert to image");

            assert!(image.as_rgb8().is_some());
        }
    }
2694
    /// Requesting a frame index beyond the available frames
    /// reports `FrameOutOfRange` with the offending index.
    #[cfg(feature = "image")]
    #[test]
    fn test_frame_out_of_range() {
        let path =
            dicom_test_files::path("pydicom/CT_small.dcm").expect("test DICOM file should exist");
        let image = open_file(&path).unwrap();
        // Only one frame in this test dicom
        image
            .decode_pixel_data()
            .unwrap()
            .to_dynamic_image(0)
            .unwrap();
        let result = image.decode_pixel_data().unwrap().to_dynamic_image(1);
        match result {
            Err(Error(InnerError::FrameOutOfRange {
                frame_number: 1, ..
            })) => {}
            _ => panic!("Unexpected positive outcome for out of range access"),
        }
    }
2715
    /// A deflated data set can be decoded and re-written.
    ///
    /// NOTE(review): the final assertion compares the re-encoded file length
    /// with the original byte-for-byte length — presumably this depends on
    /// the deflate implementation producing identical output, which would
    /// explain why the test is marked unsound; confirm before re-enabling.
    #[test]
    #[ignore = "test is unsound"]
    fn test_can_read_deflated() {
        let path =
            dicom_test_files::path("pydicom/image_dfl.dcm").expect("test DICOM file should exist");

        // should read preamble even though it's from a reader
        let obj = open_file(path.clone()).expect("Should read file");

        let res = obj.decode_pixel_data().expect("Should decode pixel data.");
        assert_eq!(
            res.to_vec::<u8>().unwrap().len(),
            (res.rows() as usize * res.columns() as usize)
        );
        let mut buf = Vec::<u8>::new();
        obj.write_all(&mut buf).expect("Should write deflated");

        assert_eq!(std::fs::metadata(path).unwrap().len() as usize, buf.len())
    }
2735
2736    #[cfg(not(feature = "gdcm"))]
2737    mod not_gdcm {
2738        #[cfg(feature = "ndarray")]
2739        use crate::PixelDecoder;
2740        #[cfg(any(feature = "rle", feature = "image"))]
2741        #[cfg(feature = "image")]
2742        use rstest::rstest;
2743
        /// RLE-compressed 8-bit RGB, single frame, decoded to a flat vector.
        /// Indexing is `row * columns * 3 + column * 3 + sample`.
        #[cfg(feature = "rle")]
        #[test]
        fn test_native_decoding_pixel_data_rle_8bit_1frame_vec() {
            use crate::{ConvertOptions, ModalityLutOption, PixelDecoder as _};

            let path = dicom_test_files::path("pydicom/SC_rgb_rle.dcm")
                .expect("test DICOM file should exist");
            let object = dicom_object::open_file(&path).unwrap();

            // disable the modality LUT so raw decoded values are compared
            let options = ConvertOptions::new().with_modality_lut(ModalityLutOption::None);
            let decoded = object.decode_pixel_data().unwrap();
            let values = decoded.to_vec_with_options::<u8>(&options).unwrap();

            let columns = decoded.columns() as usize;
            // validated through manual inspection of ground-truth
            assert_eq!(values.len(), 30_000);
            // 0,0,r
            assert_eq!(values[0], 255);
            // 0,0,g
            assert_eq!(values[1], 0);
            // 0,0,b
            assert_eq!(values[2], 0);
            // 50,50,r
            assert_eq!(values[50 * columns * 3 + 50 * 3], 128);
            // 50,50,g
            assert_eq!(values[50 * columns * 3 + 50 * 3 + 1], 128);
            // 50,50,b
            assert_eq!(values[50 * columns * 3 + 50 * 3 + 2], 255);
            // 75,75,r
            assert_eq!(values[75 * columns * 3 + 75 * 3], 64);
            // 75,75,g
            assert_eq!(values[75 * columns * 3 + 75 * 3 + 1], 64);
            // 75,75,b
            assert_eq!(values[75 * columns * 3 + 75 * 3 + 2], 64);
            // 16,49,r
            assert_eq!(values[49 * columns * 3 + 16 * 3], 0);
            // 16,49,g
            assert_eq!(values[49 * columns * 3 + 16 * 3 + 1], 0);
            // 16,49,b
            assert_eq!(values[49 * columns * 3 + 16 * 3 + 2], 255);
        }
2785
        /// RLE-compressed 8-bit RGB, single frame, decoded to an ndarray
        /// of shape `[frames, rows, columns, samples]`.
        #[cfg(feature = "ndarray")]
        #[test]
        fn test_native_decoding_pixel_data_rle_8bit_1frame_ndarray() {
            use crate::{ConvertOptions, ModalityLutOption};

            let path = dicom_test_files::path("pydicom/SC_rgb_rle.dcm")
                .expect("test DICOM file should exist");
            let object = dicom_object::open_file(&path).unwrap();

            // disable the modality LUT so raw decoded values are compared
            let options = ConvertOptions::new().with_modality_lut(ModalityLutOption::None);
            let ndarray = object
                .decode_pixel_data()
                .unwrap()
                .to_ndarray_with_options::<u8>(&options)
                .unwrap();
            // validated through manual inspection of ground-truth
            assert_eq!(ndarray.shape(), &[1, 100, 100, 3]);
            assert_eq!(ndarray.len(), 30_000);
            // 0, 0
            assert_eq!(ndarray[[0, 0, 0, 0]], 255);
            assert_eq!(ndarray[[0, 0, 0, 1]], 0);
            assert_eq!(ndarray[[0, 0, 0, 2]], 0);
            // 50, 50
            assert_eq!(ndarray[[0, 50, 50, 0]], 128);
            assert_eq!(ndarray[[0, 50, 50, 1]], 128);
            assert_eq!(ndarray[[0, 50, 50, 2]], 255);
            // 75, 75
            assert_eq!(ndarray[[0, 75, 75, 0]], 64);
            assert_eq!(ndarray[[0, 75, 75, 1]], 64);
            assert_eq!(ndarray[[0, 75, 75, 2]], 64);
            // 16, 49
            assert_eq!(ndarray[[0, 49, 16, 0]], 0);
            assert_eq!(ndarray[[0, 49, 16, 1]], 0);
            assert_eq!(ndarray[[0, 49, 16, 2]], 255);
        }
2821
        /// RLE-compressed 8-bit RGB with two frames;
        /// the second frame is the color inverse of the first.
        #[cfg(feature = "ndarray")]
        #[test]
        fn test_native_decoding_pixel_data_rle_8bit_2frame() {
            use crate::{ConvertOptions, ModalityLutOption};

            let path = dicom_test_files::path("pydicom/SC_rgb_rle_2frame.dcm")
                .expect("test DICOM file should exist");
            let object = dicom_object::open_file(&path).unwrap();
            // disable the modality LUT so raw decoded values are compared
            let options = ConvertOptions::new().with_modality_lut(ModalityLutOption::None);
            let ndarray = object
                .decode_pixel_data()
                .unwrap()
                .to_ndarray_with_options::<u8>(&options)
                .unwrap();
            // validated through manual inspection of ground-truth
            assert_eq!(ndarray.shape(), &[2, 100, 100, 3]);
            assert_eq!(ndarray.len(), 60_000);
            // 0, 0
            assert_eq!(ndarray[[0, 0, 0, 0]], 255);
            assert_eq!(ndarray[[0, 0, 0, 1]], 0);
            assert_eq!(ndarray[[0, 0, 0, 2]], 0);
            // 50, 50
            assert_eq!(ndarray[[0, 50, 50, 0]], 128);
            assert_eq!(ndarray[[0, 50, 50, 1]], 128);
            assert_eq!(ndarray[[0, 50, 50, 2]], 255);
            // 75, 75
            assert_eq!(ndarray[[0, 75, 75, 0]], 64);
            assert_eq!(ndarray[[0, 75, 75, 1]], 64);
            assert_eq!(ndarray[[0, 75, 75, 2]], 64);
            // 16, 49
            assert_eq!(ndarray[[0, 49, 16, 0]], 0);
            assert_eq!(ndarray[[0, 49, 16, 1]], 0);
            assert_eq!(ndarray[[0, 49, 16, 2]], 255);
            // The second frame is the inverse of the first frame
            // 0, 0
            assert_eq!(ndarray[[1, 0, 0, 0]], 0);
            assert_eq!(ndarray[[1, 0, 0, 1]], 255);
            assert_eq!(ndarray[[1, 0, 0, 2]], 255);
            // 50, 50
            assert_eq!(ndarray[[1, 50, 50, 0]], 127);
            assert_eq!(ndarray[[1, 50, 50, 1]], 127);
            assert_eq!(ndarray[[1, 50, 50, 2]], 0);
            // 75, 75
            assert_eq!(ndarray[[1, 75, 75, 0]], 191);
            assert_eq!(ndarray[[1, 75, 75, 1]], 191);
            assert_eq!(ndarray[[1, 75, 75, 2]], 191);
            // 16, 49
            assert_eq!(ndarray[[1, 49, 16, 0]], 255);
            assert_eq!(ndarray[[1, 49, 16, 1]], 255);
            assert_eq!(ndarray[[1, 49, 16, 2]], 0);
        }
2873
        /// RLE-compressed 16-bit RGB, single frame, decoded to an ndarray.
        #[cfg(feature = "ndarray")]
        #[test]
        fn test_native_decoding_pixel_data_rle_16bit_1frame() {
            use crate::{ConvertOptions, ModalityLutOption};

            let path = dicom_test_files::path("pydicom/SC_rgb_rle_16bit.dcm")
                .expect("test DICOM file should exist");
            let object = dicom_object::open_file(&path).unwrap();
            // disable the modality LUT so raw decoded values are compared
            let options = ConvertOptions::new().with_modality_lut(ModalityLutOption::None);
            let ndarray = object
                .decode_pixel_data()
                .unwrap()
                .to_ndarray_with_options::<u16>(&options)
                .unwrap();
            assert_eq!(ndarray.shape(), &[1, 100, 100, 3]);
            assert_eq!(ndarray.len(), 30_000);
            // 0,0
            assert_eq!(ndarray[[0, 0, 0, 0]], 65535);
            assert_eq!(ndarray[[0, 0, 0, 1]], 0);
            assert_eq!(ndarray[[0, 0, 0, 2]], 0);
            // 50,50
            assert_eq!(ndarray[[0, 50, 50, 0]], 32896);
            assert_eq!(ndarray[[0, 50, 50, 1]], 32896);
            assert_eq!(ndarray[[0, 50, 50, 2]], 65535);
            // 75,75
            assert_eq!(ndarray[[0, 75, 75, 0]], 16448);
            assert_eq!(ndarray[[0, 75, 75, 1]], 16448);
            assert_eq!(ndarray[[0, 75, 75, 2]], 16448);
            // 16, 49
            assert_eq!(ndarray[[0, 49, 16, 0]], 0);
            assert_eq!(ndarray[[0, 49, 16, 1]], 0);
            assert_eq!(ndarray[[0, 49, 16, 2]], 65535);
        }
2907
        /// RLE-compressed 16-bit RGB with two frames;
        /// the second frame is the inverse of the first.
        #[cfg(feature = "ndarray")]
        #[test]
        fn test_native_decoding_pixel_data_rle_16bit_2frame() {
            let path = dicom_test_files::path("pydicom/SC_rgb_rle_16bit_2frame.dcm")
                .expect("test DICOM file should exist");
            let object = dicom_object::open_file(&path).unwrap();
            let ndarray = object
                .decode_pixel_data()
                .unwrap()
                .to_ndarray::<u16>()
                .unwrap();
            // Validated using Numpy
            // This doesn't reshape the array based on the PlanarConfiguration
            // So for this scan the pixel layout is [Rlsb..Rmsb, Glsb..Gmsb, Blsb..msb]
            assert_eq!(ndarray.shape(), &[2, 100, 100, 3]);
            assert_eq!(ndarray.len(), 60_000);
            // 0,0
            assert_eq!(ndarray[[0, 0, 0, 0]], 65535);
            assert_eq!(ndarray[[0, 0, 0, 1]], 0);
            assert_eq!(ndarray[[0, 0, 0, 2]], 0);
            // 50,50
            assert_eq!(ndarray[[0, 50, 50, 0]], 32896);
            assert_eq!(ndarray[[0, 50, 50, 1]], 32896);
            assert_eq!(ndarray[[0, 50, 50, 2]], 65535);
            // 75,75
            assert_eq!(ndarray[[0, 75, 75, 0]], 16448);
            assert_eq!(ndarray[[0, 75, 75, 1]], 16448);
            assert_eq!(ndarray[[0, 75, 75, 2]], 16448);
            // 16, 49
            assert_eq!(ndarray[[0, 49, 16, 0]], 0);
            assert_eq!(ndarray[[0, 49, 16, 1]], 0);
            assert_eq!(ndarray[[0, 49, 16, 2]], 65535);
            // The second frame is the inverse of the first frame
            // 0,0
            assert_eq!(ndarray[[1, 0, 0, 0]], 0);
            assert_eq!(ndarray[[1, 0, 0, 1]], 65535);
            assert_eq!(ndarray[[1, 0, 0, 2]], 65535);
            // 50,50
            assert_eq!(ndarray[[1, 50, 50, 0]], 32639);
            assert_eq!(ndarray[[1, 50, 50, 1]], 32639);
            assert_eq!(ndarray[[1, 50, 50, 2]], 0);
            // 75,75
            assert_eq!(ndarray[[1, 75, 75, 0]], 49087);
            assert_eq!(ndarray[[1, 75, 75, 1]], 49087);
            assert_eq!(ndarray[[1, 75, 75, 2]], 49087);
            // 16, 49
            assert_eq!(ndarray[[1, 49, 16, 0]], 65535);
            assert_eq!(ndarray[[1, 49, 16, 1]], 65535);
            assert_eq!(ndarray[[1, 49, 16, 2]], 0);
        }
2958
        /// Upper bound on the number of frames rendered per parametrized
        /// test case, to keep multi-frame tests fast.
        #[cfg(feature = "image")]
        const MAX_TEST_FRAMES: u32 = 16;
2961
2962        #[cfg(feature = "image")]
2963        #[rstest]
2964        // jpeg2000 encoding
2965        #[cfg_attr(
2966            any(feature = "openjp2", feature = "openjpeg-sys"),
2967            case("pydicom/emri_small_jpeg_2k_lossless.dcm", 10)
2968        )]
2969        #[cfg_attr(
2970            any(feature = "openjp2", feature = "openjpeg-sys"),
2971            case("pydicom/693_J2KI.dcm", 1)
2972        )]
2973        #[cfg_attr(
2974            any(feature = "openjp2", feature = "openjpeg-sys"),
2975            case("pydicom/693_J2KR.dcm", 1)
2976        )]
2977        #[cfg_attr(
2978            any(feature = "openjp2", feature = "openjpeg-sys"),
2979            case("pydicom/JPEG2000.dcm", 1)
2980        )]
2981        //
2982        // jpeg-ls encoding
2983        #[cfg_attr(
2984            feature = "charls",
2985            case("pydicom/emri_small_jpeg_ls_lossless.dcm", 10)
2986        )]
2987        #[cfg_attr(feature = "charls", case("pydicom/MR_small_jpeg_ls_lossless.dcm", 1))]
2988        //
2989        // sample precision of 12 not supported yet
2990        #[should_panic(expected = "Unsupported(SamplePrecision(12))")]
2991        #[case("pydicom/JPEG-lossy.dcm", 1)]
2992        //
2993        // JPEG baseline (8bit)
2994        #[cfg_attr(feature = "jpeg", case("pydicom/color3d_jpeg_baseline.dcm", 120))]
2995        #[cfg_attr(feature = "jpeg", case("pydicom/SC_rgb_jpeg_lossy_gdcm.dcm", 1))]
2996        #[cfg_attr(feature = "jpeg", case("pydicom/SC_rgb_jpeg_gdcm.dcm", 1))]
2997        //
2998        // JPEG lossless
2999        #[cfg_attr(feature = "jpeg", case("pydicom/JPEG-LL.dcm", 1))]
3000        #[cfg_attr(feature = "jpeg", case("pydicom/JPGLosslessP14SV1_1s_1f_8b.dcm", 1))]
3001
3002        fn test_parse_jpeg_encoded_dicom_pixel_data(#[case] value: &str, #[case] frames: u32) {
3003            use crate::PixelDecoder as _;
3004            use std::fs;
3005            use std::path::Path;
3006
3007            let test_file = dicom_test_files::path(value).unwrap();
3008            println!("Parsing pixel data for {}", test_file.display());
3009            let obj = dicom_object::open_file(test_file).unwrap();
3010            let pixel_data = obj.decode_pixel_data().unwrap();
3011            assert_eq!(
3012                pixel_data.number_of_frames(),
3013                frames,
3014                "number of frames mismatch"
3015            );
3016
3017            let output_dir = Path::new(
3018                "../target/dicom_test_files/_out/test_parse_jpeg_encoded_dicom_pixel_data",
3019            );
3020            fs::create_dir_all(output_dir).unwrap();
3021
3022            for i in 0..pixel_data.number_of_frames().min(MAX_TEST_FRAMES) {
3023                let image = pixel_data
3024                    .to_dynamic_image(i)
3025                    .expect("failed to retrieve the frame requested");
3026                let image_path = output_dir.join(format!(
3027                    "{}-{}.png",
3028                    Path::new(value).file_stem().unwrap().to_str().unwrap(),
3029                    i,
3030                ));
3031                image.save(image_path).unwrap();
3032            }
3033        }
3034
3035        #[cfg(feature = "image")]
3036        #[rstest]
3037        #[cfg_attr(feature = "jpeg", case("pydicom/color3d_jpeg_baseline.dcm", 0))]
3038        #[cfg_attr(feature = "jpeg", case("pydicom/color3d_jpeg_baseline.dcm", 1))]
3039        #[cfg_attr(feature = "jpeg", case("pydicom/color3d_jpeg_baseline.dcm", 78))]
3040        #[cfg_attr(feature = "jpeg", case("pydicom/color3d_jpeg_baseline.dcm", 119))]
3041        #[case("pydicom/SC_rgb_rle_2frame.dcm", 0)]
3042        #[case("pydicom/SC_rgb_rle_2frame.dcm", 1)]
3043        #[case("pydicom/JPEG2000_UNC.dcm", 0)]
3044        #[cfg_attr(feature = "charls", case("pydicom/emri_small_jpeg_ls_lossless.dcm", 5))]
3045        #[cfg_attr(feature = "charls", case("pydicom/MR_small_jpeg_ls_lossless.dcm", 0))]
3046        fn test_decode_pixel_data_individual_frames(#[case] value: &str, #[case] frame: u32) {
3047            use crate::PixelDecoder as _;
3048            use std::path::Path;
3049
3050            let test_file = dicom_test_files::path(value).unwrap();
3051            println!("Parsing pixel data for {}", test_file.display());
3052            let obj = dicom_object::open_file(test_file).unwrap();
3053            let pixel_data = obj.decode_pixel_data_frame(frame).unwrap();
3054            let output_dir = Path::new(
3055                "../target/dicom_test_files/_out/test_decode_pixel_data_individual_frames",
3056            );
3057            std::fs::create_dir_all(output_dir).unwrap();
3058
3059            assert_eq!(pixel_data.number_of_frames(), 1, "expected 1 frame only");
3060
3061            let image = pixel_data.to_dynamic_image(0).unwrap();
3062            let image_path = output_dir.join(format!(
3063                "{}-{}.png",
3064                Path::new(value).file_stem().unwrap().to_str().unwrap(),
3065                frame,
3066            ));
3067            image.save(image_path).unwrap();
3068        }
3069    }
3070
3071    /// Loading a MONOCHROME1 image with encapsulated pixel data
3072    /// should not change the photometric interpretation
3073    /// (this rule does not apply to decoding via GDCM)
3074    #[cfg(all(feature = "jpeg", not(feature = "gdcm")))]
3075    #[test]
3076    fn test_monochrome1_decode_retains_pmi() {
3077        let path = dicom_test_files::path("WG04/JPLL/RG1_JPLL").unwrap();
3078        let obj = dicom_object::open_file(&path).unwrap();
3079        let pixel_data = obj.decode_pixel_data().unwrap();
3080        assert_eq!(
3081            pixel_data.photometric_interpretation(),
3082            &PhotometricInterpretation::Monochrome1
3083        );
3084    }
3085
3086    #[cfg(feature = "image")]
3087    #[test]
3088    fn test_interleave() {
3089        let planar: Vec<u8> = vec![
3090            1, 2, 3, 4, // R
3091            5, 6, 7, 8, // G
3092            9, 10, 11, 12, // B
3093        ];
3094        let interleaved: Vec<u8> = vec![1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12];
3095        assert_eq!(interleave(&planar), interleaved);
3096    }
3097
3098    #[cfg(feature = "image")]
3099    #[test]
3100    fn test_1bit_image_decoding_data() {
3101        use crate::PixelDecoder as _;
3102        use std::path::Path;
3103
3104        let test_file =
3105            dicom_test_files::path("pydicom/liver.dcm").expect("test DICOM file should exist");
3106        let obj = dicom_object::open_file(test_file).unwrap();
3107        let pixel_data = obj.decode_pixel_data().unwrap();
3108
3109        assert_eq!(pixel_data.number_of_frames(), 3, "expected 3 frames");
3110
3111        let output_dir = Path::new("../target/dicom_test_files/_out/test_1bit_image_decoding");
3112        std::fs::create_dir_all(output_dir).unwrap();
3113
3114        for idx in 0..=2 {
3115            let image = pixel_data.to_dynamic_image(idx).unwrap();
3116            let image_path = output_dir.join(format!(
3117                "{}-image-{}.png",
3118                Path::new("pydicom/liver.dcm")
3119                    .file_stem()
3120                    .unwrap()
3121                    .to_str()
3122                    .unwrap(),
3123                idx,
3124            ));
3125            image.save(image_path).unwrap();
3126        }
3127    }
3128
3129    #[cfg(feature = "image")]
3130    #[test]
3131    fn test_1bit_image_decoding_data_frame() {
3132        use crate::PixelDecoder as _;
3133        use std::path::Path;
3134
3135        let test_file =
3136            dicom_test_files::path("pydicom/liver.dcm").expect("test DICOM file should exist");
3137        println!("Parsing pixel data for {}", test_file.display());
3138        let obj = dicom_object::open_file(test_file).unwrap();
3139        let output_dir = Path::new("../target/dicom_test_files/_out/test_1bit_image_decoding");
3140        std::fs::create_dir_all(output_dir).unwrap();
3141
3142        for idx in 0..=2 {
3143            let pixel_data = obj.decode_pixel_data_frame(idx).unwrap();
3144
3145            assert_eq!(pixel_data.number_of_frames(), 1, "expected 1 frame only");
3146
3147            let image = pixel_data.to_dynamic_image(0).unwrap();
3148            let image_path = output_dir.join(format!(
3149                "{}-frame-{}.png",
3150                Path::new("pydicom/liver.dcm")
3151                    .file_stem()
3152                    .unwrap()
3153                    .to_str()
3154                    .unwrap(),
3155                idx
3156            ));
3157            image.save(image_path).unwrap();
3158        }
3159    }
3160}