// openh264/decoder.rs
1//! Converts NAL packets to YUV images.
2//!
3//! # Examples
4//!
5//! Basic [Decoder] use looks as follows. In practice, you might get your `h264`
6//! bitstream from reading a file or network source.
7//!
8//! ```rust
9//! use openh264::decoder::Decoder;
10//! use openh264::nal_units;
11//!
12//! # use openh264::{Error, OpenH264API};
13//! # fn main() -> Result<(), Error> {
14//! let h264_in = include_bytes!("../tests/data/multi_512x512.h264");
15//! let mut decoder = Decoder::new()?;
16//!
17//! for packet in nal_units(h264_in) {
18//!     // If everything goes well this should yield a `DecodedYUV`.
19//!     // It can also be `Err()` if the bitstream had errors, or
20//!     // `Ok(None)` if no pictures were available (yet).
21//!     let Ok(Some(yuv)) = decoder.decode(packet) else { continue };
22//! }
23//! # Ok(())
24//! # }
25//! ```
26//!
27//! Once you have your `yuv`, which should be of type [`DecodedYUV`], you can proceed to converting it to RGB:
28//!
29//! ```rust
30//! # use openh264::decoder::Decoder;
31//! # use openh264::nal_units;
32//! use openh264::formats::YUVSource;
33//!
34//! # use openh264::{Error, OpenH264API};
35//! # fn main() -> Result<(), Error> {
36//! #
37//! # let h264_in = include_bytes!("../tests/data/multi_512x512.h264");
38//! # let mut decoder = Decoder::new()?;
39//! #
40//! # for packet in nal_units(h264_in) {
41//! #    let Ok(Some(yuv)) = decoder.decode(packet) else { continue; };
42//! let rgb_len = yuv.rgb8_len();
43//! let mut rgb_raw = vec![0; rgb_len];
44//!
45//! yuv.write_rgb8(&mut rgb_raw);
46//! # }
47//! # Ok(())
48//! # }
49//! ```
50
51use crate::error::NativeErrorExt;
52use crate::formats::yuv2rgb::{write_rgb8_f32x8, write_rgb8_scalar, write_rgba8_f32x8, write_rgba8_scalar};
53// use crate::formats::yuv2rgb::{write_rgb8_f32x8, write_rgb8_f32x8_par, write_rgb8_scalar, write_rgb8_scalar_par};
54use crate::formats::{YUVSlices, YUVSource};
55use crate::{Error, OpenH264API, Timestamp};
56use openh264_sys2::{
57    API, DECODER_OPTION, DECODER_OPTION_ERROR_CON_IDC, DECODER_OPTION_NUM_OF_FRAMES_REMAINING_IN_BUFFER,
58    DECODER_OPTION_NUM_OF_THREADS, DECODER_OPTION_TRACE_LEVEL, DECODING_STATE, ISVCDecoder, ISVCDecoderVtbl, SBufferInfo,
59    SDecodingParam, SParserBsInfo, SSysMEMBuffer, SVideoProperty, TagBufferInfo, WELS_LOG_DETAIL, WELS_LOG_QUIET,
60    videoFormatI420,
61};
62use std::os::raw::{c_int, c_long, c_uchar, c_void};
63use std::ptr::{addr_of_mut, from_mut, null, null_mut};
64
/// Convenience wrapper with guaranteed function pointers for easy access.
///
/// This struct automatically handles `WelsCreateDecoder` and `WelsDestroyDecoder`.
#[rustfmt::skip]
#[allow(non_snake_case)]
pub struct DecoderRawAPI {
    // Keeps the loaded OpenH264 API alive for as long as this wrapper exists;
    // also needed in `Drop` to call `WelsDestroyDecoder`.
    api: OpenH264API,
    // The C "object": a pointer to a vtable pointer, created by `WelsCreateDecoder`.
    decoder_ptr: *mut *const ISVCDecoderVtbl,
    // Function pointers extracted from the vtable once during construction so call
    // sites don't have to unwrap the `Option`s inside `ISVCDecoderVtbl` on every call.
    initialize: unsafe extern "C" fn(arg1: *mut ISVCDecoder, pParam: *const SDecodingParam) -> c_long,
    uninitialize: unsafe extern "C" fn(arg1: *mut ISVCDecoder) -> c_long,
    decode_frame: unsafe extern "C" fn(arg1: *mut ISVCDecoder, pSrc: *const c_uchar, iSrcLen: c_int, ppDst: *mut *mut c_uchar, pStride: *mut c_int, iWidth: *mut c_int, iHeight: *mut c_int) -> DECODING_STATE,
    decode_frame_no_delay: unsafe extern "C" fn(arg1: *mut ISVCDecoder, pSrc: *const c_uchar, iSrcLen: c_int, ppDst: *mut *mut c_uchar, pDstInfo: *mut SBufferInfo) -> DECODING_STATE,
    decode_frame2: unsafe extern "C" fn(arg1: *mut ISVCDecoder, pSrc: *const c_uchar, iSrcLen: c_int, ppDst: *mut *mut c_uchar, pDstInfo: *mut SBufferInfo) -> DECODING_STATE,
    flush_frame:  unsafe extern "C" fn(arg1: *mut ISVCDecoder, ppDst: *mut *mut c_uchar, pDstInfo: *mut SBufferInfo) -> DECODING_STATE,
    decode_parser: unsafe extern "C" fn(arg1: *mut ISVCDecoder, pSrc: *const c_uchar, iSrcLen: c_int, pDstInfo: *mut SParserBsInfo) -> DECODING_STATE,
    decode_frame_ex: unsafe extern "C" fn(arg1: *mut ISVCDecoder, pSrc: *const c_uchar, iSrcLen: c_int, pDst: *mut c_uchar, iDstStride: c_int, iDstLen: *mut c_int, iWidth: *mut c_int, iHeight: *mut c_int, iColorFormat: *mut c_int) -> DECODING_STATE,
    set_option: unsafe extern "C" fn(arg1: *mut ISVCDecoder, eOptionId: DECODER_OPTION, pOption: *mut c_void) -> c_long,
    get_option: unsafe extern "C" fn(arg1: *mut ISVCDecoder, eOptionId: DECODER_OPTION, pOption: *mut c_void) -> c_long,
}
84
#[rustfmt::skip]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::missing_safety_doc)]
#[allow(non_snake_case, unused, missing_docs)]
impl DecoderRawAPI {
    /// Creates the raw decoder via `WelsCreateDecoder` and caches all vtable entries.
    ///
    /// Fails if decoder creation fails or if any expected vtable slot is `None`.
    fn new(api: OpenH264API) -> Result<Self, Error> {
        unsafe {
            // Out-parameter for `WelsCreateDecoder`; starts as a null "pointer to vtable pointer".
            let mut decoder_ptr = null::<ISVCDecoderVtbl>() as *mut *const ISVCDecoderVtbl;

            api.WelsCreateDecoder(from_mut(&mut decoder_ptr)).ok()?;

            // Shared error constructor for any missing vtable entry below.
            let e = || {
                Error::msg("VTable missing function.")
            };

            // Double deref: `decoder_ptr` points at the object's vtable pointer,
            // which in turn points at the vtable holding the `Option<fn>` slots.
            Ok(Self {
                api,
                decoder_ptr,
                initialize: (*(*decoder_ptr)).Initialize.ok_or_else(e)?,
                uninitialize: (*(*decoder_ptr)).Uninitialize.ok_or_else(e)?,
                decode_frame: (*(*decoder_ptr)).DecodeFrame.ok_or_else(e)?,
                decode_frame_no_delay: (*(*decoder_ptr)).DecodeFrameNoDelay.ok_or_else(e)?,
                decode_frame2: (*(*decoder_ptr)).DecodeFrame2.ok_or_else(e)?,
                flush_frame: (*(*decoder_ptr)).FlushFrame.ok_or_else(e)?,
                decode_parser: (*(*decoder_ptr)).DecodeParser.ok_or_else(e)?,
                decode_frame_ex: (*(*decoder_ptr)).DecodeFrameEx.ok_or_else(e)?,
                set_option: (*(*decoder_ptr)).SetOption.ok_or_else(e)?,
                get_option: (*(*decoder_ptr)).GetOption.ok_or_else(e)?,
            })
        }
    }

    // Exposing these will probably do more harm than good.
    unsafe fn initialize(&self, pParam: *const SDecodingParam) -> c_long { unsafe { (self.initialize)(self.decoder_ptr, pParam) }}
    unsafe fn uninitialize(&self, ) -> c_long { unsafe { (self.uninitialize)(self.decoder_ptr) }}

    // Thin wrappers that forward to the cached function pointers, always passing
    // `self.decoder_ptr` as the C `this` argument. Safety contracts are those of the
    // underlying OpenH264 C API (see `codec_api.h` upstream).
    pub unsafe fn decode_frame(&self, Src: *const c_uchar, iSrcLen: c_int, ppDst: *mut *mut c_uchar, pStride: *mut c_int, iWidth: *mut c_int, iHeight: *mut c_int) -> DECODING_STATE { unsafe { (self.decode_frame)(self.decoder_ptr, Src, iSrcLen, ppDst, pStride, iWidth, iHeight) }}
    pub unsafe fn decode_frame_no_delay(&self, pSrc: *const c_uchar, iSrcLen: c_int, ppDst: *mut *mut c_uchar, pDstInfo: *mut SBufferInfo) -> DECODING_STATE { unsafe { (self.decode_frame_no_delay)(self.decoder_ptr, pSrc, iSrcLen, ppDst, pDstInfo) }}
    pub unsafe fn decode_frame2(&self, pSrc: *const c_uchar, iSrcLen: c_int, ppDst: *mut *mut c_uchar, pDstInfo: *mut SBufferInfo) -> DECODING_STATE { unsafe { (self.decode_frame2)(self.decoder_ptr, pSrc, iSrcLen, ppDst, pDstInfo) }}
    pub unsafe fn flush_frame(&self, ppDst: *mut *mut c_uchar, pDstInfo: *mut SBufferInfo) -> DECODING_STATE { unsafe { (self.flush_frame)(self.decoder_ptr, ppDst, pDstInfo) }}
    pub unsafe fn decode_parser(&self, pSrc: *const c_uchar, iSrcLen: c_int, pDstInfo: *mut SParserBsInfo) -> DECODING_STATE { unsafe { (self.decode_parser)(self.decoder_ptr, pSrc, iSrcLen, pDstInfo) }}
    pub unsafe fn decode_frame_ex(&self, pSrc: *const c_uchar, iSrcLen: c_int, pDst: *mut c_uchar, iDstStride: c_int, iDstLen: *mut c_int, iWidth: *mut c_int, iHeight: *mut c_int, iColorFormat: *mut c_int) -> DECODING_STATE { unsafe { (self.decode_frame_ex)(self.decoder_ptr, pSrc, iSrcLen, pDst, iDstStride, iDstLen, iWidth, iHeight, iColorFormat) }}
    pub unsafe fn set_option(&self, eOptionId: DECODER_OPTION, pOption: *mut c_void) -> c_long { unsafe {  (self.set_option)(self.decoder_ptr, eOptionId, pOption) }}
    pub unsafe fn get_option(&self, eOptionId: DECODER_OPTION, pOption: *mut c_void) -> c_long { unsafe { (self.get_option)(self.decoder_ptr, eOptionId, pOption) }}
}
130
impl Drop for DecoderRawAPI {
    fn drop(&mut self) {
        // SAFETY: Safe because when we drop the pointer must have been initialized
        // (`new` either fully succeeds or never hands out a value), and we aren't
        // clone, so this is the only release of the decoder instance.
        unsafe {
            self.api.WelsDestroyDecoder(self.decoder_ptr);
        }
    }
}
139
// SAFETY: These impls are required because of the raw `decoder_ptr` field.
// NOTE(review): this assumes the OpenH264 decoder instance may be used from other
// threads than the one that created it — confirm against upstream OpenH264 docs.
unsafe impl Send for DecoderRawAPI {}
unsafe impl Sync for DecoderRawAPI {}
142
/// How the decoder should handle flushing.
///
/// The behavior of flushing is somewhat unclear upstream. If you run into decoder errors,
/// you should probably disable automatic flushing, and manually call [`Decoder::flush_remaining`]
/// after all NAL units have been processed. It might be a good idea to do the latter regardless.
///
/// If you have more info on flushing best practices, we'd greatly appreciate a PR to make our
/// decoding pipeline more robust.
#[derive(Default, Copy, Clone, Debug, Eq, PartialEq)]
pub enum Flush {
    /// Uses the currently configured decoder default (which is attempted flushing after each decode).
    #[default]
    Auto,
    /// Flushes after each decode operation.
    Flush,
    /// Do not flush after decode operations.
    NoFlush,
}
161
162impl Flush {
163    /// Given some existing flush options and some current frame decode options, returns
164    /// whether flushing should happen.
165    #[allow(clippy::match_same_arms)]
166    #[allow(clippy::needless_pass_by_value)]
167    const fn should_flush(self, decoder_options: DecodeOptions) -> bool {
168        match (self, decoder_options.flush_after_decode) {
169            (Self::Auto, Self::Auto) => true,
170            (Self::NoFlush, Self::Auto) => false,
171            (Self::Flush, Self::Auto) => true,
172            (_, Self::NoFlush) => false,
173            (_, Self::Flush) => true,
174        }
175    }
176}
177
/// Configuration for the [`Decoder`].
///
/// Setting missing? Please file a PR!
#[derive(Default, Copy, Clone, Debug)]
#[must_use]
pub struct DecoderConfig {
    // Raw parameter struct passed to `ISVCDecoder::Initialize`.
    params: SDecodingParam,
    // Applied via `DECODER_OPTION_NUM_OF_THREADS`; see `num_threads()` for caveats.
    num_threads: DECODER_OPTION,
    // Trace level applied via `DECODER_OPTION_TRACE_LEVEL`.
    debug: DECODER_OPTION,
    // Applied via `DECODER_OPTION_ERROR_CON_IDC`.
    error_concealment: DECODER_OPTION,
    // Default flush behavior; can be overridden per decode via `DecodeOptions`.
    flush_after_decode: Flush,
}
190
// SAFETY: Required because `SDecodingParam` contains a raw pointer
// (`pFileNameRestructed`); `new()` only ever stores `null_mut()` there, and this
// module never writes a non-null value into it.
unsafe impl Send for DecoderConfig {}
unsafe impl Sync for DecoderConfig {}
193
impl DecoderConfig {
    /// Creates a new default decoder config.
    pub const fn new() -> Self {
        Self {
            params: SDecodingParam {
                // Never set to a real path; see the `Send`/`Sync` impls above.
                pFileNameRestructed: null_mut(),
                uiCpuLoad: 0,
                uiTargetDqLayer: 0,
                eEcActiveIdc: 0,
                bParseOnly: false,
                sVideoProperty: SVideoProperty {
                    size: 0,
                    eVideoBsType: 0,
                },
            },
            num_threads: 0,
            // Silent by default; `debug(true)` switches to `WELS_LOG_DETAIL`.
            debug: WELS_LOG_QUIET,
            error_concealment: 0,
            flush_after_decode: Flush::Flush,
        }
    }

    /// Sets the number of threads; will probably segfault the decoder, see below.<sup>⚠️</sup>
    ///
    /// # Safety
    ///
    /// This setting might work on some platforms but will probably just segfault.
    /// Consider this a _highly_ experimental option we only expose to test if and
    /// where threading actually works. Ultimately you should consult with the upstream
    /// OpenH264 project where and when it is safe to set this.
    ///
    /// See [this issue](https://github.com/ralfbiedert/openh264-rust/issues/10) for details.
    pub const unsafe fn num_threads(mut self, num_threads: u32) -> Self {
        self.num_threads = num_threads as i32;
        self
    }

    /// Enables detailed console logging inside OpenH264.
    pub const fn debug(mut self, value: bool) -> Self {
        self.debug = if value { WELS_LOG_DETAIL } else { WELS_LOG_QUIET };
        self
    }

    /// Sets the default flush behavior after decode operations.
    pub const fn flush_after_decode(mut self, flush_behavior: Flush) -> Self {
        self.flush_after_decode = flush_behavior;
        self
    }
}
243
/// Configuration for the current decode operation.
#[derive(Default, Clone, Debug, Eq, PartialEq)]
pub struct DecodeOptions {
    // Per-call flush preference; `Flush::Auto` defers to the decoder's configured default.
    flush_after_decode: Flush,
}
249
250impl DecodeOptions {
251    /// Creates new decoder options.
252    #[must_use]
253    pub const fn new() -> Self {
254        Self {
255            flush_after_decode: Flush::Auto,
256        }
257    }
258
259    /// Sets the flush behavior for the upcoming decode operation.
260    #[must_use]
261    pub const fn flush_after_decode(mut self, value: Flush) -> Self {
262        self.flush_after_decode = value;
263        self
264    }
265}
266
/// An [OpenH264](https://github.com/cisco/openh264) decoder.
pub struct Decoder {
    // Owns the native decoder instance; dropped (and destroyed) with this struct.
    raw_api: DecoderRawAPI,
    // Kept so per-decode calls can consult the configured default flush behavior.
    config: DecoderConfig,
}
272
impl Decoder {
    /// Create a decoder with default settings and the built-in decoder.
    ///
    /// This method is only available when compiling with the `source` feature.
    ///
    /// # Errors
    ///
    /// This should never error, but the underlying OpenH264 decoder has an error indication and
    /// since we don't know their code that well we just can't guarantee it.
    #[cfg(feature = "source")]
    pub fn new() -> Result<Self, Error> {
        let api = OpenH264API::from_source();
        Self::with_api_config(api, DecoderConfig::new())
    }

    /// Create a decoder with the provided [API](OpenH264API) and [configuration](DecoderConfig).
    ///
    /// # Errors
    ///
    /// Might fail if the provided encoder parameters had issues.
    pub fn with_api_config(api: OpenH264API, mut config: DecoderConfig) -> Result<Self, Error> {
        let raw_api = DecoderRawAPI::new(api)?;

        // config.params.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_AVC;

        // `config` must be `mut` because `set_option` takes `*mut` pointers to its fields.
        #[rustfmt::skip]
        unsafe {
            raw_api.initialize(&raw const config.params).ok()?;
            raw_api.set_option(DECODER_OPTION_TRACE_LEVEL, addr_of_mut!(config.debug).cast()).ok()?;
            raw_api.set_option(DECODER_OPTION_NUM_OF_THREADS, addr_of_mut!(config.num_threads).cast()).ok()?;
            raw_api.set_option(DECODER_OPTION_ERROR_CON_IDC, addr_of_mut!(config.error_concealment).cast()).ok()?;
        };

        Ok(Self { raw_api, config })
    }

    /// Decodes a series of H.264 NAL packets and returns the latest picture.
    ///
    /// This is a convenience wrapper around [`decode_with_options`](Self::decode_with_options) that uses default decoding options.
    ///
    /// # Errors
    ///
    /// The function returns an error if the bitstream was corrupted.
    pub fn decode(&mut self, packet: &[u8]) -> Result<Option<DecodedYUV<'_>>, Error> {
        self.decode_with_options(packet, DecodeOptions::default())
    }

    /// Decodes a series of H.264 NAL packets and returns the latest picture.
    ///
    /// This function can be called with:
    ///
    /// - only the complete SPS / PPS header (usually the first some 30 bytes of a H.264 stream),
    /// - the headers and series of complete frames,
    /// - new frames after previous headers and frames were successfully decoded.
    ///
    /// In each case, it will return `Some(decoded)` image in YUV format if an image was available, or `None`
    /// if more data needs to be provided. If `options` contains [`Flush`](Flush::Flush) (or if this
    /// is set as the decoder default), it will try to flush a frame if no image was available.
    ///
    /// In any case, it is probably a good idea to call [`Decoder::flush_remaining`] after you
    /// finished decoding all available NAL units.
    ///
    /// # Errors
    ///
    /// - The function returns an error if the bitstream was corrupted.
    /// - Also, flushing best practices are somewhat hard to come by in OpenH264. You might get errors
    ///   if you flushed when you shouldn't have, although we cannot exactly tell you when that is.
    ///   If you have more information on how to make this more robust, a PR would be greatly welcome.
    pub fn decode_with_options(&mut self, packet: &[u8], options: DecodeOptions) -> Result<Option<DecodedYUV<'_>>, Error> {
        // OpenH264 fills these three pointers with the Y, U and V plane addresses.
        let mut dst = [null_mut::<u8>(); 3];
        let mut buffer_info = SBufferInfo::default();
        let flush = self.config.flush_after_decode.should_flush(options);

        unsafe {
            self.raw_api
                .decode_frame_no_delay(
                    packet.as_ptr(),
                    packet.len() as i32,
                    from_mut(&mut dst).cast(),
                    &raw mut buffer_info,
                )
                .ok()?;
        }

        match (buffer_info.iBufferStatus, flush) {
            // No outstanding images, but asked to flush, and flushable frames available?
            (0, true) if self.num_frames_in_buffer()? > 0 => {
                let (dst, buffer_info) = self.flush_single_frame_raw()?;

                if buffer_info.iBufferStatus == 0 {
                    return Err(Error::msg(
                        "Buffer status invalid, we have outstanding frames but failed to flush them.",
                    ));
                }

                unsafe { Ok(DecodedYUV::from_raw_open264_ptrs(&dst, &buffer_info)) }
            }
            // No outstanding images otherwise? Nothing to do.
            (0, _) => Ok(None),
            // Outstanding images otherwise? Return one.
            _ => unsafe { Ok(DecodedYUV::from_raw_open264_ptrs(&dst, &buffer_info)) },
        }
    }

    /// Flush and return all remaining frames in the buffer.
    ///
    /// This function should be called after decoding all frames of a NAL stream.
    ///
    /// # Errors
    ///
    /// The function returns an error if the bitstream was corrupted.
    pub fn flush_remaining(&'_ mut self) -> Result<Vec<DecodedYUV<'_>>, Error> {
        let mut frames = Vec::new();

        // Ask the decoder how many frames remain, then flush them one by one.
        for _ in 0..self.num_frames_in_buffer()? {
            let (dst, buffer_info) = self.flush_single_frame_raw()?;

            // `from_raw_open264_ptrs` may soft-fail (null planes); skip those frames.
            if let Some(image) = unsafe { DecodedYUV::from_raw_open264_ptrs(&dst, &buffer_info) } {
                frames.push(image);
            }
        }

        Ok(frames)
    }

    /// Obtain the raw API for advanced use cases.
    ///
    /// When resorting to this call, please consider filing an issue / PR.
    ///
    /// # Safety
    ///
    /// You must not set parameters the decoder relies on, we recommend checking the source.
    ///
    /// # Example
    ///
    /// ```
    /// use openh264::decoder::{DecoderConfig, Decoder};
    ///
    /// # use openh264::{Error, OpenH264API};
    /// #
    /// # fn try_main() -> Result<(), Error> {
    /// let api = OpenH264API::from_source();
    /// let config = DecoderConfig::default();
    /// let mut decoder = Decoder::with_api_config(api, config)?;
    ///
    /// unsafe {
    ///     _ = decoder.raw_api();
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub const unsafe fn raw_api(&mut self) -> &mut DecoderRawAPI {
        &mut self.raw_api
    }

    /// Returns the number of frames currently remaining in the buffer.
    fn num_frames_in_buffer(&mut self) -> Result<usize, Error> {
        let mut num_frames: DECODER_OPTION = 0;
        unsafe {
            self.raw_api()
                .get_option(
                    DECODER_OPTION_NUM_OF_FRAMES_REMAINING_IN_BUFFER,
                    addr_of_mut!(num_frames).cast(),
                )
                .ok()?;
        }

        Ok(num_frames as usize)
    }

    /// Attempts to flush a single frame (i.e., produce a new YUV from previously passed bitstream data), if available.
    ///
    /// Returns the raw plane pointers and buffer info; callers must check
    /// `iBufferStatus` / null pointers before use.
    fn flush_single_frame_raw(&mut self) -> Result<([*mut u8; 3], TagBufferInfo), Error> {
        let mut dst = [null_mut::<u8>(); 3];
        let mut buffer_info = SBufferInfo::default();

        unsafe {
            self.raw_api()
                .flush_frame(from_mut(&mut dst).cast(), &raw mut buffer_info)
                .ok()?;
            Ok((dst, buffer_info))
        }
    }
}
456
impl Drop for Decoder {
    fn drop(&mut self) {
        // SAFETY: Safe because when we drop the pointer must have been initialized
        // (`with_api_config` calls `initialize` before a `Decoder` can exist).
        // `WelsDestroyDecoder` itself is handled by `DecoderRawAPI`'s own `Drop`.
        unsafe {
            self.raw_api.uninitialize();
        }
    }
}
465
/// Frame returned by the [`Decoder`] and provides safe data access.
#[derive(Debug)]
pub struct DecodedYUV<'a> {
    // Width / height / strides / format as reported by OpenH264 for this frame.
    info: SSysMEMBuffer,
    timestamp: Timestamp,

    // Plane slices built over decoder-owned memory; the lifetime `'a` ties them to the
    // `&mut Decoder` borrow, so they cannot outlive the next decode call.
    y: &'a [u8],
    u: &'a [u8],
    v: &'a [u8],
}
476
impl DecodedYUV<'_> {
    /// Attempts to create a decoded YUV wrapper from a set of Open264 pointers.
    ///
    /// This can soft-fail (return `None`) because we might still have gotten `null` pointers from
    /// OpenH264 despite it not having returned an error on decode.
    const unsafe fn from_raw_open264_ptrs(dst: &[*mut u8; 3], buffer_info: &TagBufferInfo) -> Option<Self> {
        unsafe {
            let info = buffer_info.UsrData.sSystemBuffer;
            let timestamp = Timestamp::from_millis(buffer_info.uiInBsTimeStamp); // TODO: Is this the right one?

            // Apparently it is ok for `decode_frame_no_delay` to not return an error _and_ to return null buffers. In this case
            // the user should try to continue decoding.
            if dst[0].is_null() || dst[1].is_null() || dst[2].is_null() {
                None
            } else {
                // Plane sizes are derived from strides, not widths (planes may be padded):
                // https://github.com/cisco/openh264/issues/2379
                // Chroma planes have half the rows, hence `/ 2`; both share `iStride[1]`.
                let y = std::slice::from_raw_parts(dst[0], (info.iHeight * info.iStride[0]) as usize);
                let u = std::slice::from_raw_parts(dst[1], (info.iHeight * info.iStride[1] / 2) as usize);
                let v = std::slice::from_raw_parts(dst[2], (info.iHeight * info.iStride[1] / 2) as usize);

                Some(Self {
                    info,
                    timestamp,
                    y,
                    u,
                    v,
                })
            }
        }
    }

    /// Returns the unpadded U size.
    ///
    /// This is often smaller (by half) than the image size.
    #[must_use]
    pub const fn dimensions_uv(&self) -> (usize, usize) {
        (self.info.iWidth as usize / 2, self.info.iHeight as usize / 2)
    }

    /// Timestamp of this frame in milliseconds(?) with respect to the video stream.
    #[must_use]
    pub const fn timestamp(&self) -> Timestamp {
        self.timestamp
    }

    /// Cut the YUV buffer into vertical sections.
    ///
    /// The slices do not overlap. If N does not divide the buffer, then the last YUVSlice has fewer pixel rows.
    ///
    /// NOTE(review): when N does not divide the plane lengths, `chunks()` yields N+1
    /// chunks and the trailing remainder chunk is dropped, while each part still
    /// reports a height of `iHeight / N` — confirm this is the intended behavior.
    pub fn split<const N: usize>(&'_ self) -> [YUVSlices<'_>; N] {
        // Fast path: one section is just the whole buffer.
        if N == 1 {
            return [YUVSlices::new((self.y, self.u, self.v), self.dimensions(), self.strides()); N];
        }

        let y_chunks: Vec<&[u8]> = self.y.chunks(self.y.len() / N).collect();
        let u_chunks: Vec<&[u8]> = self.u.chunks(self.u.len() / N).collect();
        let v_chunks: Vec<&[u8]> = self.v.chunks(self.v.len() / N).collect();

        // Seed the array with full-buffer slices (array init needs a value), then
        // overwrite each entry with its chunk.
        let mut parts = [YUVSlices::new((self.y, self.u, self.v), self.dimensions(), self.strides()); N];
        for i in 0..N {
            parts[i] = YUVSlices::new(
                (y_chunks[i], u_chunks[i], v_chunks[i]),
                (self.dimensions().0, self.info.iHeight as usize / N),
                self.strides(),
            );
        }

        parts
    }

    // TODO: Ideally we'd like to move these out into a converter in `formats`.
    /// Writes the image into a byte buffer of size `w*h*3`.
    ///
    /// # Panics
    ///
    /// Panics if the target image dimension don't match the configured format.
    #[allow(clippy::unnecessary_cast)]
    pub fn write_rgb8(&self, target: &mut [u8]) {
        let dim = self.dimensions();
        let strides = self.strides();
        let wanted = dim.0 * dim.1 * 3;

        // This needs some love, and better architecture.
        assert_eq!(self.info.iFormat, videoFormatI420 as i32);
        assert_eq!(
            target.len(),
            wanted,
            "Target RGB8 array does not match image dimensions. Wanted: {} * {} * 3 = {}, got {}",
            dim.0,
            dim.1,
            wanted,
            target.len()
        );

        // for f32x8 math, image needs to:
        //   - have a width divisible by 8
        //   - have at least two rows
        if dim.0 % 8 == 0 && dim.1 >= 2 {
            write_rgb8_f32x8(self.y, self.u, self.v, dim, strides, target);
        } else {
            write_rgb8_scalar(self.y, self.u, self.v, dim, strides, target);
        }
    }

    // TODO: Ideally we'd like to move these out into a converter in `formats`.
    /// Writes the image into a byte buffer of size `w*h*4`.
    ///
    /// # Panics
    ///
    /// Panics if the target image dimension don't match the configured format.
    #[allow(clippy::unnecessary_cast)]
    pub fn write_rgba8(&self, target: &mut [u8]) {
        let dim = self.dimensions();
        let strides = self.strides();
        let wanted = dim.0 * dim.1 * 4;

        // This needs some love, and better architecture.
        assert_eq!(self.info.iFormat, videoFormatI420 as i32);
        assert_eq!(
            target.len(),
            wanted,
            "Target RGBA8 array does not match image dimensions. Wanted: {} * {} * 4 = {}, got {}",
            dim.0,
            dim.1,
            wanted,
            target.len()
        );
        // for f32x8 math, image needs to:
        //   - have a width divisible by 8
        //   - have at least two rows
        if dim.0 % 8 == 0 && dim.1 >= 2 {
            write_rgba8_f32x8(self.y, self.u, self.v, dim, strides, target);
        } else {
            write_rgba8_scalar(self.y, self.u, self.v, dim, strides, target);
        }
    }
}
613
614impl YUVSource for DecodedYUV<'_> {
615    fn dimensions_i32(&self) -> (i32, i32) {
616        (self.info.iWidth, self.info.iHeight)
617    }
618
619    fn dimensions(&self) -> (usize, usize) {
620        (self.info.iWidth as usize, self.info.iHeight as usize)
621    }
622
623    fn strides(&self) -> (usize, usize, usize) {
624        // iStride is an array of size 2, so indices are really (0, 1, 1)
625        (
626            self.info.iStride[0] as usize,
627            self.info.iStride[1] as usize,
628            self.info.iStride[1] as usize,
629        )
630    }
631
632    fn strides_i32(&self) -> (i32, i32, i32) {
633        // iStride is an array of size 2, so indices are really (0, 1, 1)
634        (self.info.iStride[0], self.info.iStride[1], self.info.iStride[1])
635    }
636
637    fn y(&self) -> &[u8] {
638        self.y
639    }
640
641    fn u(&self) -> &[u8] {
642        self.u
643    }
644
645    fn v(&self) -> &[u8] {
646        self.v
647    }
648}
649
#[cfg(test)]
mod test {
    use openh264_sys2::SSysMEMBuffer;

    use crate::{
        Timestamp,
        formats::{YUVSlices, YUVSource},
    };

    use super::DecodedYUV;

    /// Create YUV420 plane buffers.
    ///
    /// Usage: `let (y, u, v) = yuv420_planes!(y_stride: 132, height: 128);`
    macro_rules! yuv420_planes {
        (y_stride: $y_stride:literal, height: $height:literal) => {{
            // iterate over numbers from 0..255 and start from 0 after 255
            let numbers = (0..u32::MAX).map(|i| (i % 256) as u8);

            let y_plane_len = ($y_stride * $height) as usize;
            let y = numbers.clone().take(y_plane_len).collect::<Vec<_>>();

            // u & v planes are a quarter of the y plane length in YUV420
            let uv_plane_len = ($y_stride * $height / 4) as usize;
            let u = numbers.clone().take(uv_plane_len).collect::<Vec<_>>();
            let v = numbers.clone().take(uv_plane_len).collect::<Vec<_>>();

            (y, u, v)
        }};
    }

    /// Create a mock [`DecodedYUV`] with a YUV420 `iFormat` and `Timestamp::ZERO`.
    ///
    /// Usage: `let buf = decoded_yuv420!(y_stride: 132, dim: (128, 128), &y, &u, &v);`
    macro_rules! decoded_yuv420 {
        (y_stride: $y_stride:literal, dim: ($width:literal, $height:literal), $y:expr, $u:expr, $v:expr) => {
            DecodedYUV {
                info: SSysMEMBuffer {
                    iWidth: $width,
                    iHeight: $height,
                    // YUV420 see: https://github.com/cisco/openh264/blob/0c9a557a9a6f1d267c4d372221669a8ae69ccda0/codec/api/wels/codec_def.h#L56
                    iFormat: 23,
                    // `2 as i32` binds tighter than `/`, so this is still `$y_stride / 2`
                    // — the shared chroma stride.
                    iStride: [$y_stride as i32, $y_stride / 2 as i32],
                },
                timestamp: Timestamp::ZERO,
                y: $y,
                u: $u,
                v: $v,
            }
        };
    }

    /// `split::<1>()` must return the whole, untouched planes.
    #[test]
    fn test_split_01() {
        // smallest possible buffer in YUV420
        let (y, u, v) = yuv420_planes!(y_stride: 4, height: 4);
        let buf = decoded_yuv420!(y_stride: 4, dim: (4, 4), &y, &u, &v);

        let parts: [YUVSlices; 1] = buf.split();
        assert_eq!(1, parts.len());
        assert_eq!(parts[0].y(), y.as_slice());
        assert_eq!(parts[0].u(), u.as_slice());
        assert_eq!(parts[0].v(), v.as_slice());
    }

    /// Concatenating the 4 split sections must reproduce each original plane
    /// exactly (no overlap, no gaps) when N divides the plane lengths.
    #[test]
    fn test_split_02() {
        let (y, u, v) = yuv420_planes!(y_stride: 132, height: 128);
        let buf = decoded_yuv420!(y_stride: 132, dim: (128, 128), &y, &u, &v);

        let parts: [YUVSlices; 4] = buf.split();

        let (mut y_plane, mut u_plane, mut v_plane) = (vec![], vec![], vec![]);
        for slice in parts {
            y_plane.extend_from_slice(slice.y());
            u_plane.extend_from_slice(slice.u());
            v_plane.extend_from_slice(slice.v());
        }

        assert_eq!(buf.y().len(), y_plane.len());
        assert_eq!(buf.y(), y_plane);
        assert_eq!(buf.u().len(), u_plane.len());
        assert_eq!(buf.u(), u_plane);
        assert_eq!(buf.v().len(), v_plane.len());
        assert_eq!(buf.v(), v_plane);
    }
}