oximedia-core 0.1.2

Core types and traits for OxiMedia
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
//! Decoder traits for video and audio.
//!
//! This module provides traits for implementing video and audio decoders.
//! These traits define the interface that all decoder implementations must follow.

use crate::error::OxiResult;
use crate::types::{CodecId, PixelFormat, SampleFormat, Timestamp};

/// Video frame produced by a video decoder.
///
/// Represents a decoded video frame with pixel data and metadata.
/// Video frame produced by a video decoder.
///
/// Represents a decoded video frame with pixel data and metadata.
/// `Clone` is derived for consistency with [`SubtitleFrame`], so frames
/// can be duplicated (e.g. buffered and displayed); note this deep-copies
/// all plane data.
#[derive(Debug, Clone)]
pub struct VideoFrame {
    /// Pixel format of the frame data.
    pub format: PixelFormat,
    /// Width of the frame in pixels.
    pub width: u32,
    /// Height of the frame in pixels.
    pub height: u32,
    /// Timestamp information for this frame.
    pub timestamp: Timestamp,
    /// Plane data for the frame.
    /// For planar formats, contains one slice per plane.
    /// For packed formats, contains a single slice.
    pub planes: Vec<Vec<u8>>,
    /// Stride (bytes per row) for each plane.
    pub strides: Vec<usize>,
    /// Whether this frame is a keyframe.
    pub is_keyframe: bool,
}

impl VideoFrame {
    /// Creates a new video frame with the given parameters.
    #[must_use]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        format: PixelFormat,
        width: u32,
        height: u32,
        timestamp: Timestamp,
        planes: Vec<Vec<u8>>,
        strides: Vec<usize>,
        is_keyframe: bool,
    ) -> Self {
        Self {
            format,
            width,
            height,
            timestamp,
            planes,
            strides,
            is_keyframe,
        }
    }
}

/// Audio frame produced by an audio decoder.
///
/// Represents decoded audio samples with metadata.
/// `Clone` is derived for consistency with [`SubtitleFrame`]; cloning
/// deep-copies all sample buffers.
#[derive(Debug, Clone)]
pub struct AudioFrame {
    /// Sample format of the audio data.
    pub format: SampleFormat,
    /// Sample rate in Hz.
    pub sample_rate: u32,
    /// Number of channels.
    pub channels: u16,
    /// Number of samples per channel in this frame.
    pub samples: usize,
    /// Timestamp information for this frame.
    pub timestamp: Timestamp,
    /// Audio data.
    /// For interleaved formats, contains a single buffer.
    /// For planar formats, contains one buffer per channel.
    pub data: Vec<Vec<u8>>,
}

impl AudioFrame {
    /// Creates a new audio frame with the given parameters.
    ///
    /// All fields are stored as given; no validation is performed on the
    /// relationship between `format`, `channels`, `samples`, and `data`.
    #[must_use]
    pub fn new(
        format: SampleFormat,
        sample_rate: u32,
        channels: u16,
        samples: usize,
        timestamp: Timestamp,
        data: Vec<Vec<u8>>,
    ) -> Self {
        Self {
            format,
            sample_rate,
            channels,
            samples,
            timestamp,
            data,
        }
    }

    /// Returns the duration of this frame in seconds.
    ///
    /// Returns `0.0` for a degenerate frame with a zero sample rate,
    /// instead of producing NaN/infinity from the division.
    #[must_use]
    #[allow(clippy::cast_precision_loss)]
    pub fn duration_seconds(&self) -> f64 {
        if self.sample_rate == 0 {
            return 0.0;
        }
        self.samples as f64 / f64::from(self.sample_rate)
    }
}

/// Trait for video decoder implementations.
///
/// Implementors decode compressed video data into raw video frames
/// using a send/receive (push/pull) packet model.
///
/// # Examples
///
/// ```ignore
/// use oximedia_core::error::OxiResult;
/// use oximedia_core::traits::{VideoDecoder, VideoFrame};
///
/// fn decode_video(decoder: &mut impl VideoDecoder, data: &[u8]) -> OxiResult<Vec<VideoFrame>> {
///     decoder.send_packet(data)?;
///     let mut frames = Vec::new();
///     while let Some(frame) = decoder.receive_frame()? {
///         frames.push(frame);
///     }
///     Ok(frames)
/// }
/// ```
pub trait VideoDecoder {
    /// Returns the codec ID this decoder handles.
    fn codec_id(&self) -> CodecId;

    /// Returns the output pixel format.
    fn output_format(&self) -> PixelFormat;

    /// Returns the output dimensions (width, height).
    fn output_dimensions(&self) -> (u32, u32);

    /// Sends a compressed packet to the decoder.
    ///
    /// # Arguments
    ///
    /// * `data` - Compressed video data
    ///
    /// # Errors
    ///
    /// Returns an error if the packet is invalid or the decoder is in an error state.
    fn send_packet(&mut self, data: &[u8]) -> OxiResult<()>;

    /// Receives a decoded frame from the decoder.
    ///
    /// Returns `None` if no frame is currently available.
    /// Call repeatedly after `send_packet` to get all decoded frames.
    ///
    /// # Errors
    ///
    /// Returns an error if decoding fails.
    fn receive_frame(&mut self) -> OxiResult<Option<VideoFrame>>;

    /// Flushes the decoder, signaling end of stream.
    ///
    /// After calling flush, continue calling `receive_frame` to get
    /// any remaining buffered frames.
    ///
    /// # Errors
    ///
    /// Returns an error if flushing fails.
    fn flush(&mut self) -> OxiResult<()>;

    /// Resets the decoder state.
    ///
    /// Call this when seeking to a new position in the stream.
    ///
    /// # Errors
    ///
    /// Returns an error if reset fails.
    fn reset(&mut self) -> OxiResult<()>;
}

/// Trait for audio decoder implementations.
///
/// Implementors decode compressed audio data into raw audio frames
/// using a send/receive (push/pull) packet model.
///
/// # Examples
///
/// ```ignore
/// use oximedia_core::error::OxiResult;
/// use oximedia_core::traits::{AudioDecoder, AudioFrame};
///
/// fn decode_audio(decoder: &mut impl AudioDecoder, data: &[u8]) -> OxiResult<Vec<AudioFrame>> {
///     decoder.send_packet(data)?;
///     let mut frames = Vec::new();
///     while let Some(frame) = decoder.receive_frame()? {
///         frames.push(frame);
///     }
///     Ok(frames)
/// }
/// ```
pub trait AudioDecoder {
    /// Returns the codec ID this decoder handles.
    fn codec_id(&self) -> CodecId;

    /// Returns the output sample format.
    fn output_format(&self) -> SampleFormat;

    /// Returns the sample rate in Hz.
    fn sample_rate(&self) -> u32;

    /// Returns the number of output channels.
    fn channels(&self) -> u16;

    /// Sends a compressed packet to the decoder.
    ///
    /// # Arguments
    ///
    /// * `data` - Compressed audio data
    ///
    /// # Errors
    ///
    /// Returns an error if the packet is invalid or the decoder is in an error state.
    fn send_packet(&mut self, data: &[u8]) -> OxiResult<()>;

    /// Receives a decoded frame from the decoder.
    ///
    /// Returns `None` if no frame is currently available.
    /// Call repeatedly after `send_packet` to get all decoded frames.
    ///
    /// # Errors
    ///
    /// Returns an error if decoding fails.
    fn receive_frame(&mut self) -> OxiResult<Option<AudioFrame>>;

    /// Flushes the decoder, signaling end of stream.
    ///
    /// After calling flush, continue calling `receive_frame` to get
    /// any remaining buffered frames.
    ///
    /// # Errors
    ///
    /// Returns an error if flushing fails.
    fn flush(&mut self) -> OxiResult<()>;

    /// Resets the decoder state.
    ///
    /// Call this when seeking to a new position in the stream.
    ///
    /// # Errors
    ///
    /// Returns an error if reset fails.
    fn reset(&mut self) -> OxiResult<()>;
}

/// Subtitle cue/event produced by a subtitle decoder.
///
/// Represents a single subtitle entry with timing and text/markup.
#[derive(Debug, Clone)]
pub struct SubtitleFrame {
    /// Timestamp at which this subtitle becomes visible.
    pub start: Timestamp,
    /// Timestamp at which this subtitle stops being visible.
    pub end: Timestamp,
    /// Text content of the subtitle.
    /// For text-based formats (SRT, `WebVTT`), contains the formatted text.
    /// For markup formats (ASS/SSA), contains the markup string.
    pub text: String,
    /// Subtitle layer, used to stack overlapping subtitles.
    /// Defaults to `0` when constructed via [`SubtitleFrame::new`].
    pub layer: u32,
    /// Optional subtitle position/alignment settings; `None` means the
    /// renderer's defaults apply.
    pub settings: Option<SubtitleSettings>,
}

impl SubtitleFrame {
    /// Creates a new subtitle frame with the given timing and text.
    ///
    /// The frame starts on layer `0` with no position/alignment settings;
    /// use [`Self::with_layer`] and [`Self::with_settings`] to override.
    #[must_use]
    pub fn new(start: Timestamp, end: Timestamp, text: impl Into<String>) -> Self {
        Self {
            start,
            end,
            text: text.into(),
            layer: 0,
            settings: None,
        }
    }

    /// Returns the duration of this subtitle in seconds.
    ///
    /// Negative if `end` precedes `start`; no ordering is enforced.
    // NOTE: the previous `#[allow(clippy::cast_precision_loss)]` was dead —
    // this body performs no numeric cast, only `to_seconds()` calls.
    #[must_use]
    pub fn duration_seconds(&self) -> f64 {
        self.end.to_seconds() - self.start.to_seconds()
    }

    /// Sets the layer for this subtitle (builder style).
    #[must_use]
    pub const fn with_layer(mut self, layer: u32) -> Self {
        self.layer = layer;
        self
    }

    /// Sets the position/alignment settings for this subtitle (builder style).
    #[must_use]
    pub fn with_settings(mut self, settings: SubtitleSettings) -> Self {
        self.settings = Some(settings);
        self
    }
}

/// Subtitle positioning and styling settings.
///
/// Provides optional positioning, alignment, and styling information
/// for subtitle rendering.
/// Derives `Copy` and `PartialEq` (every field is `Copy`/`PartialEq`),
/// matching the alignment enums below; `Eq` is impossible because of the
/// `f32` fields.
#[derive(Debug, Clone, Copy, Default, PartialEq)]
pub struct SubtitleSettings {
    /// Horizontal alignment (left, center, right).
    pub align_h: Option<HorizontalAlign>,
    /// Vertical alignment (top, middle, bottom).
    pub align_v: Option<VerticalAlign>,
    /// Position on screen (0.0 - 1.0).
    pub position: Option<(f32, f32)>,
    /// Size of subtitle region (width, height in 0.0 - 1.0).
    pub size: Option<(f32, f32)>,
}

/// Horizontal text alignment.
///
/// `Hash` is derived alongside `Eq` so alignments can key hash-based
/// collections (e.g. per-alignment style tables).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HorizontalAlign {
    /// Left-aligned.
    Left,
    /// Center-aligned.
    Center,
    /// Right-aligned.
    Right,
}

/// Vertical text alignment.
///
/// `Hash` is derived alongside `Eq` so alignments can key hash-based
/// collections (e.g. per-alignment style tables).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerticalAlign {
    /// Top-aligned.
    Top,
    /// Middle-aligned.
    Middle,
    /// Bottom-aligned.
    Bottom,
}

/// Trait for subtitle decoder implementations.
///
/// Implementors decode subtitle data into subtitle frames/cues
/// using a send/receive (push/pull) packet model.
///
/// # Examples
///
/// ```ignore
/// use oximedia_core::error::OxiResult;
/// use oximedia_core::traits::{SubtitleDecoder, SubtitleFrame};
///
/// fn decode_subtitle(
///     decoder: &mut impl SubtitleDecoder,
///     data: &[u8],
/// ) -> OxiResult<Vec<SubtitleFrame>> {
///     decoder.send_packet(data)?;
///     let mut frames = Vec::new();
///     while let Some(frame) = decoder.receive_frame()? {
///         frames.push(frame);
///     }
///     Ok(frames)
/// }
/// ```
pub trait SubtitleDecoder {
    /// Returns the codec ID this decoder handles.
    fn codec_id(&self) -> CodecId;

    /// Sends a subtitle packet to the decoder.
    ///
    /// # Arguments
    ///
    /// * `data` - Subtitle data (text or binary)
    ///
    /// # Errors
    ///
    /// Returns an error if the packet is invalid or the decoder is in an error state.
    fn send_packet(&mut self, data: &[u8]) -> OxiResult<()>;

    /// Receives a decoded subtitle frame from the decoder.
    ///
    /// Returns `None` if no frame is currently available.
    /// Call repeatedly after `send_packet` to get all decoded frames.
    ///
    /// # Errors
    ///
    /// Returns an error if decoding fails.
    fn receive_frame(&mut self) -> OxiResult<Option<SubtitleFrame>>;

    /// Flushes the decoder, signaling end of stream.
    ///
    /// After calling flush, continue calling `receive_frame` to get
    /// any remaining buffered frames.
    ///
    /// # Errors
    ///
    /// Returns an error if flushing fails.
    fn flush(&mut self) -> OxiResult<()>;

    /// Resets the decoder state.
    ///
    /// Call this when seeking to a new position in the stream.
    ///
    /// # Errors
    ///
    /// Returns an error if reset fails.
    fn reset(&mut self) -> OxiResult<()>;
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::Rational;

    /// A 1080p YUV 4:2:0 keyframe must come out of `new` with its
    /// metadata unchanged.
    #[test]
    fn test_video_frame_new() {
        let ts = Timestamp::new(0, Rational::new(1, 1000));
        let luma = vec![0u8; 1920 * 1080];
        let chroma_u = vec![0u8; 960 * 540];
        let chroma_v = vec![0u8; 960 * 540];

        let frame = VideoFrame::new(
            PixelFormat::Yuv420p,
            1920,
            1080,
            ts,
            vec![luma, chroma_u, chroma_v],
            vec![1920, 960, 960],
            true,
        );

        assert_eq!(frame.format, PixelFormat::Yuv420p);
        assert_eq!(frame.width, 1920);
        assert_eq!(frame.height, 1080);
        assert!(frame.is_keyframe);
    }

    /// `AudioFrame::new` must store every field exactly as given.
    #[test]
    fn test_audio_frame_new() {
        let ts = Timestamp::new(0, Rational::new(1, 48000));
        // Interleaved stereo f32: 1024 samples * 2 channels * 4 bytes.
        let interleaved = vec![0u8; 1024 * 2 * 4];

        let frame = AudioFrame::new(SampleFormat::F32, 48000, 2, 1024, ts, vec![interleaved]);

        assert_eq!(frame.format, SampleFormat::F32);
        assert_eq!(frame.sample_rate, 48000);
        assert_eq!(frame.channels, 2);
        assert_eq!(frame.samples, 1024);
    }

    /// One second's worth of samples at 48 kHz reports a 1 s duration.
    #[test]
    fn test_audio_frame_duration() {
        let ts = Timestamp::new(0, Rational::new(1, 48000));
        let one_second = 48000;

        let frame = AudioFrame::new(
            SampleFormat::F32,
            48000,
            2,
            one_second,
            ts,
            vec![vec![0u8; one_second * 2 * 4]],
        );

        assert!((frame.duration_seconds() - 1.0).abs() < f64::EPSILON);
    }
}