adder_codec_rs/transcoder/source/
framed.rs

use crate::transcoder::source::video::SourceError;
use crate::transcoder::source::video::Video;
use crate::transcoder::source::video::{Source, VideoBuilder};
use adder_codec_core::Mode::FramePerfect;
use adder_codec_core::{DeltaT, Event, PixelMultiMode, PlaneSize, SourceCamera, TimeMode};

use crate::utils::viz::ShowFeatureMode;
use adder_codec_core::codec::{EncoderOptions, EncoderType};

use crate::utils::cv::handle_color;
#[cfg(feature = "feature-logging")]
use crate::utils::cv::{calculate_quality_metrics, QualityMetrics};

use rayon::ThreadPool;
use std::io::Write;
use std::path::PathBuf;

#[cfg(feature = "feature-logging")]
use chrono::Local;
use tokio::runtime::Runtime;
use video_rs_adder_dep::{self, Decoder, Frame, Locator, Options, Resize};

/// Attributes of a framed video -> ADΔER transcode
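///
/// A minimal end-to-end sketch (not compiled here; it assumes a local
/// `input.mp4`, a `std::fs::File` writer type, and illustrative parameter
/// values):
///
/// ```ignore
/// use std::path::PathBuf;
///
/// // Open the source at half resolution, in color.
/// let mut source: Framed<std::fs::File> =
///     Framed::new(PathBuf::from("input.mp4"), true, 0.5)?
///         .frame_start(0)?
///         // 255 ticks per input frame; delta_t_max spans 30 input frames.
///         .auto_time_parameters(255, 255 * 30, None)?;
///
/// // Drain the source frame by frame until the decoder runs out of frames.
/// while let Ok(_events) = source.consume() {
///     // ... handle the ADΔER events for this frame ...
/// }
/// ```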
pub struct Framed<W: Write + 'static + std::marker::Send + std::marker::Sync> {
    cap: Decoder,
    pub(crate) input_frame: Frame,

    /// Index of the first frame to be read from the input video
    pub frame_idx_start: u32,

    /// FPS of the input video. Set automatically by `Framed::new()`
    pub source_fps: f32,

    /// Scale of the input video. Input frames are resized to this scale before transcoding.
    pub scale: f64,

    /// Whether the input video is color
    color_input: bool,

    pub(crate) video: Video<W>,
}
unsafe impl<W: Write + std::marker::Send + std::marker::Sync> Sync for Framed<W> {}

impl<W: Write + 'static + std::marker::Send + std::marker::Sync> Framed<W> {
    /// Create a new `Framed` source
    pub fn new(
        input_path: PathBuf,
        color_input: bool,
        scale: f64,
    ) -> Result<Framed<W>, SourceError> {
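        // Open the source once at its native size to learn the input
        // dimensions, then reopen it below with a resize filter applied.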
        let source = Locator::Path(input_path);
        let mut cap = Decoder::new(&source)?;
        let (width, height) = cap.size();
        let width = ((width as f64) * scale) as u32;
        let height = ((height as f64) * scale) as u32;

        cap = Decoder::new_with_options_and_resize(
            &source,
            &Options::default(),
            Resize::Fit(width, height),
        )?;

        // Record the source frame rate; the TPS is derived from it and `ref_time`
        // later (see `auto_time_parameters`)
        let source_fps = cap.frame_rate();
        let (width, height) = cap.size_out();

        let plane = PlaneSize::new(width as u16, height as u16, if color_input { 3 } else { 1 })?;

        let video = Video::new(plane, FramePerfect, None)?;

        Ok(Framed {
            cap,
            input_frame: Frame::default((height as usize, width as usize, 3)), // Note that this will be limited to 8-bit precision (due to the video-rs crate)
            frame_idx_start: 0,
            source_fps,
            scale,
            color_input,
            video,
        })
    }

    /// Set the start frame of the source
    pub fn frame_start(mut self, frame_idx_start: u32) -> Result<Self, SourceError> {
        let video_frame_count = self.cap.frame_count();
        if frame_idx_start >= video_frame_count as u32 {
            return Err(SourceError::StartOutOfBounds(frame_idx_start));
        }
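        // Seek by timestamp: convert the frame index to milliseconds using the
        // source frame rate.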
        let ts_millis = (frame_idx_start as f32 / self.source_fps * 1000.0) as i64;
        self.cap.reader.seek(ts_millis)?;

        self.frame_idx_start = frame_idx_start;
        Ok(self)
    }

    /// Automatically derive the ticks per second from the source FPS and `ref_time`
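    ///
    /// Worked example (illustrative numbers): with `ref_time = 255` ticks per
    /// input frame and a 30 FPS source, the derived TPS is `255 * 30 = 7650`
    /// ticks per second. `delta_t_max` must be an integer multiple of
    /// `ref_time`.
    ///
    /// ```ignore
    /// // 255 ticks per frame; allow integrations spanning up to 30 frames.
    /// let framed = framed.auto_time_parameters(255, 255 * 30, None)?;
    /// ```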
    pub fn auto_time_parameters(
        mut self,
        ref_time: DeltaT,
        delta_t_max: DeltaT,
        time_mode: Option<TimeMode>,
    ) -> Result<Self, SourceError> {
        if delta_t_max % ref_time == 0 {
            let tps = (ref_time as f32 * self.source_fps) as DeltaT;
            self.video = self
                .video
                .time_parameters(tps, ref_time, delta_t_max, time_mode)?;
        } else {
            return Err(SourceError::BadParams(
                "delta_t_max must be a multiple of ref_time".to_string(),
            ));
        }
        Ok(self)
    }

    /// Get the number of ticks each frame is said to span
    pub fn get_ref_time(&self) -> u32 {
        self.video.state.params.ref_time
    }

    /// Get the previous input frame
    pub fn get_last_input_frame(&self) -> &Frame {
        &self.input_frame
    }
}

impl<W: Write + 'static + std::marker::Send + std::marker::Sync> Source<W> for Framed<W> {
    /// Get pixel-wise intensities directly from the source frame, and integrate them with
    /// `ref_time` (the number of ticks each frame is said to span)
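    ///
    /// A typical driving loop (sketch only; the error signals, for example,
    /// that the decoder has run out of frames):
    ///
    /// ```ignore
    /// loop {
    ///     match source.consume() {
    ///         Ok(events) => { /* events for this frame, grouped by row chunk */ }
    ///         Err(e) => break,
    ///     }
    /// }
    /// ```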
    fn consume(&mut self) -> Result<Vec<Vec<Event>>, SourceError> {
        let (_, frame) = self.cap.decode()?;
        self.input_frame = handle_color(frame, self.color_input)?;

        let res = self.video.integrate_matrix(
            self.input_frame.clone(),
            self.video.state.params.ref_time as f32,
        );
        #[cfg(feature = "feature-logging")]
        {
            if let Some(handle) = &mut self.video.state.feature_log_handle {
                // Calculate the quality metrics
                let image_mat = self.video.state.running_intensities.clone();

                #[rustfmt::skip]
                let metrics = calculate_quality_metrics(
                    &self.input_frame,
                    &image_mat,
                    QualityMetrics {
                        mse: Some(0.0),
                        psnr: Some(0.0),
                        ssim: None,
                    });

                let metrics = metrics.unwrap();
                let bytes = serde_pickle::to_vec(&metrics, Default::default()).unwrap();
                handle.write_all(&bytes).unwrap();
            }
        }
        res
    }

    fn crf(&mut self, crf: u8) {
        self.video.update_crf(crf);
    }

    fn get_video_mut(&mut self) -> &mut Video<W> {
        &mut self.video
    }

    fn get_video_ref(&self) -> &Video<W> {
        &self.video
    }

    fn get_video(self) -> Video<W> {
        self.video
    }

    fn get_input(&self) -> Option<&Frame> {
        Some(self.get_last_input_frame())
    }

    fn get_running_input_bitrate(&self) -> f64 {
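        // tps / ref_time = input frames per second; plane.volume() = width *
        // height * channels (samples per frame); 8 bits per sample.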
        let video = self.get_video_ref();
        video.get_tps() as f64 / video.get_ref_time() as f64
            * video.state.plane.volume() as f64
            * 8.0
    }
}

impl<W: Write + 'static + std::marker::Send + std::marker::Sync> VideoBuilder<W> for Framed<W> {
    fn crf(mut self, crf: u8) -> Self {
        self.video.update_crf(crf);
        self
    }

    fn quality_manual(
        mut self,
        c_thresh_baseline: u8,
        c_thresh_max: u8,
        delta_t_max_multiplier: u32,
        c_increase_velocity: u8,
        feature_c_radius_denom: f32,
    ) -> Self {
        self.video.update_quality_manual(
            c_thresh_baseline,
            c_thresh_max,
            delta_t_max_multiplier,
            c_increase_velocity,
            feature_c_radius_denom,
        );
        self
    }

    fn chunk_rows(mut self, chunk_rows: usize) -> Self {
        self.video = self.video.chunk_rows(chunk_rows);
        self
    }

    fn time_parameters(
        mut self,
        tps: DeltaT,
        ref_time: DeltaT,
        delta_t_max: DeltaT,
        time_mode: Option<TimeMode>,
    ) -> Result<Self, SourceError> {
        if delta_t_max % ref_time == 0 {
            self.video = self
                .video
                .time_parameters(tps, ref_time, delta_t_max, time_mode)?;
        } else {
            return Err(SourceError::BadParams(
                "delta_t_max must be a multiple of ref_time".to_string(),
            ));
        }
        Ok(self)
    }

    fn write_out(
        mut self,
        source_camera: SourceCamera,
        time_mode: TimeMode,
        pixel_multi_mode: PixelMultiMode,
        adu_interval: Option<usize>,
        encoder_type: EncoderType,
        encoder_options: EncoderOptions,
        write: W,
    ) -> Result<Box<Self>, SourceError> {
        self.video = self.video.write_out(
            Some(source_camera),
            Some(time_mode),
            Some(pixel_multi_mode),
            adu_interval,
            encoder_type,
            encoder_options,
            write,
        )?;
        Ok(Box::new(self))
    }

    fn detect_features(mut self, detect_features: bool, show_features: ShowFeatureMode) -> Self {
        self.video = self.video.detect_features(detect_features, show_features);
        self
    }

    #[cfg(feature = "feature-logging")]
    fn log_path(mut self, name: String) -> Self {
        let date_time = Local::now();
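        // Timestamped log file name, e.g. `name_27_03_2024_14_05_09.log`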
        let formatted = format!("{}_{}.log", name, date_time.format("%d_%m_%Y_%H_%M_%S"));
        let log_handle = std::fs::File::create(formatted).ok();
        self.video.state.feature_log_handle = log_handle;

        // Write the plane size to the log file
        if let Some(handle) = &mut self.video.state.feature_log_handle {
            writeln!(
                handle,
                "{}x{}x{}",
                self.video.state.plane.w(),
                self.video.state.plane.h(),
                self.video.state.plane.c()
            )
            .unwrap();
        }
        self
    }
}