adder_codec_rs/transcoder/source/
video.rs

1#[cfg(feature = "open-cv")]
2use opencv::core::{Mat, Size};
3#[cfg(feature = "opencv")]
4use opencv::prelude::*;
5use std::cmp::min;
6use std::collections::HashSet;
7#[cfg(feature = "feature-logging")]
8use std::ffi::c_void;
9use std::io::{sink, Write};
10use std::mem::swap;
11
12use adder_codec_core::codec::empty::stream::EmptyOutput;
13use adder_codec_core::codec::encoder::Encoder;
14use adder_codec_core::codec::raw::stream::RawOutput;
15use adder_codec_core::codec::{
16    CodecError, CodecMetadata, EncoderOptions, EncoderType, LATEST_CODEC_VERSION,
17};
18use adder_codec_core::{
19    Coord, DeltaT, Event, Mode, PixelAddress, PixelMultiMode, PlaneError, PlaneSize, SourceCamera,
20    SourceType, TimeMode, D_EMPTY, D_ZERO_INTEGRATION,
21};
22use bumpalo::Bump;
23
24use std::sync::mpsc::{channel, Sender};
25use std::time::Instant;
26
27use crate::framer::scale_intensity::{FrameValue, SaeTime};
28use crate::transcoder::event_pixel_tree::{Intensity32, PixelArena};
29use adder_codec_core::D;
30#[cfg(feature = "opencv")]
31use davis_edi_rs::util::reconstructor::ReconstructionError;
32#[cfg(feature = "opencv")]
33use opencv::{highgui, imgproc::resize};
34
35#[cfg(feature = "compression")]
36use adder_codec_core::codec::compressed::stream::CompressedOutput;
37use adder_codec_core::Mode::Continuous;
38use itertools::Itertools;
39use ndarray::{Array, Array3, Axis, ShapeError};
40use rayon::iter::IndexedParallelIterator;
41use rayon::iter::IntoParallelIterator;
42use rayon::iter::ParallelIterator;
43use rayon::ThreadPool;
44
45use crate::transcoder::source::video::FramedViewMode::SAE;
46use crate::utils::cv::is_feature;
47
48use crate::utils::viz::{draw_feature_coord, draw_rect, ShowFeatureMode};
49use adder_codec_core::codec::rate_controller::{Crf, CrfParameters};
50use kiddo::{KdTree, SquaredEuclidean};
51use thiserror::Error;
52use tokio::task::JoinError;
53use video_rs_adder_dep::Frame;
54
/// Various errors that can occur during an ADΔER transcode
#[derive(Error, Debug)]
pub enum SourceError {
    /// Could not open source file
    #[error("Could not open source file")]
    Open,

    /// Incorrect parameters for the given source
    #[error("ADDER parameters are invalid for the given source: `{0}`")]
    BadParams(String),

    /// When a [Framed](crate::transcoder::source::framed::Framed) source is used, but the start frame is out of bounds
    #[error("start frame `{0}` is out of bounds")]
    StartOutOfBounds(u32),

    /// No more data to consume from the video source
    #[error("Source buffer is empty")]
    BufferEmpty,

    /// Source buffer channel is closed
    #[error("Source buffer channel is closed")]
    BufferChannelClosed,

    /// No data from next spot in buffer
    #[error("No data from next spot in buffer")]
    NoData,

    /// Data not initialized
    #[error("Data not initialized")]
    UninitializedData,

    #[cfg(feature = "open-cv")]
    /// OpenCV error
    // Converted via the manual `From` impl below rather than `#[from]`.
    #[error("OpenCV error")]
    OpencvError(opencv::Error),

    /// video-rs error
    // Converted via the manual `From` impl below rather than `#[from]`.
    #[error("video-rs error")]
    VideoError(video_rs_adder_dep::Error),

    /// Codec error
    // Converted via the manual `From` impl below rather than `#[from]`.
    #[error("Codec core error")]
    CodecError(CodecError),

    #[cfg(feature = "open-cv")]
    /// EDI error
    #[error("EDI error")]
    EdiError(ReconstructionError),

    /// Shape error
    #[error("Shape error")]
    ShapeError(#[from] ShapeError),

    /// Plane error
    #[error("Plane error")]
    PlaneError(#[from] PlaneError),

    /// Handle join error
    #[error("Handle join error")]
    JoinError(#[from] JoinError),

    /// Vision application error
    #[error("Vision application error")]
    VisionError(String),

    /// I/O error
    #[error("I/O error")]
    IoError(#[from] std::io::Error),
}
124
#[cfg(feature = "open-cv")]
impl From<opencv::Error> for SourceError {
    // Allow `?` conversion from OpenCV errors.
    fn from(err: opencv::Error) -> Self {
        Self::OpencvError(err)
    }
}
131impl From<adder_codec_core::codec::CodecError> for SourceError {
132    fn from(value: CodecError) -> Self {
133        SourceError::CodecError(value)
134    }
135}
136
137impl From<video_rs_adder_dep::Error> for SourceError {
138    fn from(value: video_rs_adder_dep::Error) -> Self {
139        SourceError::VideoError(value)
140    }
141}
142
/// The display mode
///
/// Selects how the instantaneous display frame is rendered from each pixel's
/// most recent event.
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub enum FramedViewMode {
    /// Visualize the intensity (2^[`D`] / [`DeltaT`]) of each pixel's most recent event
    #[default]
    Intensity,

    /// Visualize the [`D`] component of each pixel's most recent event
    D,

    /// Visualize the temporal component ([`DeltaT`]) of each pixel's most recent event
    DeltaT,

    /// Surface of Active Events. Visualize the time elapsed since each pixel last fired an event
    /// (most recent events will have greater values)
    SAE,
}
160
/// Transcode parameters that control pixel integration and event timing.
#[derive(Debug)]
pub struct VideoStateParams {
    /// Integration mode for the pixel trees (e.g., continuous)
    pub(crate) pixel_tree_mode: Mode,

    /// How pixels handle multiple events per interval
    /// NOTE(review): semantics defined by `adder_codec_core::PixelMultiMode` — confirm there
    pub pixel_multi_mode: PixelMultiMode,

    /// The maximum time difference between events of the same pixel, in ticks
    pub delta_t_max: u32,

    /// The reference time in ticks
    pub ref_time: u32,
}
173
174impl Default for VideoStateParams {
175    fn default() -> Self {
176        Self {
177            pixel_tree_mode: Continuous,
178            pixel_multi_mode: Default::default(),
179            delta_t_max: 7650,
180            ref_time: 255,
181        }
182    }
183}
184
/// Running state of the video transcode
#[derive(Debug)]
pub struct VideoState {
    /// Core transcode parameters (pixel modes, `ref_time`, `delta_t_max`)
    pub params: VideoStateParams,

    /// The size of the imaging plane
    pub plane: PlaneSize,

    /// The number of rows of pixels to process at a time (per thread)
    pub chunk_rows: usize,

    /// The number of input intervals (of fixed time) processed so far
    pub in_interval_count: u32,

    /// The number of ticks per second
    pub tps: DeltaT,

    /// Whether or not to detect features
    pub feature_detection: bool,

    /// The current instantaneous frame, for determining features
    pub running_intensities: Array3<u8>,

    /// Whether or not to draw the features on the display mat, and the mode to do it in
    show_features: ShowFeatureMode,

    /// Detected feature coordinates, one set per chunk of rows (see `chunk_rows`)
    features: Vec<HashSet<Coord>>,

    /// Open file handle for feature logging, when enabled
    pub feature_log_handle: Option<std::fs::File>,
    /// Whether detected features feed back into rate adjustment
    /// NOTE(review): consumed elsewhere — confirm exact effect at the call sites
    feature_rate_adjustment: bool,
    /// Whether to cluster detected features
    feature_cluster: bool,
}
217
218impl Default for VideoState {
219    fn default() -> Self {
220        VideoState {
221            plane: PlaneSize::default(),
222            params: VideoStateParams::default(),
223            chunk_rows: 1,
224            in_interval_count: 1,
225            tps: 7650,
226            feature_detection: false,
227            running_intensities: Default::default(),
228            show_features: ShowFeatureMode::Off,
229            features: Default::default(),
230            feature_log_handle: None,
231            feature_rate_adjustment: false,
232            feature_cluster: false,
233        }
234    }
235}
236
237// impl VideoState {
238//     fn update_crf(&mut self, crf: u8) {
239//         self.crf_quality = crf;
240//         self.c_thresh_baseline = CRF[crf as usize][0] as u8;
241//         self.c_thresh_max = CRF[crf as usize][1] as u8;
242//
243//         self.c_increase_velocity = CRF[crf as usize][2] as u8;
244//         self.feature_c_radius = (CRF[crf as usize][3] * self.plane.min_resolution() as f32) as u16;
245//     }
246//
247//     fn update_quality_manual(
248//         &mut self,
249//         c_thresh_baseline: u8,
250//         c_thresh_max: u8,
251//         delta_t_max_multiplier: u32,
252//         c_increase_velocity: u8,
253//         feature_c_radius: f32,
254//     ) {
255//         self.c_thresh_baseline = c_thresh_baseline;
256//         self.c_thresh_max = c_thresh_max;
257//         self.delta_t_max = delta_t_max_multiplier * self.ref_time;
258//         self.c_increase_velocity = c_increase_velocity;
259//         self.feature_c_radius = feature_c_radius as u16; // The absolute pixel count radius
260//     }
261// }
262
/// A builder for a [`Video`]
pub trait VideoBuilder<W> {
    /// Set the Constant Rate Factor (CRF) quality setting for the encoder. 0 is lossless, 9 is worst quality.
    fn crf(self, crf: u8) -> Self;

    /// Manually set the parameters dictating quality
    fn quality_manual(
        self,
        c_thresh_baseline: u8,
        c_thresh_max: u8,
        delta_t_max_multiplier: u32,
        c_increase_velocity: u8,
        feature_c_radius_denom: f32,
    ) -> Self;

    /// Set the chunk rows
    fn chunk_rows(self, chunk_rows: usize) -> Self;

    /// Set the time parameters
    fn time_parameters(
        self,
        tps: DeltaT,
        ref_time: DeltaT,
        delta_t_max: DeltaT,
        time_mode: Option<TimeMode>,
    ) -> Result<Self, SourceError>
    where
        Self: std::marker::Sized;

    /// Set the [`Encoder`]
    fn write_out(
        self,
        source_camera: SourceCamera,
        time_mode: TimeMode,
        pixel_multi_mode: PixelMultiMode,
        adu_interval: Option<usize>,
        encoder_type: EncoderType,
        encoder_options: EncoderOptions,
        write: W,
    ) -> Result<Box<Self>, SourceError>;

    /// Set whether or not to detect features, and whether or not to display the features
    fn detect_features(self, detect_features: bool, show_features: ShowFeatureMode) -> Self;

    /// Set the output path for the feature log (only available with the
    /// `feature-logging` feature)
    #[cfg(feature = "feature-logging")]
    fn log_path(self, name: String) -> Self;
}
310
311// impl VideoBuilder for Video {}
312
/// Attributes common to ADΔER transcode process
pub struct Video<W: Write + std::marker::Send + std::marker::Sync + 'static> {
    /// The current state of the video transcode
    pub state: VideoState,
    /// One [`PixelArena`] per (row, column, channel) of the imaging plane
    pub(crate) event_pixel_trees: Array3<PixelArena>,

    /// The current instantaneous display frame with the features drawn on it
    pub display_frame_features: Frame,

    /// The current view mode of the instantaneous frame
    pub instantaneous_view_mode: FramedViewMode,

    /// Channel for sending events to the encoder
    pub event_sender: Sender<Vec<Event>>,

    /// The object that takes in ADDER events, potentially transforms them in some way,
    /// and writes them somewhere
    pub encoder: Encoder<W>,

    /// The type of encoder being used (e.g., compressed or raw)
    pub encoder_type: EncoderType,
    // TODO: Hold multiple encoder options and an enum, so that boxing isn't required.
    // Also hold a state for whether or not to write out events at all, so that a null writer isn't required.
    // Eric: this is somewhat addressed above
}
// SAFETY: `W` is already constrained to `Send + Sync`, and the remaining fields are
// owned by the `Video` itself. NOTE(review): confirm every non-`W` field (notably the
// mpsc `Sender`) is actually safe to move across threads; if all fields are `Send`,
// the compiler's auto-impl would make this manual unsafe impl unnecessary.
unsafe impl<W: Write + std::marker::Send + std::marker::Sync + 'static> Send for Video<W> {}
339
340impl<W: Write + 'static + std::marker::Send + std::marker::Sync + 'static> Video<W> {
341    /// Initialize the Video with default parameters.
342    pub(crate) fn new(
343        plane: PlaneSize,
344        pixel_tree_mode: Mode,
345        writer: Option<W>,
346    ) -> Result<Video<W>, SourceError> {
347        let mut state = VideoState {
348            params: VideoStateParams {
349                pixel_tree_mode,
350                ..Default::default()
351            },
352            running_intensities: Array::zeros((plane.h_usize(), plane.w_usize(), plane.c_usize())),
353            ..Default::default()
354        };
355
356        let mut data = Vec::new();
357        for y in 0..plane.h() {
358            for x in 0..plane.w() {
359                for c in 0..plane.c() {
360                    let px = PixelArena::new(
361                        1.0,
362                        Coord {
363                            x,
364                            y,
365                            c: match &plane.c() {
366                                1 => None,
367                                _ => Some(c),
368                            },
369                        },
370                    );
371                    data.push(px);
372                }
373            }
374        }
375
376        let event_pixel_trees: Array3<PixelArena> =
377            Array3::from_shape_vec((plane.h_usize(), plane.w_usize(), plane.c_usize()), data)?;
378        let instantaneous_frame =
379            Array3::zeros((plane.h_usize(), plane.w_usize(), plane.c_usize()));
380
381        state.plane = plane;
382        let instantaneous_view_mode = FramedViewMode::Intensity;
383        let (event_sender, _) = channel();
384        let meta = CodecMetadata {
385            codec_version: LATEST_CODEC_VERSION,
386            header_size: 0,
387            time_mode: TimeMode::AbsoluteT,
388            plane: state.plane,
389            tps: state.tps,
390            ref_interval: state.params.ref_time,
391            delta_t_max: state.params.delta_t_max,
392            event_size: 0,
393            source_camera: SourceCamera::default(), // TODO: Allow for setting this
394            adu_interval: Default::default(),
395        };
396
397        match writer {
398            None => {
399                let encoder: Encoder<W> = Encoder::new_empty(
400                    EmptyOutput::new(meta, sink()),
401                    EncoderOptions::default(state.plane),
402                );
403                Ok(Video {
404                    state,
405                    event_pixel_trees,
406                    display_frame_features: instantaneous_frame,
407                    instantaneous_view_mode,
408                    event_sender,
409                    encoder,
410                    encoder_type: EncoderType::Empty,
411                })
412            }
413            Some(w) => {
414                let encoder = Encoder::new_raw(
415                    // TODO: Allow for compressed representation (not just raw)
416                    RawOutput::new(meta, w),
417                    EncoderOptions::default(state.plane),
418                );
419                Ok(Video {
420                    state,
421                    event_pixel_trees,
422                    display_frame_features: instantaneous_frame,
423                    instantaneous_view_mode,
424                    event_sender,
425                    encoder,
426                    encoder_type: EncoderType::Empty,
427                })
428            }
429        }
430    }
431
432    /// Set the positive contrast threshold
433    #[deprecated(
434        since = "0.3.4",
435        note = "please use `update_crf` or `update_quality_manual` instead"
436    )]
437    pub fn c_thresh_pos(mut self, c_thresh_pos: u8) -> Self {
438        for px in self.event_pixel_trees.iter_mut() {
439            px.c_thresh = c_thresh_pos;
440        }
441        dbg!("t");
442        self.encoder
443            .options
444            .crf
445            .override_c_thresh_baseline(c_thresh_pos);
446        self
447    }
448
    /// Set the negative contrast threshold
    ///
    /// # Panics
    /// Always panics: a separate negative threshold is not implemented; the pixel
    /// model currently uses a single `c_thresh` for both polarities (see `c_thresh_pos`).
    #[deprecated(
        since = "0.3.4",
        note = "please use `update_crf` or `update_quality_manual` instead"
    )]
    pub fn c_thresh_neg(self, _c_thresh_neg: u8) -> Self {
        unimplemented!();
        // for px in self.event_pixel_trees.iter_mut() {
        //     px.c_thresh = c_thresh_neg;
        // }
        // self
    }
461
462    /// Set the number of rows to process at a time (in each thread)
463    pub fn chunk_rows(mut self, chunk_rows: usize) -> Self {
464        self.state.chunk_rows = chunk_rows;
465        let mut num_chunks = self.state.plane.h_usize() / chunk_rows;
466        if self.state.plane.h_usize() % chunk_rows != 0 {
467            num_chunks += 1;
468        }
469        self.state.features = vec![HashSet::new(); num_chunks];
470        self
471    }
472
473    /// Set the time parameters for the video.
474    ///
475    /// These parameters, in conjunction, determine the temporal resolution and maximum transcode
476    /// accuracy/quality.
477    ///
478    /// # Arguments
479    ///
480    /// * `tps`: ticks per second
481    /// * `ref_time`: reference time in ticks.
482    /// * `delta_t_max`: maximum time difference between events of the same pixel, in ticks
483    ///
484    /// returns: `Result<Video<W>, Box<dyn Error, Global>>`
485    pub fn time_parameters(
486        mut self,
487        tps: DeltaT,
488        ref_time: DeltaT,
489        delta_t_max: DeltaT,
490        time_mode: Option<TimeMode>,
491    ) -> Result<Self, SourceError> {
492        self.event_pixel_trees.par_map_inplace(|px| {
493            px.time_mode(time_mode);
494        });
495
496        if ref_time > f32::MAX as u32 {
497            eprintln!(
498                "Reference time {} is too large. Keeping current value of {}.",
499                ref_time, self.state.params.ref_time
500            );
501            return Ok(self);
502        }
503        if tps > f32::MAX as u32 {
504            eprintln!(
505                "Time per sample {} is too large. Keeping current value of {}.",
506                tps, self.state.tps
507            );
508            return Ok(self);
509        }
510        if delta_t_max > f32::MAX as u32 {
511            eprintln!(
512                "Delta t max {} is too large. Keeping current value of {}.",
513                delta_t_max, self.state.params.delta_t_max
514            );
515            return Ok(self);
516        }
517        if delta_t_max < ref_time {
518            eprintln!(
519                "Delta t max {} is smaller than reference time {}. Keeping current value of {}.",
520                delta_t_max, ref_time, self.state.params.delta_t_max
521            );
522            return Ok(self);
523        }
524        self.state.params.delta_t_max = delta_t_max;
525        self.state.params.ref_time = ref_time;
526        self.state.tps = tps;
527
528        Ok(self)
529    }
530
531    /// Write out the video to a file.
532    ///
533    /// # Arguments
534    ///
535    /// * `source_camera`: the type of video source
536    /// * `time_mode`: the time mode of the video
537    /// * `write`: the output stream to write to
538    pub fn write_out(
539        mut self,
540        source_camera: Option<SourceCamera>,
541        time_mode: Option<TimeMode>,
542        pixel_multi_mode: Option<PixelMultiMode>,
543        adu_interval: Option<usize>,
544        encoder_type: EncoderType,
545        encoder_options: EncoderOptions,
546        write: W,
547    ) -> Result<Self, SourceError> {
548        let encoder: Encoder<_> = match encoder_type {
549            EncoderType::Compressed => {
550                #[cfg(feature = "compression")]
551                {
552                    self.state.params.pixel_multi_mode =
553                        pixel_multi_mode.unwrap_or(PixelMultiMode::Collapse);
554                    let compression = CompressedOutput::new(
555                        CodecMetadata {
556                            codec_version: LATEST_CODEC_VERSION,
557                            header_size: 0,
558                            time_mode: time_mode.unwrap_or_default(),
559                            plane: self.state.plane,
560                            tps: self.state.tps,
561                            ref_interval: self.state.params.ref_time,
562                            delta_t_max: self.state.params.delta_t_max,
563                            event_size: 0,
564                            source_camera: source_camera.unwrap_or_default(),
565                            adu_interval: adu_interval.unwrap_or_default(),
566                        },
567                        write,
568                    );
569                    Encoder::new_compressed(compression, encoder_options)
570                }
571                #[cfg(not(feature = "compression"))]
572                {
573                    return Err(SourceError::BadParams(
574                        "Compressed representation is experimental and is not enabled by default!"
575                            .to_string(),
576                    ));
577                }
578            }
579            EncoderType::Raw => {
580                self.state.params.pixel_multi_mode =
581                    pixel_multi_mode.unwrap_or(PixelMultiMode::Collapse);
582                let compression = RawOutput::new(
583                    CodecMetadata {
584                        codec_version: LATEST_CODEC_VERSION,
585                        header_size: 0,
586                        time_mode: time_mode.unwrap_or_default(),
587                        plane: self.state.plane,
588                        tps: self.state.tps,
589                        ref_interval: self.state.params.ref_time,
590                        delta_t_max: self.state.params.delta_t_max,
591                        event_size: 0,
592                        source_camera: source_camera.unwrap_or_default(),
593                        adu_interval: Default::default(),
594                    },
595                    write,
596                );
597                Encoder::new_raw(compression, encoder_options)
598            }
599            EncoderType::Empty => {
600                self.state.params.pixel_multi_mode =
601                    pixel_multi_mode.unwrap_or(PixelMultiMode::Collapse);
602                let compression = EmptyOutput::new(
603                    CodecMetadata {
604                        codec_version: LATEST_CODEC_VERSION,
605                        header_size: 0,
606                        time_mode: time_mode.unwrap_or_default(),
607                        plane: self.state.plane,
608                        tps: self.state.tps,
609                        ref_interval: self.state.params.ref_time,
610                        delta_t_max: self.state.params.delta_t_max,
611                        event_size: 0,
612                        source_camera: source_camera.unwrap_or_default(),
613                        adu_interval: Default::default(),
614                    },
615                    sink(),
616                );
617                Encoder::new_empty(compression, encoder_options)
618            }
619        };
620
621        self.encoder = encoder;
622        self.encoder_type = encoder_type;
623
624        self.event_pixel_trees.par_map_inplace(|px| {
625            px.time_mode(time_mode);
626        });
627        Ok(self)
628    }
629
630    /// Close and flush the stream writer.
631    /// # Errors
632    /// Returns an error if the stream writer cannot be closed cleanly.
633    pub fn end_write_stream(&mut self) -> Result<Option<W>, SourceError> {
634        let mut tmp: Encoder<W> = Encoder::new_empty(
635            EmptyOutput::new(CodecMetadata::default(), sink()),
636            self.encoder.options,
637        );
638        swap(&mut self.encoder, &mut tmp);
639        Ok(tmp.close_writer()?)
640    }
641
    /// Integrate a single input frame into every pixel tree, in parallel row chunks,
    /// and return the emitted events (one `Vec<Event>` per chunk).
    ///
    /// # Arguments
    /// * `matrix`: the input intensity frame (consumed; converted to `f32`)
    /// * `time_spanned`: how many ticks of time this frame spans
    ///
    /// # Errors
    /// Returns an error if the encoder fails to ingest an event.
    #[allow(clippy::needless_pass_by_value)]
    pub(crate) fn integrate_matrix(
        &mut self,
        matrix: Frame,
        time_spanned: f32,
    ) -> Result<Vec<Vec<Event>>, SourceError> {
        // First frame: seed each pixel's starting D value from the raw intensities.
        if self.state.in_interval_count == 0 {
            self.set_initial_d(&matrix);
        }

        let parameters = *self.encoder.options.crf.get_parameters();

        self.state.in_interval_count += 1;

        // let matrix_f32 = convert_u8_to_f32_simd(&matrix.into_raw_vec());
        let matrix = matrix.mapv(f32::from);

        // TODO: When there's full support for various bit-depth sources, modify this accordingly
        let practical_d_max = fast_math::log2_raw(
            255.0 * (self.state.params.delta_t_max / self.state.params.ref_time) as f32,
        );

        // Ticks per frame, used when scaling events into display intensities.
        let tpf = self.state.params.ref_time as f64;

        let params = &self.state.params;
        // Important: if framing the events simultaneously, then the chunk division must be
        // exactly the same as it is for the framer
        let big_buffer: Vec<Vec<Event>> = self
            .event_pixel_trees
            .axis_chunks_iter_mut(Axis(0), self.state.chunk_rows)
            .into_par_iter()
            .zip(
                matrix
                    .axis_chunks_iter(Axis(0), self.state.chunk_rows)
                    .into_par_iter(),
            )
            .zip(
                self.state
                    .running_intensities
                    .axis_chunks_iter_mut(Axis(0), self.state.chunk_rows)
                    .into_par_iter(),
            )
            .map(|((mut px_chunk, matrix_chunk), mut running_chunk)| {
                let mut buffer: Vec<Event> = Vec::with_capacity(10);
                // Bump-allocate the shared scratch base value once per chunk.
                let bump = Bump::new();
                let base_val = bump.alloc(0);

                for ((px, input), running) in px_chunk
                    .iter_mut()
                    .zip(matrix_chunk.iter())
                    .zip(running_chunk.iter_mut())
                {
                    integrate_for_px(
                        px,
                        base_val,
                        *input as u8,
                        *input, // In this case, frame val is the same as intensity to integrate
                        time_spanned,
                        &mut buffer,
                        params,
                        &parameters,
                    );

                    // Refresh the running display intensity from the pixel's best event.
                    if let Some(event) = px.arena[0].best_event {
                        *running = u8::get_frame_value(
                            &event.into(),
                            SourceType::U8,
                            tpf,
                            practical_d_max,
                            self.state.params.delta_t_max,
                            self.instantaneous_view_mode,
                            if self.instantaneous_view_mode == SAE {
                                Some(SaeTime {
                                    running_t: px.running_t as DeltaT,
                                    last_fired_t: px.last_fired_t as DeltaT,
                                })
                            } else {
                                None
                            },
                        );
                    };
                }
                buffer
            })
            .collect();

        // Forward every emitted event to the encoder.
        for events in &big_buffer {
            for e1 in events.iter() {
                self.encoder.ingest_event(*e1)?;
            }
        }

        self.display_frame_features = self.state.running_intensities.clone();

        self.handle_features(&big_buffer)?;

        #[cfg(feature = "feature-logging")]
        {
            if let Some(handle) = &mut self.state.feature_log_handle {
                // Calculate current bitrate
                let mut events_per_sec = 0.0;
                for events_vec in &big_buffer {
                    events_per_sec += events_vec.len() as f64;
                }

                events_per_sec *= self.state.tps as f64 / self.state.params.ref_time as f64;

                // Presumably 9 bytes per raw event (mono) / 11 (color), ×8 → bits; confirm
                // against the raw codec's event size.
                let bitrate =
                    events_per_sec * if self.state.plane.c() == 1 { 9.0 } else { 11.0 } * 8.0;

                handle
                    .write_all(
                        &serde_pickle::to_vec(&format!("\nbps: {}", bitrate), Default::default())
                            .unwrap(),
                    )
                    .unwrap();

                handle
                    .write_all(
                        &serde_pickle::to_vec(&"\n".to_string(), Default::default()).unwrap(),
                    )
                    .unwrap();
            }
        }

        Ok(big_buffer)
    }
769
770    fn set_initial_d(&mut self, frame: &Frame) {
771        self.event_pixel_trees
772            .axis_chunks_iter_mut(Axis(0), self.state.chunk_rows)
773            .into_par_iter()
774            .zip(
775                frame
776                    .axis_chunks_iter(Axis(0), self.state.chunk_rows)
777                    .into_par_iter(),
778            )
779            .for_each(|(mut px, frame_chunk)| {
780                for (px, frame_val) in px.iter_mut().zip(frame_chunk.iter()) {
781                    let d_start = if *frame_val == 0 {
782                        D_ZERO_INTEGRATION
783                    } else {
784                        (f32::from(*frame_val)).log2().floor() as D
785                    };
786
787                    px.arena[0].set_d(d_start);
788                    px.base_val = *frame_val as u8;
789                }
790            });
791    }
792
793    /// Get `ref_time`
794    pub fn get_ref_time(&self) -> u32 {
795        self.state.params.ref_time
796    }
797
798    /// Get `delta_t_max`
799    pub fn get_delta_t_max(&self) -> u32 {
800        self.state.params.delta_t_max
801    }
802
803    /// Get `tps`
804    pub fn get_tps(&self) -> u32 {
805        self.state.tps
806    }
807
808    /// Set a new value for `delta_t_max`
809    pub fn update_delta_t_max(&mut self, dtm: u32) {
810        // Validate new value
811        self.state.params.delta_t_max = self.state.params.ref_time.max(dtm);
812    }
813
814    /// Set a new bool for `feature_detection`
815    pub fn update_detect_features(
816        &mut self,
817        detect_features: bool,
818        show_features: ShowFeatureMode,
819        feature_rate_adjustment: bool,
820        feature_cluster: bool,
821    ) {
822        // Validate new value
823        self.state.feature_detection = detect_features;
824        self.state.show_features = show_features;
825        self.state.feature_rate_adjustment = feature_rate_adjustment;
826        self.state.feature_cluster = feature_cluster;
827    }
828
829    /// Set a new value for `c_thresh_pos`
830    #[deprecated(
831        since = "0.3.4",
832        note = "please use `update_crf` or `update_quality_manual` instead"
833    )]
834    pub fn update_adder_thresh_pos(&mut self, c: u8) {
835        for px in self.event_pixel_trees.iter_mut() {
836            px.c_thresh = c;
837        }
838        dbg!("t1");
839        self.encoder.options.crf.override_c_thresh_baseline(c)
840    }
841
    /// Set a new value for `c_thresh_neg`
    ///
    /// # Panics
    /// Always panics: a separate negative threshold is not implemented; the pixel
    /// model currently uses a single `c_thresh` (see `update_adder_thresh_pos`).
    #[deprecated(
        since = "0.3.4",
        note = "please use `update_crf` or `update_quality_manual` instead"
    )]
    pub fn update_adder_thresh_neg(&mut self, _c: u8) {
        unimplemented!();
        // for px in self.event_pixel_trees.iter_mut() {
        //     px.c_thresh = c;
        // }
        // self.state.c_thresh_neg = c;
    }
854
855    pub(crate) fn handle_features(&mut self, big_buffer: &[Vec<Event>]) -> Result<(), SourceError> {
856        // if !cfg!(feature = "feature-logging") && !self.state.feature_detection {
857        if !self.state.feature_detection {
858            return Ok(()); // Early return
859        }
860        let mut new_features: Vec<Vec<Coord>> =
861            vec![Vec::with_capacity(self.state.features[0].len()); self.state.features.len()];
862
863        let _start = Instant::now();
864
865        big_buffer
866            // .par_iter()
867            // .zip(self.state.features.par_iter_mut())
868            // .zip(new_features.par_iter_mut())
869            .iter()
870            .zip(self.state.features.iter_mut())
871            .zip(new_features.iter_mut())
872            .for_each(|((events, feature_set), new_features)| {
873                for (e1, e2) in events.iter().circular_tuple_windows() {
874                    if (e1.coord.c.is_none() || e1.coord.c == Some(0))
875                        && e1.coord != e2.coord
876                        && (!cfg!(feature = "feature-logging-nonmaxsuppression") || e2.t != e1.t)
877                        && e1.d != D_EMPTY
878                    {
879                        if is_feature(e1.coord, self.state.plane, &self.state.running_intensities)
880                            .unwrap()
881                        {
882                            if feature_set.insert(e1.coord) {
883                                new_features.push(e1.coord);
884                            };
885                        } else {
886                            feature_set.remove(&e1.coord);
887                        }
888                    }
889                }
890            });
891
892        let mut new_features = new_features
893            .iter()
894            .flat_map(|feature_set| feature_set.iter().map(|coord| [coord.x, coord.y]))
895            .collect::<Vec<[u16; 2]>>();
896        let new_features: HashSet<[u16; 2]> = new_features.drain(..).collect();
897
898        #[cfg(feature = "feature-logging")]
899        {
900            let total_duration_nanos = _start.elapsed().as_nanos();
901
902            if let Some(handle) = &mut self.state.feature_log_handle {
903                for feature_set in &self.state.features {
904                    // for (coord) in feature_set {
905                    //     let bytes = serde_pickle::to_vec(
906                    //         &LogFeature::from_coord(
907                    //             *coord,
908                    //             LogFeatureSource::ADDER,
909                    //             cfg!(feature = "feature-logging-nonmaxsuppression"),
910                    //         ),
911                    //         Default::default(),
912                    //     )
913                    //     .unwrap();
914                    //     handle.write_all(&bytes).unwrap();
915                    // }
916                    handle
917                        .write_all(
918                            &serde_pickle::to_vec(&feature_set.len(), Default::default()).unwrap(),
919                        )
920                        .unwrap();
921                }
922
923                let out = format!("\nADDER FAST: {}\n", total_duration_nanos);
924                handle
925                    .write_all(&serde_pickle::to_vec(&out, Default::default()).unwrap())
926                    .unwrap();
927            }
928
929            // Convert the running intensities to a Mat
930            let cv_type = match self.state.running_intensities.shape()[2] {
931                1 => opencv::core::CV_8UC1,
932                _ => opencv::core::CV_8UC3,
933            };
934
935            let mut cv_mat = unsafe {
936                let raw_parts::RawParts {
937                    ptr,
938                    length: _,
939                    capacity: _,
940                } = raw_parts::RawParts::from_vec(
941                    self.display_frame_features.clone().into_raw_vec(),
942                ); // pixels will be move into_raw_parts,and return a manually drop pointer.
943                let mut cv_mat = opencv::core::Mat::new_rows_cols_with_data(
944                    self.state.plane.h() as i32,
945                    self.state.plane.w() as i32,
946                    cv_type,
947                    ptr as *mut c_void,
948                    opencv::core::Mat_AUTO_STEP,
949                )
950                .unwrap();
951                cv_mat.addref().unwrap(); // ???
952
953                cv_mat
954            };
955
956            let tmp = cv_mat.clone();
957            if cv_type == opencv::core::CV_8UC3 {
958                opencv::imgproc::cvt_color(&tmp, &mut cv_mat, opencv::imgproc::COLOR_BGR2GRAY, 0)?;
959            }
960
961            let start = Instant::now();
962            let mut keypoints = opencv::core::Vector::<opencv::core::KeyPoint>::new();
963
964            opencv::features2d::fast(
965                &cv_mat,
966                &mut keypoints,
967                crate::utils::cv::INTENSITY_THRESHOLD.into(),
968                cfg!(feature = "feature-logging-nonmaxsuppression"),
969            )?;
970
971            let duration = start.elapsed();
972            if let Some(handle) = &mut self.state.feature_log_handle {
973                // for keypoint in &keypoints {
974                //     let bytes = serde_pickle::to_vec(
975                //         &LogFeature::from_keypoint(
976                //             &keypoint,
977                //             LogFeatureSource::OpenCV,
978                //             cfg!(feature = "feature-logging-nonmaxsuppression"),
979                //         ),
980                //         Default::default(),
981                //     )
982                //     .unwrap();
983                //     handle.write_all(&bytes).unwrap();
984                // }
985                handle
986                    .write_all(&serde_pickle::to_vec(&keypoints.len(), Default::default()).unwrap())
987                    .unwrap();
988
989                let out = format!("\nOpenCV FAST: {}\n", duration.as_nanos());
990                handle
991                    .write_all(&serde_pickle::to_vec(&out, Default::default()).unwrap())
992                    .unwrap();
993
994                // Combine self.state.features into one hashset:
995                let mut combined_features = HashSet::new();
996                for feature_set in &self.state.features {
997                    for coord in feature_set {
998                        combined_features.insert(*coord);
999                    }
1000                }
1001                let (precision, recall, accuracy) =
1002                    crate::utils::cv::feature_precision_recall_accuracy(
1003                        &keypoints,
1004                        &combined_features,
1005                        self.state.plane,
1006                    );
1007                let out = "\nFeature results: \n".to_string();
1008                handle
1009                    .write_all(&serde_pickle::to_vec(&out, Default::default()).unwrap())
1010                    .unwrap();
1011                handle
1012                    .write_all(&serde_pickle::to_vec(&precision, Default::default()).unwrap())
1013                    .unwrap();
1014                handle
1015                    .write_all(&serde_pickle::to_vec(&recall, Default::default()).unwrap())
1016                    .unwrap();
1017                handle
1018                    .write_all(&serde_pickle::to_vec(&accuracy, Default::default()).unwrap())
1019                    .unwrap();
1020            }
1021
1022            let mut keypoint_mat = Mat::default();
1023            opencv::features2d::draw_keypoints(
1024                &cv_mat,
1025                &keypoints,
1026                &mut keypoint_mat,
1027                opencv::core::Scalar::new(0.0, 0.0, 255.0, 0.0),
1028                opencv::features2d::DrawMatchesFlags::DEFAULT,
1029            )?;
1030
1031            // show_display_force("keypoints", &keypoint_mat, 1)?;
1032        }
1033
1034        if self.state.show_features == ShowFeatureMode::Hold {
1035            // Display the feature on the viz frame
1036            for feature_set in &self.state.features {
1037                for coord in feature_set {
1038                    draw_feature_coord(
1039                        coord.x,
1040                        coord.y,
1041                        &mut self.display_frame_features,
1042                        self.state.plane.c() != 1,
1043                        None,
1044                    );
1045                }
1046            }
1047        }
1048
1049        let parameters = self.encoder.options.crf.get_parameters();
1050
1051        for coord in &new_features {
1052            if self.state.show_features == ShowFeatureMode::Instant {
1053                draw_feature_coord(
1054                    coord[0],
1055                    coord[1],
1056                    &mut self.display_frame_features,
1057                    self.state.plane.c() != 1,
1058                    None,
1059                );
1060            }
1061            if self.state.feature_rate_adjustment && parameters.feature_c_radius > 0 {
1062                eprintln!("Adjusting feature rate");
1063                let radius = parameters.feature_c_radius as i32;
1064                for row in (coord[1] as i32 - radius).max(0)
1065                    ..=(coord[1] as i32 + radius).min(self.state.plane.h() as i32 - 1)
1066                {
1067                    for col in (coord[0] as i32 - radius).max(0)
1068                        ..=(coord[0] as i32 + radius).min(self.state.plane.w() as i32 - 1)
1069                    {
1070                        for c in 0..self.state.plane.c() {
1071                            self.event_pixel_trees[[row as usize, col as usize, c as usize]]
1072                                .c_thresh = min(parameters.c_thresh_baseline, 2);
1073                        }
1074                    }
1075                }
1076            }
1077        }
1078
1079        if self.state.feature_cluster {
1080            self.cluster(&new_features);
1081        }
1082
1083        Ok(())
1084    }
1085
1086    fn cluster(&mut self, set: &HashSet<[u16; 2]>) {
1087        let points: Vec<[f32; 2]> = set
1088            .into_iter()
1089            .map(|coord| [coord[0] as f32, coord[1] as f32])
1090            .collect();
1091        let tree: KdTree<f32, 2> = (&points).into();
1092
1093        if points.len() < 3 {
1094            return;
1095        }
1096
1097        // DBSCAN algorithm to cluster the features
1098
1099        let eps = self.state.plane.min_resolution() as f32 / 3.0;
1100        let min_pts = 3;
1101
1102        let mut visited = vec![false; points.len()];
1103        let mut clusters = Vec::new();
1104
1105        for (i, point) in points.iter().enumerate() {
1106            if visited[i] {
1107                continue;
1108            }
1109            visited[i] = true;
1110
1111            let mut neighbors = tree.within_unsorted::<SquaredEuclidean>(point, eps);
1112
1113            if neighbors.len() < min_pts {
1114                continue;
1115            }
1116
1117            let mut cluster = HashSet::new();
1118            cluster.insert(i as u64);
1119
1120            let mut index = 0;
1121
1122            while index < neighbors.len() {
1123                let current_point = neighbors[index];
1124                if !visited[current_point.item as usize] {
1125                    visited[current_point.item as usize] = true;
1126
1127                    let current_neighbors = tree.within_unsorted::<SquaredEuclidean>(
1128                        &points[current_point.item as usize],
1129                        eps,
1130                    );
1131
1132                    if current_neighbors.len() >= min_pts {
1133                        neighbors.extend(
1134                            current_neighbors
1135                                .into_iter()
1136                                .filter(|&i| !cluster.contains(&i.item)),
1137                        );
1138                    }
1139                }
1140
1141                if !cluster.contains(&current_point.item) {
1142                    cluster.insert(current_point.item);
1143                }
1144
1145                index += 1;
1146            }
1147
1148            clusters.push(cluster);
1149        }
1150
1151        let mut bboxes = Vec::new();
1152        for cluster in clusters {
1153            let random_color = [
1154                rand::random::<u8>(),
1155                rand::random::<u8>(),
1156                rand::random::<u8>(),
1157            ];
1158
1159            let mut min_x = self.state.plane.w_usize();
1160            let mut max_x = 0;
1161            let mut min_y = self.state.plane.h_usize();
1162            let mut max_y = 0;
1163
1164            for i in cluster {
1165                let coord = points[i as usize];
1166                min_x = min_x.min(coord[0] as usize);
1167                max_x = max_x.max(coord[0] as usize);
1168                min_y = min_y.min(coord[1] as usize);
1169                max_y = max_y.max(coord[1] as usize);
1170
1171                if self.state.show_features != ShowFeatureMode::Off {
1172                    draw_feature_coord(
1173                        points[i as usize][0] as PixelAddress,
1174                        points[i as usize][1] as PixelAddress,
1175                        &mut self.display_frame_features,
1176                        self.state.plane.c() != 1,
1177                        Some(random_color),
1178                    );
1179                }
1180            }
1181
1182            // If area is less then 1/4 the size of the frame, push it
1183            if (max_x - min_x) * (max_y - min_y) < self.state.plane.area_wh() / 4 {
1184                bboxes.push((min_x, min_y, max_x, max_y));
1185
1186                // if self.state.show_features != ShowFeatureMode::Off {
1187                draw_rect(
1188                    min_x as PixelAddress,
1189                    min_y as PixelAddress,
1190                    max_x as PixelAddress,
1191                    max_y as PixelAddress,
1192                    &mut self.display_frame_features,
1193                    self.state.plane.c() != 1,
1194                    Some(random_color),
1195                );
1196                // }
1197            }
1198        }
1199    }
1200
1201    /// Set whether or not to detect features, and whether or not to display the features
1202    pub fn detect_features(
1203        mut self,
1204        detect_features: bool,
1205        show_features: ShowFeatureMode,
1206    ) -> Self {
1207        self.state.feature_detection = detect_features;
1208        self.state.show_features = show_features;
1209        self
1210    }
1211
1212    /// Update the CRF value and set the baseline c for all pixels
1213    pub(crate) fn update_crf(&mut self, crf: u8) {
1214        self.encoder.options.crf = Crf::new(Some(crf), self.state.plane);
1215        self.encoder.sync_crf();
1216
1217        let c_thresh_baseline = self.encoder.options.crf.get_parameters().c_thresh_baseline;
1218
1219        for px in self.event_pixel_trees.iter_mut() {
1220            px.c_thresh = c_thresh_baseline;
1221            px.c_increase_counter = 0;
1222        }
1223    }
1224
1225    /// Get the encoder options
1226    pub fn get_encoder_options(&self) -> EncoderOptions {
1227        self.encoder.get_options()
1228    }
1229
1230    /// Get the time mode of the video
1231    pub fn get_time_mode(&self) -> TimeMode {
1232        self.encoder.meta().time_mode
1233    }
1234
1235    /// Manually set the parameters dictating quality
1236    pub fn update_quality_manual(
1237        &mut self,
1238        c_thresh_baseline: u8,
1239        c_thresh_max: u8,
1240        delta_t_max_multiplier: u32,
1241        c_increase_velocity: u8,
1242        feature_c_radius: f32,
1243    ) {
1244        {
1245            let crf = &mut self.encoder.options.crf;
1246
1247            crf.override_c_thresh_baseline(c_thresh_baseline);
1248            crf.override_c_thresh_max(c_thresh_max);
1249            crf.override_c_increase_velocity(c_increase_velocity);
1250            crf.override_feature_c_radius(feature_c_radius as u16); // The absolute pixel count radius
1251        }
1252        self.state.params.delta_t_max = delta_t_max_multiplier * self.state.params.ref_time;
1253        self.encoder.sync_crf();
1254
1255        for px in self.event_pixel_trees.iter_mut() {
1256            px.c_thresh = c_thresh_baseline;
1257            px.c_increase_counter = 0;
1258        }
1259    }
1260
1261    pub fn update_encoder_options(&mut self, options: EncoderOptions) {
1262        self.encoder.options = options;
1263    }
1264
1265    /// Get the size of the raw events (in bytes)
1266    pub fn get_event_size(&self) -> u8 {
1267        self.encoder.meta().event_size
1268    }
1269}
1270
1271/// Integrate an intensity value for a pixel, over a given time span
1272///
1273/// # Arguments
1274///
1275/// * `px`: the pixel to integrate
1276/// * `base_val`: holder for the base intensity value of the pixel
1277/// * `frame_val`: the intensity value, normalized to a fixed-length period defined by `ref_time`.
1278/// Used for determining if the pixel must pop its events.
1279/// * `intensity`: the intensity to integrate
1280/// * `time_spanned`: the time spanned by the intensity value
1281/// * `buffer`: the buffer to push events to
1282/// * `state`: the state of the video source
1283///
1284/// returns: ()
1285#[inline(always)]
1286pub fn integrate_for_px(
1287    px: &mut PixelArena,
1288    base_val: &mut u8,
1289    mut frame_val: u8,
1290    mut intensity: Intensity32,
1291    time_spanned: f32,
1292    buffer: &mut Vec<Event>,
1293    params: &VideoStateParams,
1294    parameters: &CrfParameters,
1295) -> bool {
1296    let _start_len = buffer.len();
1297    let mut grew_buffer = false;
1298    if px.need_to_pop_top {
1299        buffer.push(px.pop_top_event(intensity, params.pixel_tree_mode, params.ref_time));
1300        grew_buffer = true;
1301    }
1302
1303    *base_val = px.base_val;
1304
1305    if frame_val < base_val.saturating_sub(px.c_thresh)
1306        || frame_val > base_val.saturating_add(px.c_thresh)
1307    {
1308        let _tmp = buffer.len();
1309        px.pop_best_events(
1310            buffer,
1311            params.pixel_tree_mode,
1312            params.pixel_multi_mode,
1313            params.ref_time,
1314            intensity,
1315        );
1316        grew_buffer = true;
1317        px.base_val = frame_val;
1318
1319        // If continuous mode and the D value needs to be different now
1320        if let Continuous = params.pixel_tree_mode {
1321            match px.set_d_for_continuous(intensity, params.ref_time) {
1322                None => {}
1323                Some(event) => buffer.push(event),
1324            };
1325        }
1326    }
1327
1328    px.integrate(
1329        intensity,
1330        time_spanned,
1331        params.pixel_tree_mode,
1332        params.delta_t_max,
1333        params.ref_time,
1334        parameters.c_thresh_max,
1335        parameters.c_increase_velocity,
1336        params.pixel_multi_mode,
1337    );
1338
1339    if px.need_to_pop_top {
1340        buffer.push(px.pop_top_event(intensity, params.pixel_tree_mode, params.ref_time));
1341        grew_buffer = true;
1342    }
1343
1344    // if buffer.len() - start_len > 5 {
1345    //     dbg!("hm", buffer.len() - start_len);
1346    // }
1347    grew_buffer
1348}
1349
#[cfg(feature = "open-cv")]
/// Shows the given [`Mat`] in an `OpenCV` window with the given name.
/// This function is the same as [`show_display`], except that it does not check
/// [`Video::show_display`].
/// This function is useful for debugging.
/// # Errors
/// Returns an [`opencv::Error`] if the window cannot be shown, or the [`Mat`] cannot be scaled as
/// needed.
pub fn show_display_force(window_name: &str, mat: &Mat, wait: i32) -> opencv::Result<()> {
    if mat.rows() == 940 {
        // Already at the target display height; show as-is.
        highgui::imshow(window_name, mat)?;
    } else {
        // Scale to a fixed display height of 940 px, preserving aspect ratio.
        let factor = mat.rows() as f32 / 940.0;
        let mut scaled = Mat::default();
        resize(
            mat,
            &mut scaled,
            Size {
                width: (mat.cols() as f32 / factor) as i32,
                height: 940,
            },
            0.0,
            0.0,
            0,
        )?;
        highgui::imshow(window_name, &scaled)?;
    }

    highgui::wait_key(wait)?;
    Ok(())
}
1382
1383use enum_dispatch::enum_dispatch;
1384
/// A trait for objects that can be used as a source of data for the ADΔER transcode model.
///
/// Implementors feed intensity data into a [`Video`] one input interval at a time.
/// NOTE(review): `W` is the sink the encoder writes to — presumably a file or
/// socket writer; confirm against the `Video` definition.
#[enum_dispatch]
pub trait Source<W: Write + std::marker::Send + std::marker::Sync + 'static> {
    /// Intake one input interval worth of data from the source stream into the ADΔER model as
    /// intensities.
    ///
    /// Returns the events generated for this interval, grouped per chunk.
    fn consume(&mut self) -> Result<Vec<Vec<Event>>, SourceError>;

    /// Set the Constant Rate Factor (CRF) quality setting for the encoder. 0 is lossless, 9 is worst quality.
    fn crf(&mut self, crf: u8);

    /// Get a mutable reference to the [`Video`] object associated with this [`Source`].
    fn get_video_mut(&mut self) -> &mut Video<W>;

    /// Get an immutable reference to the [`Video`] object associated with this [`Source`].
    fn get_video_ref(&self) -> &Video<W>;

    /// Get the [`Video`] object associated with this [`Source`], consuming the [`Source`] in the
    /// process.
    fn get_video(self) -> Video<W>;

    /// Get the input frame from the source
    fn get_input(&self) -> Option<&Frame>;

    /// Get the last-calculated bitrate of the input (in bits per second)
    fn get_running_input_bitrate(&self) -> f64;
}
1411
1412// fn convert_u8_to_f32_simd(input: &[u8]) -> Vec<f32> {
1413//     // Ensure that the input length is a multiple of 16
1414//     let len = input.len() / 16 * 16;
1415//
1416//     // Use the simd crate to load u8x16 vectors and convert to f32x4 vectors
1417//     let mut result: Vec<f32> = Vec::with_capacity(len / 4);
1418//     for i in (0..len).step_by(16) {
1419//         let u8_slice = &input[i..i + 16];
1420//         let u8x16_vector: u8x16 = u8_slice.load_unaligned().into();
1421//         let f32x4_vector: f32x4 = unsafe { std::mem::transmute(u8x16_vector) };
1422//         for j in 0..4 {
1423//             result.push(f32x4_vector.extract(j));
1424//         }
1425//     }
1426//
1427//     result
1428// }