// adder_codec_rs/transcoder/source/video.rs

1#[cfg(feature = "open-cv")]
2use {
3    davis_edi_rs::util::reconstructor::ReconstructionError,
4    opencv::core::{Mat, Size},
5    opencv::prelude::*,
6    opencv::{highgui, imgproc::resize},
7};
8
9use std::cmp::min;
10use std::collections::HashSet;
11#[cfg(feature = "feature-logging")]
12use std::ffi::c_void;
13use std::io::{sink, Write};
14use std::mem::swap;
15
16use adder_codec_core::codec::empty::stream::EmptyOutput;
17use adder_codec_core::codec::encoder::Encoder;
18use adder_codec_core::codec::raw::stream::RawOutput;
19use adder_codec_core::codec::{
20    CodecError, CodecMetadata, EncoderOptions, EncoderType, LATEST_CODEC_VERSION,
21};
22use adder_codec_core::{
23    Coord, DeltaT, Event, Mode, PixelAddress, PixelMultiMode, PlaneError, PlaneSize, SourceCamera,
24    SourceType, TimeMode, D_EMPTY, D_ZERO_INTEGRATION,
25};
26use bumpalo::Bump;
27
28use std::sync::mpsc::{channel, Sender};
29use std::time::Instant;
30
31use crate::framer::scale_intensity::{FrameValue, SaeTime};
32use crate::transcoder::event_pixel_tree::{Intensity32, PixelArena};
33use adder_codec_core::D;
34
35#[cfg(feature = "compression")]
36use adder_codec_core::codec::compressed::stream::CompressedOutput;
37use adder_codec_core::Mode::Continuous;
38use itertools::Itertools;
39use ndarray::{Array, Array3, Axis, ShapeError};
40use rayon::iter::IndexedParallelIterator;
41use rayon::iter::IntoParallelIterator;
42use rayon::iter::ParallelIterator;
43
44use crate::transcoder::source::video::FramedViewMode::SAE;
45use crate::utils::cv::is_feature;
46
47use crate::utils::viz::{draw_feature_coord, draw_rect, ShowFeatureMode};
48use adder_codec_core::codec::rate_controller::{Crf, CrfParameters};
49use kiddo::{KdTree, SquaredEuclidean};
50use thiserror::Error;
51use tokio::task::JoinError;
52use video_rs_adder_dep::Frame;
53
/// Various errors that can occur during an ADΔER transcode
#[derive(Error, Debug)]
pub enum SourceError {
    /// Could not open source file
    #[error("Could not open source file")]
    Open,

    /// Incorrect parameters for the given source
    #[error("ADDER parameters are invalid for the given source: `{0}`")]
    BadParams(String),

    /// When a [Framed](crate::transcoder::source::framed::Framed) source is used, but the start frame is out of bounds
    #[error("start frame `{0}` is out of bounds")]
    StartOutOfBounds(u32),

    /// No more data to consume from the video source
    #[error("Source buffer is empty")]
    BufferEmpty,

    /// Source buffer channel is closed
    #[error("Source buffer channel is closed")]
    BufferChannelClosed,

    /// No data from next spot in buffer
    #[error("No data from next spot in buffer")]
    NoData,

    /// Data not initialized
    #[error("Data not initialized")]
    UninitializedData,

    #[cfg(feature = "open-cv")]
    /// OpenCV error (converted via the manual `From<opencv::Error>` impl)
    #[error("OpenCV error")]
    OpencvError(opencv::Error),

    /// video-rs error (converted via the manual `From<video_rs_adder_dep::Error>` impl)
    #[error("video-rs error")]
    VideoError(video_rs_adder_dep::Error),

    /// Codec error (converted via the manual `From<CodecError>` impl)
    #[error("Codec core error")]
    CodecError(CodecError),

    #[cfg(feature = "open-cv")]
    /// EDI error
    // NOTE(review): no `#[from]` or manual `From` impl is visible for this variant in this
    // file — confirm callers construct it explicitly.
    #[error("EDI error")]
    EdiError(ReconstructionError),

    /// Shape error
    #[error("Shape error")]
    ShapeError(#[from] ShapeError),

    /// Plane error
    #[error("Plane error")]
    PlaneError(#[from] PlaneError),

    /// Handle join error
    #[error("Handle join error")]
    JoinError(#[from] JoinError),

    /// Vision application error
    #[error("Vision application error")]
    VisionError(String),

    /// I/O error
    #[error("I/O error")]
    IoError(#[from] std::io::Error),
}
123
#[cfg(feature = "open-cv")]
impl From<opencv::Error> for SourceError {
    /// Wrap an OpenCV error in [`SourceError::OpencvError`].
    fn from(err: opencv::Error) -> Self {
        Self::OpencvError(err)
    }
}
130impl From<adder_codec_core::codec::CodecError> for SourceError {
131    fn from(value: CodecError) -> Self {
132        SourceError::CodecError(value)
133    }
134}
135
136impl From<video_rs_adder_dep::Error> for SourceError {
137    fn from(value: video_rs_adder_dep::Error) -> Self {
138        SourceError::VideoError(value)
139    }
140}
141
/// The display mode
///
/// Selects which component of each pixel's most recent event is rendered into the
/// instantaneous display frame.
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub enum FramedViewMode {
    /// Visualize the intensity (2^[`D`] / [`DeltaT`]) of each pixel's most recent event
    #[default]
    Intensity,

    /// Visualize the [`D`] component of each pixel's most recent event
    D,

    /// Visualize the temporal component ([`DeltaT`]) of each pixel's most recent event
    DeltaT,

    /// Surface of Active Events. Visualize the time elapsed since each pixel last fired an event
    /// (most recent events will have greater values)
    SAE,
}
159
/// Core per-transcode parameters: pixel behavior modes and timing bounds
#[derive(Debug)]
pub struct VideoStateParams {
    /// The integration [`Mode`] applied to every pixel's event tree (e.g. [`Continuous`])
    pub(crate) pixel_tree_mode: Mode,

    /// Multi-event behavior for pixels; semantics defined by [`PixelMultiMode`]
    pub pixel_multi_mode: PixelMultiMode,

    /// The maximum time difference between events of the same pixel, in ticks
    pub delta_t_max: u32,

    /// The reference time in ticks
    pub ref_time: u32,
}
172
173impl Default for VideoStateParams {
174    fn default() -> Self {
175        Self {
176            pixel_tree_mode: Continuous,
177            pixel_multi_mode: Default::default(),
178            delta_t_max: 7650,
179            ref_time: 255,
180        }
181    }
182}
183
/// Running state of the video transcode
#[derive(Debug)]
pub struct VideoState {
    /// Core transcode parameters (pixel modes and timing bounds)
    pub params: VideoStateParams,

    /// The size of the imaging plane
    pub plane: PlaneSize,

    /// The number of rows of pixels to process at a time (per thread)
    pub chunk_rows: usize,

    /// The number of input intervals (of fixed time) processed so far
    pub in_interval_count: u32,

    /// The number of ticks per second
    pub tps: DeltaT,

    /// Whether or not to detect features
    pub feature_detection: bool,

    /// The current instantaneous frame, for determining features
    pub running_intensities: Array3<u8>,

    /// Whether or not to draw the features on the display mat, and the mode to do it in
    show_features: ShowFeatureMode,

    /// Feature coordinates detected so far, one set per row-chunk (sized by `chunk_rows`)
    features: Vec<HashSet<Coord>>,

    /// Open handle for the feature log file, when feature logging is in use
    pub feature_log_handle: Option<std::fs::File>,
    // Whether to adjust the event rate based on detected features.
    // NOTE(review): consumed outside this excerpt — confirm semantics at the call site.
    feature_rate_adjustment: bool,
    // Whether to cluster detected features.
    // NOTE(review): consumed outside this excerpt — confirm semantics at the call site.
    feature_cluster: bool,

    /// Optional region of interest whose pixels receive a lowered contrast threshold
    roi: Option<Roi>,
}
218
/// A rectangular region of interest on the imaging plane
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Roi {
    /// Start (minimum) coordinate of the region, inclusive
    pub start: Coord,
    /// End (maximum) coordinate of the region, inclusive
    pub end: Coord,
}
224
225impl Default for VideoState {
226    fn default() -> Self {
227        VideoState {
228            plane: PlaneSize::default(),
229            params: VideoStateParams::default(),
230            chunk_rows: 1,
231            in_interval_count: 1,
232            tps: 7650,
233            feature_detection: false,
234            running_intensities: Default::default(),
235            show_features: ShowFeatureMode::Off,
236            features: Default::default(),
237            feature_log_handle: None,
238            feature_rate_adjustment: false,
239            feature_cluster: false,
240            roi: None,
241        }
242    }
243}
244
245// impl VideoState {
246//     fn update_crf(&mut self, crf: u8) {
247//         self.crf_quality = crf;
248//         self.c_thresh_baseline = CRF[crf as usize][0] as u8;
249//         self.c_thresh_max = CRF[crf as usize][1] as u8;
250//
251//         self.c_increase_velocity = CRF[crf as usize][2] as u8;
252//         self.feature_c_radius = (CRF[crf as usize][3] * self.plane.min_resolution() as f32) as u16;
253//     }
254//
255//     fn update_quality_manual(
256//         &mut self,
257//         c_thresh_baseline: u8,
258//         c_thresh_max: u8,
259//         delta_t_max_multiplier: u32,
260//         c_increase_velocity: u8,
261//         feature_c_radius: f32,
262//     ) {
263//         self.c_thresh_baseline = c_thresh_baseline;
264//         self.c_thresh_max = c_thresh_max;
265//         self.delta_t_max = delta_t_max_multiplier * self.ref_time;
266//         self.c_increase_velocity = c_increase_velocity;
267//         self.feature_c_radius = feature_c_radius as u16; // The absolute pixel count radius
268//     }
269// }
270
/// A builder for a [`Video`]
pub trait VideoBuilder<W> {
    /// Set the Constant Rate Factor (CRF) quality setting for the encoder. 0 is lossless, 9 is worst quality.
    fn crf(self, crf: u8) -> Self;

    /// Manually set the parameters dictating quality
    fn quality_manual(
        self,
        c_thresh_baseline: u8,
        c_thresh_max: u8,
        delta_t_max_multiplier: u32,
        c_increase_velocity: u8,
        feature_c_radius_denom: f32,
    ) -> Self;

    /// Set the chunk rows
    fn chunk_rows(self, chunk_rows: usize) -> Self;

    /// Set the time parameters
    ///
    /// # Errors
    /// Returns a [`SourceError`] if the parameters are invalid for this source.
    fn time_parameters(
        self,
        tps: DeltaT,
        ref_time: DeltaT,
        delta_t_max: DeltaT,
        time_mode: Option<TimeMode>,
    ) -> Result<Self, SourceError>
    where
        Self: std::marker::Sized;

    /// Set the [`Encoder`]
    ///
    /// # Errors
    /// Returns a [`SourceError`] if the encoder cannot be constructed with these options.
    fn write_out(
        self,
        source_camera: SourceCamera,
        time_mode: TimeMode,
        pixel_multi_mode: PixelMultiMode,
        adu_interval: Option<usize>,
        encoder_type: EncoderType,
        encoder_options: EncoderOptions,
        write: W,
    ) -> Result<Box<Self>, SourceError>;

    /// Set whether or not to detect features, and whether or not to display the features
    fn detect_features(self, detect_features: bool, show_features: ShowFeatureMode) -> Self;

    /// Set the file path (name) used for feature logging
    #[cfg(feature = "feature-logging")]
    fn log_path(self, name: String) -> Self;
}
318
319// impl VideoBuilder for Video {}
320
/// Attributes common to ADΔER transcode process
pub struct Video<W: Write + std::marker::Send + std::marker::Sync + 'static> {
    /// The current state of the video transcode
    pub state: VideoState,
    /// Per-pixel event trees, laid out as (rows, columns, channels)
    pub(crate) event_pixel_trees: Array3<PixelArena>,

    /// The current instantaneous display frame with the features drawn on it
    pub display_frame_features: Frame,

    /// The current view mode of the instantaneous frame
    pub instantaneous_view_mode: FramedViewMode,

    /// Channel for sending events to the encoder
    pub event_sender: Sender<Vec<Event>>,

    /// The object that takes in ADDER events, potentially transforms them in some way,
    /// and writes them somewhere
    pub encoder: Encoder<W>,

    /// The type of encoder being used (e.g., compressed or raw)
    pub encoder_type: EncoderType,
    // TODO: Hold multiple encoder options and an enum, so that boxing isn't required.
    // Also hold a state for whether or not to write out events at all, so that a null writer isn't required.
    // Eric: this is somewhat addressed above
}
// SAFETY: NOTE(review): `W` is already bound by `Send + Sync`; this impl asserts that the
// remaining fields (pixel-tree array, channel sender, frames, encoder) may be moved across
// threads — confirm none of them holds a non-Send handle.
unsafe impl<W: Write + std::marker::Send + std::marker::Sync + 'static> Send for Video<W> {}
347
348impl<W: Write + 'static + std::marker::Send + std::marker::Sync + 'static> Video<W> {
349    /// Initialize the Video with default parameters.
350    pub(crate) fn new(
351        plane: PlaneSize,
352        pixel_tree_mode: Mode,
353        writer: Option<W>,
354    ) -> Result<Video<W>, SourceError> {
355        let mut state = VideoState {
356            params: VideoStateParams {
357                pixel_tree_mode,
358                ..Default::default()
359            },
360            running_intensities: Array::zeros((plane.h_usize(), plane.w_usize(), plane.c_usize())),
361            ..Default::default()
362        };
363
364        let mut data = Vec::new();
365        for y in 0..plane.h() {
366            for x in 0..plane.w() {
367                for c in 0..plane.c() {
368                    let px = PixelArena::new(
369                        1.0,
370                        Coord {
371                            x,
372                            y,
373                            c: match &plane.c() {
374                                1 => None,
375                                _ => Some(c),
376                            },
377                        },
378                    );
379                    data.push(px);
380                }
381            }
382        }
383
384        let event_pixel_trees: Array3<PixelArena> =
385            Array3::from_shape_vec((plane.h_usize(), plane.w_usize(), plane.c_usize()), data)?;
386        let instantaneous_frame =
387            Array3::zeros((plane.h_usize(), plane.w_usize(), plane.c_usize()));
388
389        state.plane = plane;
390        let instantaneous_view_mode = FramedViewMode::Intensity;
391        let (event_sender, _) = channel();
392        let meta = CodecMetadata {
393            codec_version: LATEST_CODEC_VERSION,
394            header_size: 0,
395            time_mode: TimeMode::AbsoluteT,
396            plane: state.plane,
397            tps: state.tps,
398            ref_interval: state.params.ref_time,
399            delta_t_max: state.params.delta_t_max,
400            event_size: 0,
401            source_camera: SourceCamera::default(), // TODO: Allow for setting this
402            adu_interval: Default::default(),
403        };
404
405        match writer {
406            None => {
407                let encoder: Encoder<W> = Encoder::new_empty(
408                    EmptyOutput::new(meta, sink()),
409                    EncoderOptions::default(state.plane),
410                );
411                Ok(Video {
412                    state,
413                    event_pixel_trees,
414                    display_frame_features: instantaneous_frame,
415                    instantaneous_view_mode,
416                    event_sender,
417                    encoder,
418                    encoder_type: EncoderType::Empty,
419                })
420            }
421            Some(w) => {
422                let encoder = Encoder::new_raw(
423                    // TODO: Allow for compressed representation (not just raw)
424                    RawOutput::new(meta, w),
425                    EncoderOptions::default(state.plane),
426                );
427                Ok(Video {
428                    state,
429                    event_pixel_trees,
430                    display_frame_features: instantaneous_frame,
431                    instantaneous_view_mode,
432                    event_sender,
433                    encoder,
434                    encoder_type: EncoderType::Empty,
435                })
436            }
437        }
438    }
439
440    /// Set the positive contrast threshold
441    #[deprecated(
442        since = "0.3.4",
443        note = "please use `update_crf` or `update_quality_manual` instead"
444    )]
445    pub fn c_thresh_pos(mut self, c_thresh_pos: u8) -> Self {
446        for px in self.event_pixel_trees.iter_mut() {
447            px.c_thresh = c_thresh_pos;
448        }
449        dbg!("t");
450        self.encoder
451            .options
452            .crf
453            .override_c_thresh_baseline(c_thresh_pos);
454        self
455    }
456
    /// Set the negative contrast threshold
    ///
    /// # Panics
    /// Always panics: negative-threshold control is not implemented.
    #[deprecated(
        since = "0.3.4",
        note = "please use `update_crf` or `update_quality_manual` instead"
    )]
    pub fn c_thresh_neg(self, _c_thresh_neg: u8) -> Self {
        unimplemented!();
        // for px in self.event_pixel_trees.iter_mut() {
        //     px.c_thresh = c_thresh_neg;
        // }
        // self
    }
469
470    /// Set the number of rows to process at a time (in each thread)
471    pub fn chunk_rows(mut self, chunk_rows: usize) -> Self {
472        self.state.chunk_rows = chunk_rows;
473        let mut num_chunks = self.state.plane.h_usize() / chunk_rows;
474        if self.state.plane.h_usize() % chunk_rows != 0 {
475            num_chunks += 1;
476        }
477        self.state.features = vec![HashSet::new(); num_chunks];
478        self
479    }
480
    /// Set the time parameters for the video.
    ///
    /// These parameters, in conjunction, determine the temporal resolution and maximum transcode
    /// accuracy/quality.
    ///
    /// # Arguments
    ///
    /// * `tps`: ticks per second
    /// * `ref_time`: reference time in ticks.
    /// * `delta_t_max`: maximum time difference between events of the same pixel, in ticks
    ///
    /// returns: `Result<Video<W>, Box<dyn Error, Global>>`
    pub fn time_parameters(
        mut self,
        tps: DeltaT,
        ref_time: DeltaT,
        delta_t_max: DeltaT,
        time_mode: Option<TimeMode>,
    ) -> Result<Self, SourceError> {
        // Propagate the (possibly new) time mode to every pixel, in parallel.
        self.event_pixel_trees.par_map_inplace(|px| {
            px.time_mode(time_mode);
        });

        // NOTE(review): `f32::MAX as u32` is a saturating cast equal to `u32::MAX`, so this
        // guard (and the two like it below) can never be true for a `u32` argument. The
        // intended bound was presumably "losslessly representable as f32" — confirm and tighten.
        if ref_time > f32::MAX as u32 {
            eprintln!(
                "Reference time {} is too large. Keeping current value of {}.",
                ref_time, self.state.params.ref_time
            );
            return Ok(self);
        }
        if tps > f32::MAX as u32 {
            eprintln!(
                "Time per sample {} is too large. Keeping current value of {}.",
                tps, self.state.tps
            );
            return Ok(self);
        }
        if delta_t_max > f32::MAX as u32 {
            eprintln!(
                "Delta t max {} is too large. Keeping current value of {}.",
                delta_t_max, self.state.params.delta_t_max
            );
            return Ok(self);
        }
        // Rejecting parameters is deliberately non-fatal: warn and keep the old values.
        if delta_t_max < ref_time {
            eprintln!(
                "Delta t max {} is smaller than reference time {}. Keeping current value of {}.",
                delta_t_max, ref_time, self.state.params.delta_t_max
            );
            return Ok(self);
        }
        self.state.params.delta_t_max = delta_t_max;
        self.state.params.ref_time = ref_time;
        self.state.tps = tps;

        Ok(self)
    }
538
    /// Write out the video to a file.
    ///
    /// Replaces the current encoder with one of the requested `encoder_type`, built from
    /// the current state's plane/timing metadata, and re-propagates `time_mode` to every
    /// pixel.
    ///
    /// # Arguments
    ///
    /// * `source_camera`: the type of video source
    /// * `time_mode`: the time mode of the video
    /// * `pixel_multi_mode`: multi-event pixel behavior; defaults to `Collapse` when `None`
    /// * `adu_interval`: ADU interval for the compressed codec (ignored by raw/empty)
    /// * `encoder_type`: which encoder implementation to install
    /// * `encoder_options`: rate-control/quality options for the encoder
    /// * `write`: the output stream to write to
    ///
    /// # Errors
    /// Returns [`SourceError::BadParams`] if `Compressed` is requested without the
    /// `compression` feature enabled.
    pub fn write_out(
        mut self,
        source_camera: Option<SourceCamera>,
        time_mode: Option<TimeMode>,
        pixel_multi_mode: Option<PixelMultiMode>,
        adu_interval: Option<usize>,
        encoder_type: EncoderType,
        encoder_options: EncoderOptions,
        write: W,
    ) -> Result<Self, SourceError> {
        let encoder: Encoder<_> = match encoder_type {
            EncoderType::Compressed => {
                #[cfg(feature = "compression")]
                {
                    self.state.params.pixel_multi_mode =
                        pixel_multi_mode.unwrap_or(PixelMultiMode::Collapse);
                    let compression = CompressedOutput::new(
                        CodecMetadata {
                            codec_version: LATEST_CODEC_VERSION,
                            header_size: 0,
                            time_mode: time_mode.unwrap_or_default(),
                            plane: self.state.plane,
                            tps: self.state.tps,
                            ref_interval: self.state.params.ref_time,
                            delta_t_max: self.state.params.delta_t_max,
                            event_size: 0,
                            source_camera: source_camera.unwrap_or_default(),
                            adu_interval: adu_interval.unwrap_or_default(),
                        },
                        write,
                    );
                    Encoder::new_compressed(compression, encoder_options)
                }
                #[cfg(not(feature = "compression"))]
                {
                    // Without the feature, a compressed encoder cannot be built at all.
                    return Err(SourceError::BadParams(
                        "Compressed representation is experimental and is not enabled by default!"
                            .to_string(),
                    ));
                }
            }
            EncoderType::Raw => {
                self.state.params.pixel_multi_mode =
                    pixel_multi_mode.unwrap_or(PixelMultiMode::Collapse);
                let compression = RawOutput::new(
                    CodecMetadata {
                        codec_version: LATEST_CODEC_VERSION,
                        header_size: 0,
                        time_mode: time_mode.unwrap_or_default(),
                        plane: self.state.plane,
                        tps: self.state.tps,
                        ref_interval: self.state.params.ref_time,
                        delta_t_max: self.state.params.delta_t_max,
                        event_size: 0,
                        source_camera: source_camera.unwrap_or_default(),
                        adu_interval: Default::default(),
                    },
                    write,
                );
                Encoder::new_raw(compression, encoder_options)
            }
            EncoderType::Empty => {
                self.state.params.pixel_multi_mode =
                    pixel_multi_mode.unwrap_or(PixelMultiMode::Collapse);
                // Note: the empty encoder writes to `sink()`; the provided `write`
                // stream is dropped unused in this branch.
                let compression = EmptyOutput::new(
                    CodecMetadata {
                        codec_version: LATEST_CODEC_VERSION,
                        header_size: 0,
                        time_mode: time_mode.unwrap_or_default(),
                        plane: self.state.plane,
                        tps: self.state.tps,
                        ref_interval: self.state.params.ref_time,
                        delta_t_max: self.state.params.delta_t_max,
                        event_size: 0,
                        source_camera: source_camera.unwrap_or_default(),
                        adu_interval: Default::default(),
                    },
                    sink(),
                );
                Encoder::new_empty(compression, encoder_options)
            }
        };

        self.encoder = encoder;
        self.encoder_type = encoder_type;

        // Keep every pixel's time mode in sync with the new encoder's metadata.
        self.event_pixel_trees.par_map_inplace(|px| {
            px.time_mode(time_mode);
        });
        Ok(self)
    }
637
638    /// Close and flush the stream writer.
639    /// # Errors
640    /// Returns an error if the stream writer cannot be closed cleanly.
641    pub fn end_write_stream(&mut self) -> Result<Option<W>, SourceError> {
642        let mut tmp: Encoder<W> = Encoder::new_empty(
643            EmptyOutput::new(CodecMetadata::default(), sink()),
644            self.encoder.options,
645        );
646        swap(&mut self.encoder, &mut tmp);
647        Ok(tmp.close_writer()?)
648    }
649
    /// Integrate one input frame (`matrix`) spanning `time_spanned` ticks into every
    /// pixel's event tree, in parallel row-chunks, emitting the resulting events.
    ///
    /// Also refreshes `running_intensities`/`display_frame_features`, runs feature
    /// handling and ROI handling, and (with the `feature-logging` feature) logs the
    /// instantaneous bitrate.
    ///
    /// Returns one `Vec<Event>` per row-chunk, in chunk order.
    ///
    /// # Errors
    /// Propagates encoder ingestion and feature-handling errors.
    #[allow(clippy::needless_pass_by_value)]
    pub(crate) fn integrate_matrix(
        &mut self,
        matrix: Frame,
        time_spanned: f32,
    ) -> Result<Vec<Vec<Event>>, SourceError> {
        // First frame ever seen: seed each pixel's initial D from the input.
        if self.state.in_interval_count == 0 {
            self.set_initial_d(&matrix);
        }

        let parameters = *self.encoder.options.crf.get_parameters();

        self.state.in_interval_count += 1;

        // let matrix_f32 = convert_u8_to_f32_simd(&matrix.into_raw_vec());
        let matrix = matrix.mapv(f32::from);

        // TODO: When there's full support for various bit-depth sources, modify this accordingly
        // NOTE(review): `delta_t_max / ref_time` is integer division here — confirm truncation
        // is intended before the f32 conversion.
        let practical_d_max = fast_math::log2_raw(
            255.0 * (self.state.params.delta_t_max / self.state.params.ref_time) as f32,
        );

        // Ticks per frame, used when framing event values below.
        let tpf = self.state.params.ref_time as f64;

        let params = &self.state.params;
        // Important: if framing the events simultaneously, then the chunk division must be
        // exactly the same as it is for the framer
        let big_buffer: Vec<Vec<Event>> = self
            .event_pixel_trees
            .axis_chunks_iter_mut(Axis(0), self.state.chunk_rows)
            .into_par_iter()
            .zip(
                matrix
                    .axis_chunks_iter(Axis(0), self.state.chunk_rows)
                    .into_par_iter(),
            )
            .zip(
                self.state
                    .running_intensities
                    .axis_chunks_iter_mut(Axis(0), self.state.chunk_rows)
                    .into_par_iter(),
            )
            .map(|((mut px_chunk, matrix_chunk), mut running_chunk)| {
                // Per-chunk event buffer; chunks are independent so no locking is needed.
                let mut buffer: Vec<Event> = Vec::with_capacity(10);
                // Bump arena provides a chunk-local scratch allocation for the base value.
                let bump = Bump::new();
                let base_val = bump.alloc(0);

                for ((px, input), running) in px_chunk
                    .iter_mut()
                    .zip(matrix_chunk.iter())
                    .zip(running_chunk.iter_mut())
                {
                    integrate_for_px(
                        px,
                        base_val,
                        *input as u8,
                        *input, // In this case, frame val is the same as intensity to integrate
                        time_spanned,
                        &mut buffer,
                        params,
                        &parameters,
                    );

                    // Refresh the running intensity for this pixel from its best event,
                    // rendered according to the current view mode.
                    if let Some(event) = px.arena[0].best_event {
                        *running = u8::get_frame_value(
                            &event.into(),
                            SourceType::U8,
                            tpf,
                            practical_d_max,
                            self.state.params.delta_t_max,
                            self.instantaneous_view_mode,
                            if self.instantaneous_view_mode == SAE {
                                Some(SaeTime {
                                    running_t: px.running_t as DeltaT,
                                    last_fired_t: px.last_fired_t as DeltaT,
                                })
                            } else {
                                None
                            },
                        );
                    };
                }
                buffer
            })
            .collect();

        // Events must be ingested serially, in chunk order, to keep the stream ordered.
        for events in &big_buffer {
            for e1 in events.iter() {
                self.encoder.ingest_event(*e1)?;
            }
        }

        self.display_frame_features = self.state.running_intensities.clone();

        self.handle_features(&big_buffer)?;

        #[cfg(feature = "feature-logging")]
        {
            if let Some(handle) = &mut self.state.feature_log_handle {
                // Calculate current bitrate
                let mut events_per_sec = 0.0;
                for events_vec in &big_buffer {
                    events_per_sec += events_vec.len() as f64;
                }

                events_per_sec *= self.state.tps as f64 / self.state.params.ref_time as f64;

                // NOTE(review): 9.0/11.0 appear to be the per-event byte sizes for mono vs.
                // color streams — confirm against the codec's event layout.
                let bitrate =
                    events_per_sec * if self.state.plane.c() == 1 { 9.0 } else { 11.0 } * 8.0;

                handle
                    .write_all(
                        &serde_pickle::to_vec(&format!("\nbps: {}", bitrate), Default::default())
                            .unwrap(),
                    )
                    .unwrap();

                handle
                    .write_all(
                        &serde_pickle::to_vec(&"\n".to_string(), Default::default()).unwrap(),
                    )
                    .unwrap();
            }
        }

        self.handle_roi();

        Ok(big_buffer)
    }
779
780    fn set_initial_d(&mut self, frame: &Frame) {
781        self.event_pixel_trees
782            .axis_chunks_iter_mut(Axis(0), self.state.chunk_rows)
783            .into_par_iter()
784            .zip(
785                frame
786                    .axis_chunks_iter(Axis(0), self.state.chunk_rows)
787                    .into_par_iter(),
788            )
789            .for_each(|(mut px, frame_chunk)| {
790                for (px, frame_val) in px.iter_mut().zip(frame_chunk.iter()) {
791                    let d_start = if *frame_val == 0 {
792                        D_ZERO_INTEGRATION
793                    } else {
794                        (f32::from(*frame_val)).log2().floor() as D
795                    };
796
797                    px.arena[0].set_d(d_start);
798                    px.base_val = *frame_val;
799                }
800            });
801    }
802
803    /// Get `ref_time`
804    pub fn get_ref_time(&self) -> u32 {
805        self.state.params.ref_time
806    }
807
808    /// Get `delta_t_max`
809    pub fn get_delta_t_max(&self) -> u32 {
810        self.state.params.delta_t_max
811    }
812
813    /// Get `tps`
814    pub fn get_tps(&self) -> u32 {
815        self.state.tps
816    }
817
818    /// Set a new value for `delta_t_max`
819    pub fn update_delta_t_max(&mut self, dtm: u32) {
820        // Validate new value
821        self.state.params.delta_t_max = self.state.params.ref_time.max(dtm);
822    }
823
824    /// Set a new bool for `feature_detection`
825    pub fn update_detect_features(
826        &mut self,
827        detect_features: bool,
828        show_features: ShowFeatureMode,
829        feature_rate_adjustment: bool,
830        feature_cluster: bool,
831    ) {
832        // Validate new value
833        self.state.feature_detection = detect_features;
834        self.state.show_features = show_features;
835        self.state.feature_rate_adjustment = feature_rate_adjustment;
836        self.state.feature_cluster = feature_cluster;
837    }
838
839    /// Set a new value for `c_thresh_pos`
840    #[deprecated(
841        since = "0.3.4",
842        note = "please use `update_crf` or `update_quality_manual` instead"
843    )]
844    pub fn update_adder_thresh_pos(&mut self, c: u8) {
845        for px in self.event_pixel_trees.iter_mut() {
846            px.c_thresh = c;
847        }
848        dbg!("t1");
849        self.encoder.options.crf.override_c_thresh_baseline(c)
850    }
851
    /// Set a new value for `c_thresh_neg`
    ///
    /// # Panics
    /// Always panics: negative-threshold control is not implemented.
    #[deprecated(
        since = "0.3.4",
        note = "please use `update_crf` or `update_quality_manual` instead"
    )]
    pub fn update_adder_thresh_neg(&mut self, _c: u8) {
        unimplemented!();
        // for px in self.event_pixel_trees.iter_mut() {
        //     px.c_thresh = c;
        // }
        // self.state.c_thresh_neg = c;
    }
864
865    fn handle_roi(&mut self) {
866        if self.state.roi.is_none() {
867            return;
868        }
869        let roi = self.state.roi.unwrap();
870
871        // For each pixel within the roi, set a low c_thresh
872        let parameters = self.encoder.options.crf.get_parameters();
873        for y in roi.start.y as usize..=roi.end.y as usize {
874            for x in roi.start.x as usize..=roi.end.x as usize {
875                for c in 0..self.state.plane.c_usize() {
876                    self.event_pixel_trees[[y, x, c]].c_thresh =
877                        min(parameters.c_thresh_baseline, 2);
878                }
879            }
880        }
881    }
882
883    pub(crate) fn handle_features(&mut self, big_buffer: &[Vec<Event>]) -> Result<(), SourceError> {
884        // if !cfg!(feature = "feature-logging") && !self.state.feature_detection {
885        if !self.state.feature_detection {
886            return Ok(()); // Early return
887        }
888        let mut new_features: Vec<Vec<Coord>> =
889            vec![Vec::with_capacity(self.state.features[0].len()); self.state.features.len()];
890
891        let _start = Instant::now();
892
893        big_buffer
894            // .par_iter()
895            // .zip(self.state.features.par_iter_mut())
896            // .zip(new_features.par_iter_mut())
897            .iter()
898            .zip(self.state.features.iter_mut())
899            .zip(new_features.iter_mut())
900            .for_each(|((events, feature_set), new_features)| {
901                for (e1, e2) in events.iter().circular_tuple_windows() {
902                    if (e1.coord.c.is_none() || e1.coord.c == Some(0))
903                        && e1.coord != e2.coord
904                        && (!cfg!(feature = "feature-logging-nonmaxsuppression") || e2.t != e1.t)
905                        && e1.d != D_EMPTY
906                    {
907                        if is_feature(e1.coord, self.state.plane, &self.state.running_intensities)
908                            .unwrap()
909                        {
910                            if feature_set.insert(e1.coord) {
911                                new_features.push(e1.coord);
912                            };
913                        } else {
914                            feature_set.remove(&e1.coord);
915                        }
916                    }
917                }
918            });
919
920        let mut new_features = new_features
921            .iter()
922            .flat_map(|feature_set| feature_set.iter().map(|coord| [coord.x, coord.y]))
923            .collect::<Vec<[u16; 2]>>();
924        let new_features: HashSet<[u16; 2]> = new_features.drain(..).collect();
925
926        #[cfg(feature = "feature-logging")]
927        {
928            let total_duration_nanos = _start.elapsed().as_nanos();
929
930            if let Some(handle) = &mut self.state.feature_log_handle {
931                for feature_set in &self.state.features {
932                    // for (coord) in feature_set {
933                    //     let bytes = serde_pickle::to_vec(
934                    //         &LogFeature::from_coord(
935                    //             *coord,
936                    //             LogFeatureSource::ADDER,
937                    //             cfg!(feature = "feature-logging-nonmaxsuppression"),
938                    //         ),
939                    //         Default::default(),
940                    //     )
941                    //     .unwrap();
942                    //     handle.write_all(&bytes).unwrap();
943                    // }
944                    handle
945                        .write_all(
946                            &serde_pickle::to_vec(&feature_set.len(), Default::default()).unwrap(),
947                        )
948                        .unwrap();
949                }
950
951                let out = format!("\nADDER FAST: {}\n", total_duration_nanos);
952                handle
953                    .write_all(&serde_pickle::to_vec(&out, Default::default()).unwrap())
954                    .unwrap();
955            }
956
957            // Convert the running intensities to a Mat
958            let cv_type = match self.state.running_intensities.shape()[2] {
959                1 => opencv::core::CV_8UC1,
960                _ => opencv::core::CV_8UC3,
961            };
962
963            let mut cv_mat = unsafe {
964                let raw_parts::RawParts {
965                    ptr,
966                    length: _,
967                    capacity: _,
968                } = raw_parts::RawParts::from_vec(
969                    self.display_frame_features.clone().into_raw_vec(),
970                ); // pixels will be move into_raw_parts,and return a manually drop pointer.
971                let mut cv_mat = opencv::core::Mat::new_rows_cols_with_data(
972                    self.state.plane.h() as i32,
973                    self.state.plane.w() as i32,
974                    cv_type,
975                    ptr as *mut c_void,
976                    opencv::core::Mat_AUTO_STEP,
977                )
978                .unwrap();
979                cv_mat.addref().unwrap(); // ???
980
981                cv_mat
982            };
983
984            let tmp = cv_mat.clone();
985            if cv_type == opencv::core::CV_8UC3 {
986                opencv::imgproc::cvt_color(&tmp, &mut cv_mat, opencv::imgproc::COLOR_BGR2GRAY, 0)?;
987            }
988
989            let start = Instant::now();
990            let mut keypoints = opencv::core::Vector::<opencv::core::KeyPoint>::new();
991
992            opencv::features2d::fast(
993                &cv_mat,
994                &mut keypoints,
995                crate::utils::cv::INTENSITY_THRESHOLD.into(),
996                cfg!(feature = "feature-logging-nonmaxsuppression"),
997            )?;
998
999            let duration = start.elapsed();
1000            if let Some(handle) = &mut self.state.feature_log_handle {
1001                // for keypoint in &keypoints {
1002                //     let bytes = serde_pickle::to_vec(
1003                //         &LogFeature::from_keypoint(
1004                //             &keypoint,
1005                //             LogFeatureSource::OpenCV,
1006                //             cfg!(feature = "feature-logging-nonmaxsuppression"),
1007                //         ),
1008                //         Default::default(),
1009                //     )
1010                //     .unwrap();
1011                //     handle.write_all(&bytes).unwrap();
1012                // }
1013                handle
1014                    .write_all(&serde_pickle::to_vec(&keypoints.len(), Default::default()).unwrap())
1015                    .unwrap();
1016
1017                let out = format!("\nOpenCV FAST: {}\n", duration.as_nanos());
1018                handle
1019                    .write_all(&serde_pickle::to_vec(&out, Default::default()).unwrap())
1020                    .unwrap();
1021
1022                // Combine self.state.features into one hashset:
1023                let mut combined_features = HashSet::new();
1024                for feature_set in &self.state.features {
1025                    for coord in feature_set {
1026                        combined_features.insert(*coord);
1027                    }
1028                }
1029                let (precision, recall, accuracy) =
1030                    crate::utils::cv::feature_precision_recall_accuracy(
1031                        &keypoints,
1032                        &combined_features,
1033                        self.state.plane,
1034                    );
1035                let out = "\nFeature results: \n".to_string();
1036                handle
1037                    .write_all(&serde_pickle::to_vec(&out, Default::default()).unwrap())
1038                    .unwrap();
1039                handle
1040                    .write_all(&serde_pickle::to_vec(&precision, Default::default()).unwrap())
1041                    .unwrap();
1042                handle
1043                    .write_all(&serde_pickle::to_vec(&recall, Default::default()).unwrap())
1044                    .unwrap();
1045                handle
1046                    .write_all(&serde_pickle::to_vec(&accuracy, Default::default()).unwrap())
1047                    .unwrap();
1048            }
1049
1050            let mut keypoint_mat = Mat::default();
1051            opencv::features2d::draw_keypoints(
1052                &cv_mat,
1053                &keypoints,
1054                &mut keypoint_mat,
1055                opencv::core::Scalar::new(0.0, 0.0, 255.0, 0.0),
1056                opencv::features2d::DrawMatchesFlags::DEFAULT,
1057            )?;
1058
1059            // show_display_force("keypoints", &keypoint_mat, 1)?;
1060        }
1061
1062        if self.state.show_features == ShowFeatureMode::Hold {
1063            // Display the feature on the viz frame
1064            for feature_set in &self.state.features {
1065                for coord in feature_set {
1066                    draw_feature_coord(
1067                        coord.x,
1068                        coord.y,
1069                        &mut self.display_frame_features,
1070                        self.state.plane.c() != 1,
1071                        None,
1072                    );
1073                }
1074            }
1075        }
1076
1077        let parameters = self.encoder.options.crf.get_parameters();
1078
1079        for coord in &new_features {
1080            if self.state.show_features == ShowFeatureMode::Instant {
1081                draw_feature_coord(
1082                    coord[0],
1083                    coord[1],
1084                    &mut self.display_frame_features,
1085                    self.state.plane.c() != 1,
1086                    None,
1087                );
1088            }
1089            if self.state.feature_rate_adjustment && parameters.feature_c_radius > 0 {
1090                eprintln!("Adjusting feature rate");
1091                let radius = parameters.feature_c_radius as i32;
1092                for row in (coord[1] as i32 - radius).max(0)
1093                    ..=(coord[1] as i32 + radius).min(self.state.plane.h() as i32 - 1)
1094                {
1095                    for col in (coord[0] as i32 - radius).max(0)
1096                        ..=(coord[0] as i32 + radius).min(self.state.plane.w() as i32 - 1)
1097                    {
1098                        for c in 0..self.state.plane.c() {
1099                            self.event_pixel_trees[[row as usize, col as usize, c as usize]]
1100                                .c_thresh = min(parameters.c_thresh_baseline, 2);
1101                        }
1102                    }
1103                }
1104            }
1105        }
1106
1107        if self.state.feature_cluster {
1108            self.cluster(&new_features);
1109        }
1110
1111        Ok(())
1112    }
1113
    /// Group feature coordinates into spatial clusters with DBSCAN, draw each
    /// cluster's member features in a shared random color, and draw a bounding
    /// box around clusters smaller than a quarter of the frame area.
    ///
    /// `set` holds `[x, y]` feature coordinates.
    fn cluster(&mut self, set: &HashSet<[u16; 2]>) {
        let points: Vec<[f32; 2]> = set
            .iter()
            .map(|coord| [coord[0] as f32, coord[1] as f32])
            .collect();
        // NOTE(review): the kd-tree is built even when the early return below
        // fires; the length check could come first.
        let tree: KdTree<f32, 2> = (&points).into();

        if points.len() < 3 {
            return;
        }

        // DBSCAN algorithm to cluster the features

        // NOTE(review): `eps` is passed directly to a SquaredEuclidean query, so
        // it is interpreted as a *squared* distance — confirm that is intended.
        let eps = self.state.plane.min_resolution() as f32 / 3.0;
        let min_pts = 3;

        let mut visited = vec![false; points.len()];
        let mut clusters = Vec::new();

        for (i, point) in points.iter().enumerate() {
            if visited[i] {
                continue;
            }
            visited[i] = true;

            // All points within `eps` of the seed (the query result includes the seed)
            let mut neighbors = tree.within_unsorted::<SquaredEuclidean>(point, eps);

            if neighbors.len() < min_pts {
                // Not a core point; it may still be absorbed into another
                // cluster via that cluster's expansion below
                continue;
            }

            let mut cluster = HashSet::new();
            cluster.insert(i as u64);

            let mut index = 0;

            // Expand the cluster: `neighbors` doubles as a work queue that grows
            // whenever a newly-visited member turns out to be a core point
            while index < neighbors.len() {
                let current_point = neighbors[index];
                if !visited[current_point.item as usize] {
                    visited[current_point.item as usize] = true;

                    let current_neighbors = tree.within_unsorted::<SquaredEuclidean>(
                        &points[current_point.item as usize],
                        eps,
                    );

                    if current_neighbors.len() >= min_pts {
                        neighbors.extend(
                            current_neighbors
                                .into_iter()
                                .filter(|&i| !cluster.contains(&i.item)),
                        );
                    }
                }

                if !cluster.contains(&current_point.item) {
                    cluster.insert(current_point.item);
                }

                index += 1;
            }

            clusters.push(cluster);
        }

        let mut bboxes = Vec::new();
        for cluster in clusters {
            // One random color per cluster, shared by its features and its box
            let random_color = [
                rand::random::<u8>(),
                rand::random::<u8>(),
                rand::random::<u8>(),
            ];

            // Accumulate the cluster's axis-aligned bounding box
            let mut min_x = self.state.plane.w_usize();
            let mut max_x = 0;
            let mut min_y = self.state.plane.h_usize();
            let mut max_y = 0;

            for i in cluster {
                let coord = points[i as usize];
                min_x = min_x.min(coord[0] as usize);
                max_x = max_x.max(coord[0] as usize);
                min_y = min_y.min(coord[1] as usize);
                max_y = max_y.max(coord[1] as usize);

                if self.state.show_features != ShowFeatureMode::Off {
                    draw_feature_coord(
                        points[i as usize][0] as PixelAddress,
                        points[i as usize][1] as PixelAddress,
                        &mut self.display_frame_features,
                        self.state.plane.c() != 1,
                        Some(random_color),
                    );
                }
            }

            // If area is less than 1/4 the size of the frame, push it
            if (max_x - min_x) * (max_y - min_y) < self.state.plane.area_wh() / 4 {
                bboxes.push((min_x, min_y, max_x, max_y));

                // if self.state.show_features != ShowFeatureMode::Off {
                draw_rect(
                    min_x as PixelAddress,
                    min_y as PixelAddress,
                    max_x as PixelAddress,
                    max_y as PixelAddress,
                    &mut self.display_frame_features,
                    self.state.plane.c() != 1,
                    Some(random_color),
                );
                // }
            }
        }
    }
1228
1229    /// Set whether or not to detect features, and whether or not to display the features
1230    pub fn detect_features(
1231        mut self,
1232        detect_features: bool,
1233        show_features: ShowFeatureMode,
1234    ) -> Self {
1235        self.state.feature_detection = detect_features;
1236        self.state.show_features = show_features;
1237        self
1238    }
1239
1240    /// Update the CRF value and set the baseline c for all pixels
1241    pub(crate) fn update_crf(&mut self, crf: u8) {
1242        self.encoder.options.crf = Crf::new(Some(crf), self.state.plane);
1243        self.encoder.sync_crf();
1244
1245        let c_thresh_baseline = self.encoder.options.crf.get_parameters().c_thresh_baseline;
1246
1247        for px in self.event_pixel_trees.iter_mut() {
1248            px.c_thresh = c_thresh_baseline;
1249            px.c_increase_counter = 0;
1250        }
1251    }
1252
    /// Get the encoder options
    ///
    /// Returns a copy of the [`EncoderOptions`] currently held by the encoder.
    pub fn get_encoder_options(&self) -> EncoderOptions {
        self.encoder.get_options()
    }
1257
    /// Get the time mode of the video
    ///
    /// Read from the encoder's stream metadata.
    pub fn get_time_mode(&self) -> TimeMode {
        self.encoder.meta().time_mode
    }
1262
1263    /// Manually set the parameters dictating quality
1264    pub fn update_quality_manual(
1265        &mut self,
1266        c_thresh_baseline: u8,
1267        c_thresh_max: u8,
1268        delta_t_max_multiplier: u32,
1269        c_increase_velocity: u8,
1270        feature_c_radius: f32,
1271    ) {
1272        {
1273            let crf = &mut self.encoder.options.crf;
1274
1275            crf.override_c_thresh_baseline(c_thresh_baseline);
1276            crf.override_c_thresh_max(c_thresh_max);
1277            crf.override_c_increase_velocity(c_increase_velocity);
1278            crf.override_feature_c_radius(feature_c_radius as u16); // The absolute pixel count radius
1279        }
1280        self.state.params.delta_t_max = delta_t_max_multiplier * self.state.params.ref_time;
1281        self.encoder.sync_crf();
1282
1283        for px in self.event_pixel_trees.iter_mut() {
1284            px.c_thresh = c_thresh_baseline;
1285            px.c_increase_counter = 0;
1286        }
1287    }
1288
    /// Replace the encoder options wholesale with `options`.
    pub fn update_encoder_options(&mut self, options: EncoderOptions) {
        self.encoder.options = options;
    }
1292
    /// Set (or clear, with `None`) the region of interest for the transcode.
    pub fn update_roi(&mut self, roi: Option<Roi>) {
        self.state.roi = roi;
    }
1296
    /// Get the size of the raw events (in bytes)
    ///
    /// Read from the encoder's stream metadata.
    pub fn get_event_size(&self) -> u8 {
        self.encoder.meta().event_size
    }
1301}
1302
1303/// Integrate an intensity value for a pixel, over a given time span
1304///
1305/// # Arguments
1306///
1307/// * `px`: the pixel to integrate
1308/// * `base_val`: holder for the base intensity value of the pixel
1309/// * `frame_val`: the intensity value, normalized to a fixed-length period defined by `ref_time`.
1310///   Used for determining if the pixel must pop its events.
1311/// * `intensity`: the intensity to integrate
1312/// * `time_spanned`: the time spanned by the intensity value
1313/// * `buffer`: the buffer to push events to
1314/// * `state`: the state of the video source
1315///
1316/// returns: ()
1317#[inline(always)]
1318pub fn integrate_for_px(
1319    px: &mut PixelArena,
1320    base_val: &mut u8,
1321    frame_val: u8,
1322    intensity: Intensity32,
1323    time_spanned: f32,
1324    buffer: &mut Vec<Event>,
1325    params: &VideoStateParams,
1326    parameters: &CrfParameters,
1327) -> bool {
1328    let _start_len = buffer.len();
1329    let mut grew_buffer = if px.need_to_pop_top {
1330        buffer.push(px.pop_top_event(intensity, params.pixel_tree_mode, params.ref_time));
1331        true
1332    } else {
1333        false
1334    };
1335
1336    *base_val = px.base_val;
1337
1338    if frame_val < base_val.saturating_sub(px.c_thresh)
1339        || frame_val > base_val.saturating_add(px.c_thresh)
1340    {
1341        let _tmp = buffer.len();
1342        px.pop_best_events(
1343            buffer,
1344            params.pixel_tree_mode,
1345            params.pixel_multi_mode,
1346            params.ref_time,
1347            intensity,
1348        );
1349        grew_buffer = true;
1350        px.base_val = frame_val;
1351
1352        // If continuous mode and the D value needs to be different now
1353        if params.pixel_tree_mode == Continuous {
1354            if let Some(event) = px.set_d_for_continuous(intensity, params.ref_time) {
1355                buffer.push(event)
1356            };
1357        }
1358    }
1359
1360    px.integrate(
1361        intensity,
1362        time_spanned,
1363        params.pixel_tree_mode,
1364        params.delta_t_max,
1365        params.ref_time,
1366        parameters.c_thresh_max,
1367        parameters.c_increase_velocity,
1368        params.pixel_multi_mode,
1369    );
1370
1371    if px.need_to_pop_top {
1372        buffer.push(px.pop_top_event(intensity, params.pixel_tree_mode, params.ref_time));
1373        grew_buffer = true;
1374    }
1375
1376    // if buffer.len() - start_len > 5 {
1377    //     dbg!("hm", buffer.len() - start_len);
1378    // }
1379    grew_buffer
1380}
1381
#[cfg(feature = "open-cv")]
/// Shows the given [`Mat`] in an `OpenCV` window with the given name.
/// This function is the same as [`show_display`], except that it does not check
/// [`Video::show_display`].
/// This function is useful for debugging.
/// # Errors
/// Returns an [`opencv::Error`] if the window cannot be shown, or the [`Mat`] cannot be scaled as
/// needed.
pub fn show_display_force(window_name: &str, mat: &Mat, wait: i32) -> opencv::Result<()> {
    // Target on-screen height, in pixels
    const DISPLAY_HEIGHT: i32 = 940;

    if mat.rows() == DISPLAY_HEIGHT {
        highgui::imshow(window_name, mat)?;
    } else {
        // Scale so the displayed image is exactly DISPLAY_HEIGHT tall,
        // preserving the aspect ratio
        let factor = mat.rows() as f32 / DISPLAY_HEIGHT as f32;
        let mut scaled = Mat::default();
        resize(
            mat,
            &mut scaled,
            Size {
                width: (mat.cols() as f32 / factor) as i32,
                height: DISPLAY_HEIGHT,
            },
            0.0,
            0.0,
            0,
        )?;
        highgui::imshow(window_name, &scaled)?;
    }

    highgui::wait_key(wait)?;
    Ok(())
}
1414
1415use enum_dispatch::enum_dispatch;
1416
/// A trait for objects that can be used as a source of data for the ADΔER transcode model.
///
/// Dispatched via [`enum_dispatch`] so concrete sources can be held in an enum
/// without dynamic dispatch.
#[enum_dispatch]
pub trait Source<W: Write + std::marker::Send + std::marker::Sync + 'static> {
    /// Intake one input interval worth of data from the source stream into the ADΔER model as
    /// intensities.
    ///
    /// Returns the events fired during the interval, grouped per plane.
    fn consume(&mut self) -> Result<Vec<Vec<Event>>, SourceError>;

    /// Set the Constant Rate Factor (CRF) quality setting for the encoder. 0 is lossless, 9 is worst quality.
    fn crf(&mut self, crf: u8);

    /// Get a mutable reference to the [`Video`] object associated with this [`Source`].
    fn get_video_mut(&mut self) -> &mut Video<W>;

    /// Get an immutable reference to the [`Video`] object associated with this [`Source`].
    fn get_video_ref(&self) -> &Video<W>;

    /// Get the [`Video`] object associated with this [`Source`], consuming the [`Source`] in the
    /// process.
    fn get_video(self) -> Video<W>;

    /// Get the input frame from the source
    ///
    /// Returns `None` if no input frame is available.
    fn get_input(&self) -> Option<&Frame>;

    /// Get the last-calculated bitrate of the input (in bits per second)
    fn get_running_input_bitrate(&self) -> f64;
}
1443
1444// fn convert_u8_to_f32_simd(input: &[u8]) -> Vec<f32> {
1445//     // Ensure that the input length is a multiple of 16
1446//     let len = input.len() / 16 * 16;
1447//
1448//     // Use the simd crate to load u8x16 vectors and convert to f32x4 vectors
1449//     let mut result: Vec<f32> = Vec::with_capacity(len / 4);
1450//     for i in (0..len).step_by(16) {
1451//         let u8_slice = &input[i..i + 16];
1452//         let u8x16_vector: u8x16 = u8_slice.load_unaligned().into();
1453//         let f32x4_vector: f32x4 = unsafe { std::mem::transmute(u8x16_vector) };
1454//         for j in 0..4 {
1455//             result.push(f32x4_vector.extract(j));
1456//         }
1457//     }
1458//
1459//     result
1460// }