// use crate::transcoder::d_controller::DecimationMode;
// use crate::transcoder::source::video::{Source, SourceError, Video};
// use crate::SourceCamera::DavisU8;
// use crate::{DeltaT, Event};
// use davis_edi_rs::util::reconstructor::Reconstructor;
// use davis_edi_rs::*;
// use opencv::core::Mat;
// use opencv::{imgproc, prelude::*, videoio, Result};
// use std::sync::mpsc::{Receiver, Sender};
//
// /// Attributes of a DAVIS video -> ADΔER transcode
// pub struct DavisSource {
//     reconstructor: Reconstructor,
//     pub(crate) input_frame_scaled: Box<Mat>,
//     c_thresh_pos: u8,
//     c_thresh_neg: u8,
//
//     pub(crate) video: Video,
//     image_8u: Mat,
// }
//
// impl DavisSource {
//     /// Initialize the DAVIS source, taking the `height` and `width` reported by the
//     /// [`Reconstructor`] in order to initialize the [`Video`]
//     fn new(
//         mut reconstructor: Reconstructor,
//         output_events_filename: Option<String>,
//         tps: DeltaT,
//         delta_t_max: DeltaT,
//         show_display_b: bool,
//     ) -> Result<DavisSource> {
//         let video = Video::new(
//             reconstructor.width as u16,
//             reconstructor.height as u16,
//             output_events_filename,
//             1,
//             tps,
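//             // ticks per reconstructed frame: tps / output_fps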
//             (tps as f64 / reconstructor.output_fps) as u32,
//             delta_t_max,
//             DecimationMode::Manual,
//             true, // TODO
//             true, // TODO
//             show_display_b,
//             DavisU8,
//         );
//         let davis_source = DavisSource {
//             reconstructor,
//             input_frame_scaled: Box::new(Default::default()),
//             c_thresh_pos: 15, // TODO
//             c_thresh_neg: 15, // TODO
//             video,
//             image_8u: Mat::default(),
//         };
//         Ok(davis_source)
//     }
// }
//
// impl Source for DavisSource {
//     fn consume(&mut self, view_interval: u32) -> std::result::Result<Vec<Vec<Event>>, SourceError> {
//         // Attempting a new method for integration, without requiring a buffer. Could be
//         // implemented for the framed source just as easily.
//         // Keep running the integration, starting at D = log_2(current_frame) + 1
//         // --If the integration exceeds 2^D, then store in the pixel object what that event would be.
//         // --Then keep track of two branches:
//         // ----1: continue the integration toward D + 1
//         // ----2: assume that the event fired, and integrate toward a new event
//         // ---------But this could branch too... some sort of binary tree of pixel objects?
//         // ---------If (1) fills up for the higher D, then delete (2) and
//         //          create a new branch for (2) (sketched below)
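//         // A rough sketch of that branching idea, using hypothetical names
//         // (`PixelBranch`, `grow`) which do not exist in this crate: each node
//         // integrates toward 2^d, and once that threshold is reached it forks into
//         // one branch that keeps integrating toward d + 1 and one branch that
//         // assumes the event fired and starts a fresh integration.
//         //
//         //     struct PixelBranch {
//         //         d: u8,            // decimation level this branch is integrating toward
//         //         integration: u32, // intensity accumulated so far
//         //         delta_t: u32,     // ticks elapsed since this branch started
//         //         // (continue toward d + 1, restart after the assumed event)
//         //         children: Option<Box<(PixelBranch, PixelBranch)>>,
//         //     }
//         //
//         //     impl PixelBranch {
//         //         fn grow(&mut self, intensity: u32, dt: u32) {
//         //             if let Some(kids) = &mut self.children {
//         //                 // Already forked: both hypotheses keep integrating.
//         //                 kids.0.grow(intensity, dt);
//         //                 kids.1.grow(intensity, dt);
//         //                 return;
//         //             }
//         //             self.integration += intensity;
//         //             self.delta_t += dt;
//         //             if self.integration >= (1u32 << self.d) {
//         //                 // 2^d reached: fork into "keep going" and "event fired".
//         //                 self.children = Some(Box::new((
//         //                     PixelBranch {
//         //                         d: self.d + 1,
//         //                         integration: self.integration,
//         //                         delta_t: self.delta_t,
//         //                         children: None,
//         //                     },
//         //                     PixelBranch { d: self.d, integration: 0, delta_t: 0, children: None },
//         //                 )));
//         //             }
//         //         }
//         //     }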
//
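//         // NOTE: this `async` block only constructs a future; nothing awaits or polls it,
//         // so the reconstructor is never actually advanced before the `todo!()` below.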
//         async {
//             match self.reconstructor.next().await {
//                 None => {
//                     println!("\nFinished!");
//                 }
//                 Some(image) => {
//                     // frame_count += 1;
//                     let image = match image {
//                         Ok(a) => a,
//                         Err(_) => {
//                             panic!("No image")
//                         }
//                     };
//                 }
//             }
//         };
//         todo!()
//     }
//
//     fn get_video_mut(&mut self) -> &mut Video {
//         &mut self.video
//     }
//
//     fn get_video(&self) -> &Video {
//         &self.video
//     }
// }