use crate::framer::scale_intensity::{FrameValue, SaeTime};
use crate::transcoder::source::video::FramedViewMode::SAE;
use crate::transcoder::source::video::{
integrate_for_px, Source, SourceError, Video, VideoBuilder,
};
use crate::utils::cv::mid_clamp_u8;
use crate::utils::viz::ShowFeatureMode;
use adder_codec_core::codec::{EncoderOptions, EncoderType};
use adder_codec_core::Mode::Continuous;
use adder_codec_core::{
DeltaT, Event, PixelMultiMode, PlaneSize, SourceCamera, SourceType, TimeMode,
};
use ndarray::Array3;
use rayon::ThreadPool;
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::fs::File;
use std::io::{self, BufRead, BufReader, Read, Seek, SeekFrom, Write};
use std::path::PathBuf;
use video_rs_adder_dep::Frame;
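/// Prophesee DAT event timestamps are expressed in microseconds, so the source
/// clock runs at one million ticks per second.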
const PROPHESEE_SOURCE_TPS: u32 = 1_000_000;
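/// A video [`Source`] that transcodes DVS events from a Prophesee DAT recording
/// into ADΔER events.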
pub struct Prophesee<W: Write> {
pub(crate) video: Video<W>,
input_reader: BufReader<File>,
running_t: u32,
t_subtract: u32,
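    /// The timestamp (in DVS clock ticks) of the last event integrated at each pixel.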
pub dvs_last_timestamps: Array3<u32>,
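    /// The last log-intensity value at each pixel, stored as `ln(1 + I/255)`.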
pub dvs_last_ln_val: Array3<f64>,
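    /// The contrast threshold of the DVS camera: the log-intensity step implied
    /// by a single event.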
camera_theta: f64,
}
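/// A single decoded DVS event: microsecond timestamp `t`, pixel coordinates
/// `(x, y)`, and polarity `p` (0 = OFF, 1 = ON).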
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct DvsEvent {
t: u32,
x: u16,
y: u16,
p: u8,
}
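// SAFETY: asserted so the source can be shared across transcoder threads;
// `consume` takes `&mut self`, so shared references are only used read-only.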
unsafe impl<W: Write> Sync for Prophesee<W> {}
impl<W: Write + 'static> Prophesee<W> {
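    /// Open a Prophesee DAT recording and initialize the transcoder state.
    ///
    /// `ref_time` is the reference interval in ticks; because DAT timestamps
    /// are microseconds, the resulting tick rate is
    /// `ref_time * PROPHESEE_SOURCE_TPS` ticks per second. A minimal usage
    /// sketch (the path and `ref_time` value here are illustrative):
    ///
    /// ```ignore
    /// let mut source =
    ///     Prophesee::<std::io::BufWriter<std::fs::File>>::new(255, "recording.dat".to_string())?;
    /// ```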
pub fn new(ref_time: u32, input_filename: String) -> Result<Self, Box<dyn Error>> {
let source = File::open(PathBuf::from(input_filename))?;
let mut input_reader = BufReader::new(source);
        let (_, _, _, size) = parse_header(&mut input_reader)?;
let plane = PlaneSize::new(size.1 as u16, size.0 as u16, 1)?;
let mut video = Video::new(plane, Continuous, None)?
.chunk_rows(1)
.time_parameters(
ref_time * PROPHESEE_SOURCE_TPS,
ref_time,
ref_time * 2,
Some(TimeMode::AbsoluteT),
)?;
let start_intensities = vec![128_u8; video.state.plane.volume()];
video.state.running_intensities = Array3::from_shape_vec(
(plane.h().into(), plane.w().into(), plane.c().into()),
start_intensities,
)?;
video.display_frame_features = video.state.running_intensities.clone();
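        // Start each pixel's last-event timestamp at 2, to match the two
        // reference frames integrated on the first `consume()` call.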
let timestamps = vec![2_u32; video.state.plane.volume()];
let dvs_last_timestamps: Array3<u32> = Array3::from_shape_vec(
(plane.h().into(), plane.w().into(), plane.c().into()),
timestamps,
)?;
let plane = &video.state.plane;
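        // Log intensity is stored as ln(1 + I/255); every pixel starts at mid-gray (128).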
let start_vals = vec![(128.0_f64 / 255.0_f64).ln_1p(); video.state.plane.volume()];
let dvs_last_ln_val: Array3<f64> = Array3::from_shape_vec(
(plane.h() as usize, plane.w() as usize, plane.c() as usize),
start_vals,
)?;
let prophesee_source = Prophesee {
video,
input_reader,
running_t: 0,
t_subtract: 0,
dvs_last_timestamps,
dvs_last_ln_val,
            camera_theta: 0.02,
        };
Ok(prophesee_source)
}
}
impl<W: Write + 'static + std::marker::Send> Source<W> for Prophesee<W> {
fn consume(
&mut self,
_thread_pool: &ThreadPool,
) -> Result<Vec<Vec<Event>>, SourceError> {
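        // On the first call, integrate two reference frames of mid-gray so every
        // pixel tree is primed and fires an initial event, matching the starting
        // value of 2 in `dvs_last_timestamps`.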
if self.running_t == 0 {
self.video.integrate_matrix(
self.video.state.running_intensities.clone(),
self.video.state.params.ref_time as f32,
)?;
let first_events: Vec<Event> = self
.video
.integrate_matrix(
self.video.state.running_intensities.clone(),
self.video.state.params.ref_time as f32,
)?
.into_iter()
.flatten()
.collect();
assert_eq!(first_events.len(), self.video.state.plane.volume());
self.running_t = 2;
}
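        // Consume events in batches spanning roughly 1/60 of a second of DVS time.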
let view_interval = PROPHESEE_SOURCE_TPS / 60;
let mut dvs_events: Vec<DvsEvent> = Vec::new();
        let start_running_t = self.running_t;
        loop {
            let dvs_event = match decode_event(&mut self.input_reader) {
                Ok(mut dvs_event) => {
                    dvs_event.t -= self.t_subtract;
                    if dvs_event.t > self.running_t {
                        self.running_t = dvs_event.t;
                    }
                    dvs_event
                }
                Err(e) => {
                    eprintln!("End of input file");
                    end_events(self);
                    return Err(e.into());
                }
            };
            let t = dvs_event.t;
            dvs_events.push(dvs_event);
            if t > start_running_t + view_interval {
                break;
            }
        }
let mut events: Vec<Event> = Vec::new();
let crf_parameters = *self.video.encoder.options.crf.get_parameters();
for dvs_event in dvs_events {
let x = dvs_event.x as usize;
let y = dvs_event.y as usize;
let p = dvs_event.p as usize;
let t = dvs_event.t;
let last_t = self.dvs_last_timestamps[[y, x, 0]];
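            // Skip events that arrive out of timestamp order for this pixel.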
if t < last_t {
continue;
}
let mut last_ln_val = self.dvs_last_ln_val[[y, x, 0]];
let px = &mut self.video.event_pixel_trees[[y, x, 0]];
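            // If more than one DVS tick has passed since this pixel's last event,
            // first integrate the held intensity across the gap.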
if t > last_t + 1 {
let mut last_val = (last_ln_val.exp() - 1.0) * 255.0;
mid_clamp_u8(&mut last_val, &mut last_ln_val);
let time_spanned = (t - last_t - 1) * self.video.state.params.ref_time;
let intensity_to_integrate = last_val * (t - last_t - 1) as f64;
let mut base_val = 0;
let _ = integrate_for_px(
px,
&mut base_val,
last_val as u8,
intensity_to_integrate as f32,
time_spanned as f32,
&mut events,
&self.video.state.params,
&crf_parameters,
);
}
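            // Step the log intensity by the contrast threshold, up for ON events
            // and down for OFF events.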
let mut new_ln_val = match p {
0 => last_ln_val - self.camera_theta,
1 => last_ln_val + self.camera_theta,
_ => panic!("Invalid polarity"),
};
self.dvs_last_ln_val[[y, x, 0]] = new_ln_val;
self.dvs_last_timestamps[[y, x, 0]] = t;
if t > last_t {
let mut new_val = (new_ln_val.exp() - 1.0) * 255.0;
mid_clamp_u8(&mut new_val, &mut new_ln_val);
self.dvs_last_ln_val[[y, x, 0]] = new_ln_val;
let time_spanned = self.video.state.params.ref_time;
let intensity_to_integrate = new_val;
let mut base_val = 0;
let _ = integrate_for_px(
px,
&mut base_val,
new_val as u8,
intensity_to_integrate as f32,
time_spanned as f32,
&mut events,
&self.video.state.params,
&crf_parameters,
);
}
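            // Refresh the live view with this pixel's current best event.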
if let Some(event) = px.arena[0].best_event {
self.video.state.running_intensities[[y, x, 0]] = u8::get_frame_value(
&event.into(),
SourceType::U8,
self.video.state.params.ref_time as f64,
32.0,
self.video.state.params.delta_t_max,
self.video.instantaneous_view_mode,
if self.video.instantaneous_view_mode == SAE {
Some(SaeTime {
running_t: px.running_t as DeltaT,
last_fired_t: px.last_fired_t as DeltaT,
})
} else {
None
},
);
self.video.display_frame_features[[y, x, 0]] =
self.video.state.running_intensities[[y, x, 0]];
        }
}
if self.video.state.feature_detection {
self.video.display_frame_features = self.video.state.running_intensities.clone();
}
let events_nested: Vec<Vec<Event>> = vec![events];
self.video.handle_features(&events_nested)?;
for events in &events_nested {
for event in events {
self.video.encoder.ingest_event(*event)?;
}
}
Ok(events_nested)
}
fn crf(&mut self, crf: u8) {
self.video.update_crf(crf);
}
fn get_video_mut(&mut self) -> &mut Video<W> {
&mut self.video
}
fn get_video_ref(&self) -> &Video<W> {
&self.video
}
fn get_video(self) -> Video<W> {
self.video
}
fn get_input(&self) -> Option<&Frame> {
None
}
fn get_running_input_bitrate(&self) -> f64 {
0.0
}
}
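/// Integrate each pixel's held intensity up to the end of the recording and
/// flush the resulting ADΔER events to the encoder.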
fn end_events<W: Write + 'static + std::marker::Send>(prophesee: &mut Prophesee<W>) {
let mut events: Vec<Event> = Vec::new();
let crf_parameters = *prophesee.video.encoder.options.crf.get_parameters();
for y in 0..prophesee.video.state.plane.h_usize() {
for x in 0..prophesee.video.state.plane.w_usize() {
let px = &mut prophesee.video.event_pixel_trees[[y, x, 0]];
let mut base_val = 0;
let last_ln_val = prophesee.dvs_last_ln_val[[y, x, 0]];
let last_val = (last_ln_val.exp() - 1.0) * 255.0;
            assert!(prophesee.running_t > prophesee.dvs_last_timestamps[[y, x, 0]]);
let time_spanned = (prophesee.running_t - prophesee.dvs_last_timestamps[[y, x, 0]])
* prophesee.video.state.params.ref_time;
let intensity_to_integrate = last_val * time_spanned as f64;
let _ = integrate_for_px(
px,
&mut base_val,
last_val as u8,
intensity_to_integrate as f32,
time_spanned as f32,
&mut events,
&prophesee.video.state.params,
&crf_parameters,
);
}
}
for event in &events {
prophesee.video.encoder.ingest_event(*event).unwrap();
}
}
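/// Parse the ASCII header of a Prophesee DAT file.
///
/// Returns `(bod, ev_type, ev_size, (height, width))`, where `bod` is the byte
/// offset at which binary event data begins.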
fn parse_header(file: &mut BufReader<File>) -> io::Result<(u64, u8, u8, (u32, u32))> {
    file.seek(SeekFrom::Start(0))?;
    let mut bod = 0;
let mut end_of_header = false;
let mut num_comment_line = 0;
let mut size = [None, None];
while !end_of_header {
        bod = file.stream_position()?;
        let mut line = Vec::new();
        file.read_until(b'\n', &mut line)?;
        if line.is_empty() || line[0] != b'%' {
end_of_header = true;
} else {
            let words: Vec<&[u8]> = line.split(|&x| x == b' ' || x == b'\t').collect();
            if words.len() > 1 {
match words[1] {
b"Height" => {
size[0] = line_to_hw(words);
}
b"Width" => {
size[1] = line_to_hw(words);
}
_ => {}
}
}
num_comment_line += 1;
}
}
    file.seek(SeekFrom::Start(bod))?;
    let (ev_type, ev_size) = if num_comment_line > 0 {
        // A binary header (event type byte, then event size byte) follows the comments.
        let mut buf = [0; 2];
        file.read_exact(&mut buf)?;
        let ev_type = buf[0];
        let ev_size = buf[1];
        if ev_size != 8 || (ev_type != 0 && ev_type != 12) {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid Prophesee event type or size",
            ));
        }
        (ev_type, ev_size)
    } else {
        (0, 0)
    };
    bod = file.stream_position()?;
Ok((
bod,
ev_type,
ev_size,
(size[0].unwrap_or(70), size[1].unwrap_or(100)),
))
}
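/// Extract the numeric value from a `% Height <n>` or `% Width <n>` header line.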
fn line_to_hw(words: Vec<&[u8]>) -> Option<u32> {
    let word = *words.get(2)?;
    let word = word.strip_suffix(b"\n").unwrap_or(word);
    std::str::from_utf8(word).ok().and_then(|s| s.parse().ok())
}
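/// Decode one 8-byte DAT CD event: a 4-byte little-endian timestamp followed by
/// a 4-byte word packing `x` (bits 0-13), `y` (bits 14-27), and polarity (bit 28).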
fn decode_event(reader: &mut BufReader<File>) -> io::Result<DvsEvent> {
    let mut buffer = [0; 8];
    reader.read_exact(&mut buffer)?;
    let t = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
    let data = i32::from_le_bytes([buffer[4], buffer[5], buffer[6], buffer[7]]);
    let x = (data & 0x3FFF) as u16; // full 14-bit x field (the y mask starts at bit 14)
    let y = ((data & 0xFFFC000) >> 14) as u16; // bits 14-27
    let p = ((data & 0x10000000) >> 28) as u8; // bit 28
    Ok(DvsEvent { t, x, y, p })
}
impl<W: Write + 'static> VideoBuilder<W> for Prophesee<W> {
fn crf(mut self, crf: u8) -> Self {
self.video.update_crf(crf);
self
}
fn quality_manual(
mut self,
c_thresh_baseline: u8,
c_thresh_max: u8,
delta_t_max_multiplier: u32,
c_increase_velocity: u8,
feature_c_radius_denom: f32,
) -> Self {
self.video.update_quality_manual(
c_thresh_baseline,
c_thresh_max,
delta_t_max_multiplier,
c_increase_velocity,
feature_c_radius_denom,
);
self
}
fn chunk_rows(mut self, chunk_rows: usize) -> Self {
self.video = self.video.chunk_rows(chunk_rows);
self
}
fn time_parameters(
mut self,
tps: DeltaT,
ref_time: DeltaT,
delta_t_max: DeltaT,
time_mode: Option<TimeMode>,
) -> Result<Self, SourceError> {
eprintln!("setting dtref to {}", ref_time);
self.video = self
.video
.time_parameters(tps, ref_time, delta_t_max, time_mode)?;
Ok(self)
}
fn write_out(
mut self,
source_camera: SourceCamera,
time_mode: TimeMode,
pixel_multi_mode: PixelMultiMode,
adu_interval: Option<usize>,
encoder_type: EncoderType,
encoder_options: EncoderOptions,
write: W,
) -> Result<Box<Self>, SourceError> {
self.video = self.video.write_out(
Some(source_camera),
Some(time_mode),
Some(pixel_multi_mode),
adu_interval,
encoder_type,
encoder_options,
write,
)?;
Ok(Box::new(self))
}
fn detect_features(mut self, detect_features: bool, show_features: ShowFeatureMode) -> Self {
self.video = self.video.detect_features(detect_features, show_features);
self
}
#[cfg(feature = "feature-logging")]
fn log_path(self, _name: String) -> Self {
todo!()
}
}