use Video;
use Audio;
use ColorChannels;
#[derive(Copy, Clone)] pub struct Index(pub u32);
#[derive(Clone)] pub struct VFrame(pub Vec<u8>);
#[derive(Clone)] pub struct AFrame(pub [i16; 2000]);
impl Index {
pub fn to_seconds(self) -> f32 {
(self.0 as f32) / 24.0
}
}
impl VFrame {
pub fn sample_rgba(&self, format: ColorChannels, index: usize)
-> [u8; 4]
{
let mut rgba = [255u8; 4];
let channels = format.n_channels();
for i in 0..channels {
rgba[i] = self.0[index * channels + i];
}
rgba
}
}
pub trait EncoderV where Self: Sized {
fn new(video: &Video) -> Self;
fn run(&mut self, frame: &VFrame) -> Vec<u8>;
fn end(self) -> Vec<u8>;
}
pub trait EncoderA where Self: Sized {
fn new(audio: &Audio) -> Self;
fn run(&mut self, audio: &mut Audio) -> Vec<u8>;
fn end(self) -> Vec<u8>;
}
pub trait EncoderAV where Self: Sized {
fn new(video: &Video, audio: &Audio) -> Self;
fn run(&mut self, audio: &mut Audio, video: &mut Video) -> Vec<u8>;
fn end(self) -> Vec<u8>;
}
pub trait Decoder<T> where Self: Sized {
fn new(data: T, colors: ColorChannels) -> Option<Self>;
fn run(&mut self, audio: &mut Option<Audio>, video: &mut Option<Video>)
-> Option<bool>;
fn get(&self) -> Index;
fn set(&mut self, index: Index);
}