use std::collections::HashMap;
use std::path::Path;
use std::time::Duration;
use ff_decode::VideoDecoder;
use ff_encode::VideoEncoder;
use ff_filter::{
AnimatedValue, AnimationTrack, AudioTrack, MultiTrackAudioMixer, MultiTrackComposer, VideoLayer,
};
use ff_format::ChannelLayout;
use crate::clip::Clip;
use crate::encoder_config::EncoderConfig;
use crate::error::PipelineError;
use crate::pipeline::hwaccel_to_hardware_encoder;
/// A multi-track composition (video layers + audio tracks + animations)
/// that can be rendered to an encoded media file via [`Timeline::render`].
///
/// Construct with [`Timeline::builder`].
#[derive(Debug, Clone)]
pub struct Timeline {
    // Output canvas size in pixels.
    pub(crate) canvas_width: u32,
    pub(crate) canvas_height: u32,
    // Output frame rate in frames per second.
    pub(crate) frame_rate: f64,
    // Each inner Vec is one track of clips; the track index doubles as the
    // layer z-order during rendering.
    pub(crate) video_tracks: Vec<Vec<Clip>>,
    pub(crate) audio_tracks: Vec<Vec<Clip>>,
    // Keyed "video_<track_idx>_<prop>", e.g. "video_0_opacity"; unknown keys
    // are warned about (not rejected) at render time.
    pub(crate) video_animations: HashMap<String, AnimationTrack<f64>>,
    // Keyed "audio_<track_idx>_<prop>", e.g. "audio_0_volume".
    pub(crate) audio_animations: HashMap<String, AnimationTrack<f64>>,
}
impl Timeline {
    /// Returns a fresh [`TimelineBuilder`].
    pub fn builder() -> TimelineBuilder {
        TimelineBuilder::new()
    }

    /// Output canvas width in pixels.
    pub fn canvas_width(&self) -> u32 {
        self.canvas_width
    }

    /// Output canvas height in pixels.
    pub fn canvas_height(&self) -> u32 {
        self.canvas_height
    }

    /// Output frame rate in frames per second.
    pub fn frame_rate(&self) -> f64 {
        self.frame_rate
    }

    /// Video tracks, one `Vec<Clip>` per track, in z-order (first = bottom).
    pub fn video_tracks(&self) -> &[Vec<Clip>] {
        &self.video_tracks
    }

    /// Audio tracks, one `Vec<Clip>` per track.
    pub fn audio_tracks(&self) -> &[Vec<Clip>] {
        &self.audio_tracks
    }
pub fn render(
self,
output: impl AsRef<Path>,
config: EncoderConfig,
) -> Result<(), PipelineError> {
let output = output.as_ref();
let Timeline {
canvas_width,
canvas_height,
frame_rate,
video_tracks,
audio_tracks,
video_animations,
audio_animations,
} = self;
let nv = video_tracks.len();
let na = audio_tracks.len();
for track in video_tracks.iter().chain(audio_tracks.iter()) {
for clip in track {
if !clip.source.exists() {
return Err(PipelineError::ClipNotFound {
path: clip.source.to_string_lossy().into_owned(),
});
}
}
}
let valid_video_props = ["x", "y", "scale_x", "scale_y", "rotation", "opacity"];
for key in video_animations.keys() {
let parts: Vec<&str> = key.splitn(3, '_').collect();
let ok = parts.len() == 3
&& parts[0] == "video"
&& parts[1].parse::<usize>().is_ok()
&& valid_video_props.contains(&parts[2]);
if !ok {
log::warn!("unknown animation key key={key}");
}
}
let valid_audio_props = ["volume", "pan"];
for key in audio_animations.keys() {
let parts: Vec<&str> = key.splitn(3, '_').collect();
let ok = parts.len() == 3
&& parts[0] == "audio"
&& parts[1].parse::<usize>().is_ok()
&& valid_audio_props.contains(&parts[2]);
if !ok {
log::warn!("unknown animation key key={key}");
}
}
let va = |track_idx: usize, prop: &str, default: f64| -> AnimatedValue<f64> {
let key = format!("video_{track_idx}_{prop}");
video_animations
.get(&key)
.cloned()
.map_or(AnimatedValue::Static(default), AnimatedValue::Track)
};
let aa = |track_idx: usize, prop: &str, default: f64| -> AnimatedValue<f64> {
let key = format!("audio_{track_idx}_{prop}");
audio_animations
.get(&key)
.cloned()
.map_or(AnimatedValue::Static(default), AnimatedValue::Track)
};
let mut video_graph = None;
if !video_tracks.is_empty() {
let mut composer = MultiTrackComposer::new(canvas_width, canvas_height);
for (track_idx, track) in video_tracks.iter().enumerate() {
for clip in track {
composer = composer.add_layer(VideoLayer {
source: clip.source.clone(),
x: va(track_idx, "x", 0.0),
y: va(track_idx, "y", 0.0),
scale_x: va(track_idx, "scale_x", 1.0),
scale_y: va(track_idx, "scale_y", 1.0),
rotation: va(track_idx, "rotation", 0.0),
opacity: va(track_idx, "opacity", 1.0),
z_order: u32::try_from(track_idx).unwrap_or(u32::MAX),
time_offset: clip.timeline_offset,
in_point: clip.in_point,
out_point: clip.out_point,
});
}
}
video_graph = Some(composer.build().map_err(PipelineError::Filter)?);
}
let mut audio_graph = None;
if !audio_tracks.is_empty() {
let mut mixer = MultiTrackAudioMixer::new(48_000, ChannelLayout::Stereo);
for (track_idx, track) in audio_tracks.iter().enumerate() {
for clip in track {
mixer = mixer.add_track(AudioTrack {
source: clip.source.clone(),
volume: aa(track_idx, "volume", 0.0),
pan: aa(track_idx, "pan", 0.0),
time_offset: clip.timeline_offset,
effects: vec![],
sample_rate: 48_000,
channel_layout: ff_format::ChannelLayout::Stereo,
});
}
}
audio_graph = Some(mixer.build().map_err(PipelineError::Filter)?);
}
let hw = hwaccel_to_hardware_encoder(config.hardware);
let mut enc_builder = VideoEncoder::create(output)
.video(canvas_width, canvas_height, frame_rate)
.video_codec(config.video_codec)
.bitrate_mode(config.bitrate_mode)
.hardware_encoder(hw);
if audio_graph.is_some() {
enc_builder = enc_builder.audio(48_000, 2).audio_codec(config.audio_codec);
}
let mut encoder = enc_builder.build().map_err(PipelineError::Encode)?;
if let Some(mut vgraph) = video_graph {
let mut video_idx: u32 = 0;
loop {
#[allow(clippy::cast_precision_loss)]
let pts = Duration::from_secs_f64(f64::from(video_idx) / frame_rate);
vgraph.tick(pts);
match vgraph.pull_video().map_err(PipelineError::Filter)? {
Some(frame) => {
encoder.push_video(&frame).map_err(PipelineError::Encode)?;
video_idx = video_idx.saturating_add(1);
}
None => break,
}
}
}
if let Some(mut agraph) = audio_graph {
let mut audio_pts = Duration::ZERO;
loop {
agraph.tick(audio_pts);
match agraph.pull_audio().map_err(PipelineError::Filter)? {
Some(frame) => {
let chunk_dur = frame.duration();
encoder.push_audio(&frame).map_err(PipelineError::Encode)?;
audio_pts += chunk_dur;
}
None => break,
}
}
}
encoder.finish().map_err(PipelineError::Encode)?;
log::info!(
"timeline render complete output={} video_tracks={nv} audio_tracks={na}",
output.display()
);
Ok(())
}
}
/// Builder for [`Timeline`].
///
/// Canvas size and frame rate are optional: when omitted, they are probed
/// from the first video clip at build time, falling back to 1920x1080 @ 30fps.
pub struct TimelineBuilder {
    // None = resolve from the first video clip (or default) in build().
    canvas_width: Option<u32>,
    canvas_height: Option<u32>,
    frame_rate: Option<f64>,
    // Tracks are appended in order; index = z-order for video.
    video_tracks: Vec<Vec<Clip>>,
    audio_tracks: Vec<Vec<Clip>>,
    // Animation tracks keyed "video_<idx>_<prop>" / "audio_<idx>_<prop>".
    video_animations: HashMap<String, AnimationTrack<f64>>,
    audio_animations: HashMap<String, AnimationTrack<f64>>,
}
impl Default for TimelineBuilder {
    /// Equivalent to [`TimelineBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl TimelineBuilder {
pub fn new() -> Self {
Self {
canvas_width: None,
canvas_height: None,
frame_rate: None,
video_tracks: Vec::new(),
audio_tracks: Vec::new(),
video_animations: HashMap::new(),
audio_animations: HashMap::new(),
}
}
#[must_use]
pub fn canvas(self, width: u32, height: u32) -> Self {
Self {
canvas_width: Some(width),
canvas_height: Some(height),
..self
}
}
#[must_use]
pub fn frame_rate(self, fps: f64) -> Self {
Self {
frame_rate: Some(fps),
..self
}
}
#[must_use]
pub fn video_track(self, clips: Vec<Clip>) -> Self {
let mut video_tracks = self.video_tracks;
video_tracks.push(clips);
Self {
video_tracks,
..self
}
}
#[must_use]
pub fn audio_track(self, clips: Vec<Clip>) -> Self {
let mut audio_tracks = self.audio_tracks;
audio_tracks.push(clips);
Self {
audio_tracks,
..self
}
}
#[must_use]
pub fn video_animation(self, key: impl Into<String>, track: AnimationTrack<f64>) -> Self {
let mut video_animations = self.video_animations;
video_animations.insert(key.into(), track);
Self {
video_animations,
..self
}
}
#[must_use]
pub fn audio_animation(self, key: impl Into<String>, track: AnimationTrack<f64>) -> Self {
let mut audio_animations = self.audio_animations;
audio_animations.insert(key.into(), track);
Self {
audio_animations,
..self
}
}
pub fn build(self) -> Result<Timeline, PipelineError> {
if self.video_tracks.is_empty() && self.audio_tracks.is_empty() {
return Err(PipelineError::NoInput);
}
let (canvas_width, canvas_height, frame_rate) = self.resolve_canvas_and_fps()?;
Ok(Timeline {
canvas_width,
canvas_height,
frame_rate,
video_tracks: self.video_tracks,
audio_tracks: self.audio_tracks,
video_animations: self.video_animations,
audio_animations: self.audio_animations,
})
}
fn resolve_canvas_and_fps(&self) -> Result<(u32, u32, f64), PipelineError> {
let need_probe = self.canvas_width.is_none()
|| self.canvas_height.is_none()
|| self.frame_rate.is_none();
if need_probe && let Some(first_clip) = self.video_tracks.first().and_then(|t| t.first()) {
if !first_clip.source.exists() {
return Err(PipelineError::ClipNotFound {
path: first_clip.source.to_string_lossy().into_owned(),
});
}
let vdec = VideoDecoder::open(&first_clip.source).build()?;
let w = self.canvas_width.unwrap_or_else(|| vdec.width());
let h = self.canvas_height.unwrap_or_else(|| vdec.height());
let fps = self.frame_rate.unwrap_or_else(|| vdec.frame_rate());
return Ok((w, h, fps));
}
Ok((
self.canvas_width.unwrap_or(1920),
self.canvas_height.unwrap_or(1080),
self.frame_rate.unwrap_or(30.0),
))
}
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[test]
    fn timeline_builder_should_err_when_no_tracks() {
        // A builder with neither video nor audio tracks must refuse to build.
        assert!(matches!(
            Timeline::builder().build(),
            Err(PipelineError::NoInput)
        ));
    }

    #[test]
    fn timeline_builder_should_succeed_with_video_track() {
        let built = Timeline::builder()
            .canvas(1920, 1080)
            .frame_rate(30.0)
            .video_track(vec![Clip::new("video.mp4")])
            .build()
            .unwrap();
        assert_eq!((built.canvas_width, built.canvas_height), (1920, 1080));
        assert!((built.frame_rate - 30.0).abs() < f64::EPSILON);
        assert_eq!(built.video_tracks.len(), 1);
        assert!(built.audio_tracks.is_empty());
    }

    #[test]
    fn timeline_builder_should_store_video_animation_track() {
        use ff_filter::{AnimationTrack, Easing, Keyframe};
        use std::time::Duration;

        // Opacity fades from fully visible to invisible over two seconds.
        let fade_out = AnimationTrack::new()
            .push(Keyframe::new(Duration::ZERO, 1.0_f64, Easing::Linear))
            .push(Keyframe::new(
                Duration::from_secs(2),
                0.0_f64,
                Easing::Linear,
            ));
        let built = Timeline::builder()
            .canvas(1920, 1080)
            .frame_rate(30.0)
            .video_track(vec![Clip::new("video.mp4")])
            .video_animation("video_0_opacity", fade_out)
            .build()
            .unwrap();
        assert_eq!(built.video_animations.len(), 1);
        assert!(built.video_animations.contains_key("video_0_opacity"));
    }

    #[test]
    fn timeline_builder_should_store_audio_animation_track() {
        use ff_filter::{AnimationTrack, Easing, Keyframe};
        use std::time::Duration;

        // Volume ramps from 0.0 down to -6.0 over two seconds.
        let duck = AnimationTrack::new()
            .push(Keyframe::new(Duration::ZERO, 0.0_f64, Easing::Linear))
            .push(Keyframe::new(
                Duration::from_secs(2),
                -6.0_f64,
                Easing::Linear,
            ));
        let built = Timeline::builder()
            .canvas(1920, 1080)
            .frame_rate(30.0)
            .audio_track(vec![Clip::new("audio.mp4")])
            .audio_animation("audio_0_volume", duck)
            .build()
            .unwrap();
        assert_eq!(built.audio_animations.len(), 1);
        assert!(built.audio_animations.contains_key("audio_0_volume"));
    }
}