#![allow(clippy::unwrap_used)]
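//! Integration tests for multi-track video composition, audio mixing,
//! concatenation, and keyframed parameter automation in `ff_filter`.
//! Outputs are written with `ff_encode` and verified with `ff_probe`;
//! each test prints "Skipping: …" and returns early instead of failing
//! when a step cannot run in the current environment.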
mod fixtures;
use std::time::Duration;
use ff_encode::{AudioCodec, AudioEncoder, VideoCodec, VideoEncoder};
use ff_filter::{
AnimatedValue, AudioConcatenator, AudioTrack, MultiTrackAudioMixer, MultiTrackComposer,
VideoConcatenator, VideoLayer,
animation::{AnimationTrack, Easing, Keyframe},
};
use ff_format::{AudioFrame, ChannelLayout, SampleFormat, Timestamp};
use fixtures::{FileGuard, make_source_file, test_output_path, yuv420p_frame};
const CANVAS_W: u32 = 320;
const CANVAS_H: u32 = 180;
const FPS: f64 = 30.0;
const FRAME_COUNT: usize = 10;
const SAMPLE_RATE: u32 = 48_000;
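/// Composites three differently sized sources onto a 320×180 canvas,
/// mixes audio from two of them, encodes the result to MP4, and probes
/// the output for exactly one video and one audio stream at canvas size.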
#[test]
fn multi_track_composition_should_produce_valid_mp4_output() {
let src1_path = test_output_path("composition_src1.mp4");
let src2_path = test_output_path("composition_src2.mp4");
let src3_path = test_output_path("composition_src3.mp4");
let out_path = test_output_path("composition_out.mp4");
let _g1 = FileGuard::new(src1_path.clone());
let _g2 = FileGuard::new(src2_path.clone());
let _g3 = FileGuard::new(src3_path.clone());
let _gout = FileGuard::new(out_path.clone());
if make_source_file(
&src1_path,
CANVAS_W,
CANVAS_H,
FPS,
FRAME_COUNT,
76,
84,
255,
)
.is_none()
{
return;
}
if make_source_file(&src2_path, 160, 90, FPS, FRAME_COUNT, 149, 43, 21).is_none() {
return;
}
if make_source_file(&src3_path, 80, 46, FPS, FRAME_COUNT, 29, 255, 107).is_none() {
return;
}
let mut composer = match MultiTrackComposer::new(CANVAS_W, CANVAS_H)
.add_layer(VideoLayer {
source: src1_path.clone(),
x: AnimatedValue::Static(0.0),
y: AnimatedValue::Static(0.0),
scale_x: AnimatedValue::Static(1.0),
scale_y: AnimatedValue::Static(1.0),
rotation: AnimatedValue::Static(0.0),
opacity: AnimatedValue::Static(1.0),
z_order: 0,
time_offset: Duration::ZERO,
in_point: None,
out_point: None,
in_transition: None,
})
.add_layer(VideoLayer {
source: src2_path.clone(),
x: AnimatedValue::Static(160.0),
y: AnimatedValue::Static(0.0),
scale_x: AnimatedValue::Static(1.0),
scale_y: AnimatedValue::Static(1.0),
rotation: AnimatedValue::Static(0.0),
opacity: AnimatedValue::Static(1.0),
z_order: 1,
time_offset: Duration::ZERO,
in_point: None,
out_point: None,
in_transition: None,
})
.add_layer(VideoLayer {
source: src3_path.clone(),
x: AnimatedValue::Static(0.0),
y: AnimatedValue::Static(134.0),
scale_x: AnimatedValue::Static(1.0),
scale_y: AnimatedValue::Static(1.0),
rotation: AnimatedValue::Static(0.0),
opacity: AnimatedValue::Static(1.0),
z_order: 2,
time_offset: Duration::ZERO,
in_point: None,
out_point: None,
in_transition: None,
})
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: MultiTrackComposer::build failed: {e}");
return;
}
};
let mut mixer = match MultiTrackAudioMixer::new(SAMPLE_RATE, ChannelLayout::Stereo)
.add_track(AudioTrack {
source: src1_path.clone(),
volume: AnimatedValue::Static(0.0),
pan: AnimatedValue::Static(0.0),
time_offset: Duration::ZERO,
effects: vec![],
sample_rate: SAMPLE_RATE,
channel_layout: ChannelLayout::Stereo,
})
.add_track(AudioTrack {
source: src2_path.clone(),
volume: AnimatedValue::Static(-3.0),
pan: AnimatedValue::Static(0.0),
time_offset: Duration::ZERO,
effects: vec![],
sample_rate: SAMPLE_RATE,
channel_layout: ChannelLayout::Stereo,
})
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: MultiTrackAudioMixer::build failed: {e}");
return;
}
};
let mut encoder = match VideoEncoder::create(&out_path)
.video(CANVAS_W, CANVAS_H, FPS)
.video_codec(VideoCodec::Mpeg4)
.audio(SAMPLE_RATE, 2)
.audio_codec(AudioCodec::Aac)
.audio_bitrate(128_000)
.build()
{
Ok(enc) => enc,
Err(e) => {
println!("Skipping: output encoder build failed: {e}");
return;
}
};
let mut video_frame_count = 0usize;
loop {
match composer.pull_video() {
Ok(Some(frame)) => {
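// If the composer hands back a frame at an unexpected size, substitute a
// blank canvas-sized frame so the encoder still receives valid input.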
let push_frame;
let to_push = if frame.width() == CANVAS_W && frame.height() == CANVAS_H {
&frame
} else {
push_frame = yuv420p_frame(CANVAS_W, CANVAS_H, 16, 128, 128);
&push_frame
};
if let Err(e) = encoder.push_video(to_push) {
println!("Skipping: push_video to output encoder failed: {e}");
return;
}
video_frame_count += 1;
}
Ok(None) => break,
Err(e) => {
println!("Skipping: pull_video from composer failed: {e}");
return;
}
}
}
loop {
match mixer.pull_audio() {
Ok(Some(audio_frame)) => {
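// Frames that are not already F32 are replaced with an F32 frame of the
// same geometry; AudioFrame::empty is assumed to yield a silent buffer,
// so this pushes silence rather than converting the samples.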
let compat_frame: AudioFrame;
let to_push = if audio_frame.format() == SampleFormat::F32 {
&audio_frame
} else {
compat_frame = match AudioFrame::empty(
audio_frame.samples(),
audio_frame.channels(),
audio_frame.sample_rate(),
SampleFormat::F32,
) {
Ok(f) => f,
Err(e) => {
println!("Skipping: audio frame conversion failed: {e}");
return;
}
};
&compat_frame
};
if let Err(e) = encoder.push_audio(to_push) {
println!("Skipping: push_audio to output encoder failed: {e}");
return;
}
}
Ok(None) => break,
Err(e) => {
println!("Skipping: pull_audio from mixer failed: {e}");
return;
}
}
}
if let Err(e) = encoder.finish() {
println!("Skipping: output encoder finish failed: {e}");
return;
}
let info = match ff_probe::open(&out_path) {
Ok(i) => i,
Err(e) => {
println!("Skipping: ff_probe::open failed: {e}");
return;
}
};
assert_eq!(
info.video_stream_count(),
1,
"output must contain exactly one video stream"
);
assert_eq!(
info.audio_stream_count(),
1,
"output must contain exactly one audio stream"
);
assert!(
video_frame_count > 0,
"composer must produce at least one video frame"
);
let video = info.video_stream(0).expect("video stream must be present");
assert_eq!(
video.width(),
CANVAS_W,
"output video width must match canvas"
);
assert_eq!(
video.height(),
CANVAS_H,
"output video height must match canvas"
);
}
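/// Concatenates two 1-second clips (30 frames at 30 fps each) and checks
/// that the output's probed duration and frame count exceed a single clip's.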
#[test]
fn video_concatenator_should_produce_output_longer_than_single_clip() {
const W: u32 = 160;
const H: u32 = 90;
const FPS: f64 = 30.0;
const FRAMES_PER_CLIP: usize = 30;
let src1_path = test_output_path("video_concat_src1.mp4");
let src2_path = test_output_path("video_concat_src2.mp4");
let out_path = test_output_path("video_concat_out.mp4");
let _g1 = FileGuard::new(src1_path.clone());
let _g2 = FileGuard::new(src2_path.clone());
let _gout = FileGuard::new(out_path.clone());
if make_source_file(&src1_path, W, H, FPS, FRAMES_PER_CLIP, 76, 84, 255).is_none() {
return;
}
if make_source_file(&src2_path, W, H, FPS, FRAMES_PER_CLIP, 29, 255, 107).is_none() {
return;
}
let mut graph = match VideoConcatenator::new(vec![&src1_path, &src2_path])
.output_resolution(W, H)
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: VideoConcatenator::build failed: {e}");
return;
}
};
let mut encoder = match VideoEncoder::create(&out_path)
.video(W, H, FPS)
.video_codec(VideoCodec::Mpeg4)
.build()
{
Ok(enc) => enc,
Err(e) => {
println!("Skipping: output encoder build failed: {e}");
return;
}
};
let mut frame_count = 0usize;
loop {
match graph.pull_video() {
Ok(Some(frame)) => {
if let Err(e) = encoder.push_video(&frame) {
println!("Skipping: push_video failed: {e}");
return;
}
frame_count += 1;
}
Ok(None) => break,
Err(e) => {
println!("Skipping: pull_video failed: {e}");
return;
}
}
}
if let Err(e) = encoder.finish() {
println!("Skipping: encoder finish failed: {e}");
return;
}
let info = match ff_probe::open(&out_path) {
Ok(i) => i,
Err(e) => {
println!("Skipping: ff_probe::open failed: {e}");
return;
}
};
assert!(
info.has_video(),
"concatenated output must have a video stream"
);
let duration = info.duration();
assert!(
duration >= Duration::from_millis(1500),
"concatenated output must be ≥ 1.5 s (two 1-second clips), got {duration:?}"
);
assert!(
frame_count > FRAMES_PER_CLIP,
"concatenated output must have more frames than a single clip ({FRAMES_PER_CLIP}), got {frame_count}"
);
}
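/// Encodes two ≈1-second mono AAC clips, concatenates them, and checks
/// that the output is audio-only and at least 1.5 s long.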
#[test]
fn audio_concatenator_should_produce_output_longer_than_single_clip() {
const SAMPLE_RATE: u32 = 44_100;
const CHANNELS: u32 = 1;
const FRAME_SIZE: usize = 1024;
const FRAMES_PER_CLIP: usize = 44;
let src1_path = test_output_path("audio_concat_src1.m4a");
let src2_path = test_output_path("audio_concat_src2.m4a");
let out_path = test_output_path("audio_concat_out.m4a");
let _g1 = FileGuard::new(src1_path.clone());
let _g2 = FileGuard::new(src2_path.clone());
let _gout = FileGuard::new(out_path.clone());
for src_path in [&src1_path, &src2_path] {
let mut enc = match AudioEncoder::create(src_path)
.audio(SAMPLE_RATE, CHANNELS)
.audio_codec(AudioCodec::Aac)
.build()
{
Ok(e) => e,
Err(e) => {
println!("Skipping: AudioEncoder::build failed: {e}");
return;
}
};
for _ in 0..FRAMES_PER_CLIP {
let frame =
match AudioFrame::empty(FRAME_SIZE, CHANNELS, SAMPLE_RATE, SampleFormat::F32) {
Ok(f) => f,
Err(e) => {
println!("Skipping: AudioFrame::empty failed: {e}");
return;
}
};
if let Err(e) = enc.push(&frame) {
println!("Skipping: source push failed: {e}");
return;
}
}
if let Err(e) = enc.finish() {
println!("Skipping: source encoder finish failed: {e}");
return;
}
}
let mut graph = match AudioConcatenator::new(vec![&src1_path, &src2_path])
.output_format(SAMPLE_RATE, ChannelLayout::Mono)
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: AudioConcatenator::build failed: {e}");
return;
}
};
let mut out_enc = match AudioEncoder::create(&out_path)
.audio(SAMPLE_RATE, CHANNELS)
.audio_codec(AudioCodec::Aac)
.build()
{
Ok(e) => e,
Err(e) => {
println!("Skipping: output encoder build failed: {e}");
return;
}
};
loop {
match graph.pull_audio() {
Ok(Some(frame)) => {
if let Err(e) = out_enc.push(&frame) {
println!("Skipping: push to output encoder failed: {e}");
return;
}
}
Ok(None) => break,
Err(e) => {
println!("Skipping: pull_audio failed: {e}");
return;
}
}
}
if let Err(e) = out_enc.finish() {
println!("Skipping: output encoder finish failed: {e}");
return;
}
let info = match ff_probe::open(&out_path) {
Ok(i) => i,
Err(e) => {
println!("Skipping: ff_probe::open failed: {e}");
return;
}
};
assert!(
info.has_audio(),
"concatenated output must have an audio stream"
);
assert_eq!(
info.video_stream_count(),
0,
"audio-only output must not have a video stream"
);
let duration = info.duration();
assert!(
duration >= Duration::from_millis(1500),
"concatenated output must be ≥ 1.5 s (two ≈1-second clips), got {duration:?}"
);
}
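/// Composites a bright layer over a dark background while a keyframed
/// opacity track fades the layer from 1.0 to 0.0, then checks that the
/// center-pixel luma of the last frame is well below the first frame's.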
#[test]
#[ignore = "requires FFmpeg filter graph; run with -- --include-ignored"]
fn animated_opacity_fade_should_darken_composite_over_time() {
const W: u32 = 64;
const H: u32 = 64;
const FPS: f64 = 30.0;
const FADE_FRAMES: usize = 30;
let bg_path = test_output_path("opacity_bg_64x64.mp4");
let layer_path = test_output_path("opacity_layer_64x64.mp4");
let _bg_guard = FileGuard::new(bg_path.clone());
let _layer_guard = FileGuard::new(layer_path.clone());
if make_source_file(&bg_path, W, H, FPS, FADE_FRAMES, 16, 128, 128).is_none() {
return;
}
if make_source_file(&layer_path, W, H, FPS, FADE_FRAMES, 235, 128, 128).is_none() {
return;
}
let end_pts = Duration::from_secs_f64((FADE_FRAMES as f64 - 1.0) / FPS);
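// Keyframed fade: fully opaque at t = 0, fully transparent at the
// presentation time of the last frame.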
let opacity_track = AnimationTrack::new()
.push(Keyframe::new(Duration::ZERO, 1.0_f64, Easing::Linear))
.push(Keyframe::new(end_pts, 0.0_f64, Easing::Linear));
let mut composer = match MultiTrackComposer::new(W, H)
.add_layer(VideoLayer {
source: bg_path.clone(),
x: AnimatedValue::Static(0.0),
y: AnimatedValue::Static(0.0),
scale_x: AnimatedValue::Static(1.0),
scale_y: AnimatedValue::Static(1.0),
rotation: AnimatedValue::Static(0.0),
opacity: AnimatedValue::Static(1.0),
z_order: 0,
time_offset: Duration::ZERO,
in_point: None,
out_point: None,
in_transition: None,
})
.add_layer(VideoLayer {
source: layer_path.clone(),
x: AnimatedValue::Static(0.0),
y: AnimatedValue::Static(0.0),
scale_x: AnimatedValue::Static(1.0),
scale_y: AnimatedValue::Static(1.0),
rotation: AnimatedValue::Static(0.0),
opacity: AnimatedValue::Track(opacity_track),
z_order: 1,
time_offset: Duration::ZERO,
in_point: None,
out_point: None,
in_transition: None,
})
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: MultiTrackComposer::build failed: {e}");
return;
}
};
let cx = (W / 2) as usize;
let cy = (H / 2) as usize;
let mut lumas: Vec<f64> = Vec::with_capacity(FADE_FRAMES);
for i in 0..FADE_FRAMES {
let pts = Duration::from_secs_f64(i as f64 / FPS);
composer.tick(pts);
let frame = match composer.pull_video() {
Ok(Some(f)) => f,
Ok(None) => {
println!("Skipping: composer ended early at frame {i}");
return;
}
Err(e) => {
println!("Skipping: pull_video failed at frame {i}: {e}");
return;
}
};
if frame.width() != W || frame.height() != H {
println!(
"Skipping: unexpected frame dimensions {}×{} at frame {i}",
frame.width(),
frame.height()
);
return;
}
let stride = frame.stride(0).unwrap_or(W as usize);
let y_plane = match frame.plane(0) {
Some(p) => p,
None => {
println!("Skipping: Y-plane unavailable at frame {i}");
return;
}
};
lumas.push(y_plane[cy * stride + cx] as f64);
}
let first = lumas[0];
let last = lumas[FADE_FRAMES - 1];
assert!(
first > last + 50.0,
"frame 0 (opacity=1.0) luma={first:.1} must be significantly brighter than \
frame {} (opacity=0.0) luma={last:.1}",
FADE_FRAMES - 1
);
}
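/// Builds an interleaved stereo F32 audio frame in which every sample of
/// both channels holds `amplitude`.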
fn constant_amplitude_frame(amplitude: f32, samples: usize, sample_rate: u32) -> AudioFrame {
let channels = 2usize;
let bytes_per_sample = 4usize;
let v = amplitude.to_le_bytes();
let mut buf = vec![0u8; samples * channels * bytes_per_sample];
for i in 0..samples {
let off = i * channels * bytes_per_sample;
buf[off..off + 4].copy_from_slice(&v);
buf[off + 4..off + 8].copy_from_slice(&v);
}
AudioFrame::new(
vec![buf],
samples,
2,
sample_rate,
SampleFormat::F32,
Timestamp::default(),
)
.expect("constant_amplitude_frame: AudioFrame::new failed")
}
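/// Root-mean-square of a raw buffer of little-endian F32 samples;
/// returns 0.0 if the buffer holds less than one full sample.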
fn rms_bytes(raw: &[u8]) -> f64 {
if raw.len() < 4 {
return 0.0;
}
let n = raw.len() / 4;
let sum_sq: f64 = (0..n)
.map(|i| {
let bytes = [raw[i * 4], raw[i * 4 + 1], raw[i * 4 + 2], raw[i * 4 + 3]];
let s = f32::from_le_bytes(bytes) as f64;
s * s
})
.sum();
(sum_sq / n as f64).sqrt()
}
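/// Feeds a constant-amplitude source through the mixer while a keyframed
/// volume track ramps from -60.0 to 0.0, then checks that the RMS of the
/// late chunks exceeds the early chunks' RMS by more than 10×.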
#[test]
#[ignore = "requires FFmpeg filter graph; run with -- --include-ignored"]
fn volume_automation_should_increase_audio_amplitude_over_time() {
const SAMPLE_RATE: u32 = 48_000;
const CHANNELS: u32 = 2;
const FRAME_SAMPLES: usize = 1024;
const AUDIO_FRAMES: usize = 60;
let src_path = test_output_path("vol_auto_src.m4a");
let _src_guard = FileGuard::new(src_path.clone());
let mut enc = match AudioEncoder::create(&src_path)
.audio(SAMPLE_RATE, CHANNELS)
.audio_codec(AudioCodec::Aac)
.build()
{
Ok(e) => e,
Err(e) => {
println!("Skipping: AudioEncoder::build failed: {e}");
return;
}
};
for _ in 0..AUDIO_FRAMES {
let frame = constant_amplitude_frame(0.5, FRAME_SAMPLES, SAMPLE_RATE);
if let Err(e) = enc.push(&frame) {
println!("Skipping: source push failed: {e}");
return;
}
}
if let Err(e) = enc.finish() {
println!("Skipping: source encoder finish failed: {e}");
return;
}
let total_duration =
Duration::from_secs_f64(AUDIO_FRAMES as f64 * FRAME_SAMPLES as f64 / SAMPLE_RATE as f64);
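// Ramp the volume value linearly from -60.0 up to 0.0 across the whole
// clip, so late audio should come out much louder than early audio.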
let vol_track = AnimationTrack::new()
.push(Keyframe::new(Duration::ZERO, -60.0_f64, Easing::Linear))
.push(Keyframe::new(total_duration, 0.0_f64, Easing::Linear));
let mut mixer = match MultiTrackAudioMixer::new(SAMPLE_RATE, ChannelLayout::Stereo)
.add_track(AudioTrack {
source: src_path.clone(),
volume: AnimatedValue::Track(vol_track),
pan: AnimatedValue::Static(0.0),
time_offset: Duration::ZERO,
effects: vec![],
sample_rate: SAMPLE_RATE,
channel_layout: ChannelLayout::Stereo,
})
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: MultiTrackAudioMixer::build failed: {e}");
return;
}
};
let mut pulled: Vec<f64> = Vec::new();
let mut chunk_pts = Duration::ZERO;
let chunk_duration = Duration::from_secs_f64(FRAME_SAMPLES as f64 / SAMPLE_RATE as f64);
loop {
mixer.tick(chunk_pts);
match mixer.pull_audio() {
Ok(Some(frame)) => {
let rms = match frame.plane(0) {
Some(raw) => rms_bytes(raw),
None => 0.0,
};
pulled.push(rms);
chunk_pts += chunk_duration;
}
Ok(None) => break,
Err(e) => {
println!("Skipping: pull_audio failed: {e}");
return;
}
}
}
if pulled.len() < 10 {
println!("Skipping: too few audio chunks pulled ({})", pulled.len());
return;
}
let n = pulled.len();
let window = (n / 5).max(1);
let early_rms: f64 = pulled[..window].iter().sum::<f64>() / window as f64;
let late_rms: f64 = pulled[n - window..].iter().sum::<f64>() / window as f64;
assert!(
late_rms > early_rms * 10.0,
"late-frame RMS ({late_rms:.6}) must be > 10× early-frame RMS ({early_rms:.6}) \
— volume automation did not take effect (total chunks={n})"
);
}
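/// Checks that frames pulled from the composition graph arrive in yuv420p.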
#[test]
fn multi_track_composition_should_produce_yuv420p_frames() {
use ff_format::PixelFormat;
const W: u32 = 64;
const H: u32 = 64;
const FPS_LOCAL: f64 = 30.0;
const FRAMES: usize = 5;
let src_path = test_output_path("yuv420p_check_src.mp4");
let _g = FileGuard::new(src_path.clone());
if make_source_file(&src_path, W, H, FPS_LOCAL, FRAMES, 128, 128, 128).is_none() {
return;
}
let mut composer = match MultiTrackComposer::new(W, H)
.add_layer(VideoLayer {
source: src_path.clone(),
x: AnimatedValue::Static(0.0),
y: AnimatedValue::Static(0.0),
scale_x: AnimatedValue::Static(1.0),
scale_y: AnimatedValue::Static(1.0),
rotation: AnimatedValue::Static(0.0),
opacity: AnimatedValue::Static(1.0),
z_order: 0,
time_offset: Duration::ZERO,
in_point: None,
out_point: None,
in_transition: None,
})
.build()
{
Ok(g) => g,
Err(e) => {
println!("Skipping: MultiTrackComposer::build failed: {e}");
return;
}
};
let frame = match composer.pull_video() {
Ok(Some(f)) => f,
Ok(None) => {
println!("Skipping: composer produced no frames");
return;
}
Err(e) => {
println!("Skipping: pull_video failed: {e}");
return;
}
};
assert_eq!(
frame.format(),
PixelFormat::Yuv420p,
"composition graph must deliver yuv420p frames; got {:?}",
frame.format()
);
}