#![allow(unsafe_code)]
#![allow(unsafe_op_in_unsafe_fn)]
#![allow(clippy::ptr_as_ptr)]
#![allow(clippy::cast_possible_wrap)]
#![allow(clippy::cast_sign_loss)]
#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_lossless)]
#![allow(clippy::too_many_lines)]
#![allow(clippy::borrow_as_ptr)]
#![allow(clippy::ref_as_ptr)]
use std::ffi::CString;
use std::path::Path;
use std::ptr;
use ff_sys::{
AVCodecContext, AVFormatContext, AVFrame, AVPictureType_AV_PICTURE_TYPE_I,
AVPictureType_AV_PICTURE_TYPE_NONE, AVPixelFormat, AVPixelFormat_AV_PIX_FMT_YUV420P,
AVRational, SwrContext, SwsContext, av_frame_alloc, av_frame_free, av_frame_get_buffer,
av_frame_unref, av_opt_set, av_packet_alloc, av_packet_free, av_packet_unref, av_rescale_q,
av_write_trailer, avformat_alloc_output_context2, avformat_free_context, avformat_new_stream,
avformat_write_header,
};
use crate::codec_utils::{ffmpeg_err, ffmpeg_err_msg};
use crate::error::StreamError;
#[allow(clippy::too_many_arguments)]
pub(crate) fn write_hls(
input_path: &str,
output_dir: &str,
segment_duration_secs: f64,
keyframe_interval: u32,
target_bitrate: i64,
target_width: i32,
target_height: i32,
segment_format: crate::hls::HlsSegmentFormat,
) -> Result<(), StreamError> {
std::fs::create_dir_all(output_dir)?;
unsafe {
write_hls_unsafe(
input_path,
output_dir,
segment_duration_secs,
keyframe_interval,
target_bitrate,
target_width,
target_height,
segment_format,
)
}
}
#[allow(clippy::too_many_arguments)]
/// Worker behind [`write_hls`]: demuxes `input_path`, re-encodes the first
/// video stream to H.264 (and, best-effort, the first audio stream to AAC),
/// and muxes the result through FFmpeg's `hls` muxer into
/// `{output_dir}/playlist.m3u8` plus `segment%03d.{ts|m4s}` files.
///
/// Audio is strictly best-effort: any failure while opening the audio
/// decoder, the AAC encoder, or the resampler downgrades the job to
/// video-only (logged via `log::warn!`) instead of failing the transcode.
///
/// # Safety
///
/// Performs raw FFmpeg FFI throughout. Every context/frame/packet pointer is
/// allocated and released inside this function; the caller only needs to
/// uphold the usual FFmpeg threading rules for these APIs.
#[allow(clippy::too_many_arguments)]
unsafe fn write_hls_unsafe(
    input_path: &str,
    output_dir: &str,
    segment_duration_secs: f64,
    keyframe_interval: u32,
    target_bitrate: i64,
    target_width: i32,
    target_height: i32,
    segment_format: crate::hls::HlsSegmentFormat,
) -> Result<(), StreamError> {
    ff_sys::ensure_initialized();
    // --- Demuxer: open the input and probe its streams. ---
    let mut input_ctx = ff_sys::avformat::open_input(Path::new(input_path)).map_err(ffmpeg_err)?;
    ff_sys::avformat::find_stream_info(input_ctx).map_err(|e| {
        ff_sys::avformat::close_input(&mut input_ctx);
        ffmpeg_err(e)
    })?;
    // Select the first video and the first audio stream (if any).
    // -1 means "not present" for both indices throughout this function.
    let nb_streams = (*input_ctx).nb_streams as usize;
    let mut video_stream_idx: i32 = -1;
    let mut audio_stream_idx: i32 = -1;
    for i in 0..nb_streams {
        let stream = *(*input_ctx).streams.add(i);
        let codec_type = (*(*stream).codecpar).codec_type;
        if codec_type == ff_sys::AVMediaType_AVMEDIA_TYPE_VIDEO && video_stream_idx < 0 {
            video_stream_idx = i as i32;
        } else if codec_type == ff_sys::AVMediaType_AVMEDIA_TYPE_AUDIO && audio_stream_idx < 0 {
            audio_stream_idx = i as i32;
        }
    }
    // Video is mandatory; audio is optional.
    if video_stream_idx < 0 {
        ff_sys::avformat::close_input(&mut input_ctx);
        return Err(StreamError::InvalidConfig {
            reason: "input file contains no video stream".into(),
        });
    }
    let video_stream = *(*input_ctx).streams.add(video_stream_idx as usize);
    let video_codecpar = (*video_stream).codecpar;
    // Non-positive target dimensions mean "keep the source dimensions".
    let enc_width = if target_width > 0 {
        target_width
    } else {
        (*video_codecpar).width
    };
    let enc_height = if target_height > 0 {
        target_height
    } else {
        (*video_codecpar).height
    };
    let video_fps = detect_fps(video_stream, input_ctx);
    // Integer fps drives the encoder time base; clamp to at least 1.
    let fps_int = video_fps.round().max(1.0) as i32;
    // --- Video decoder. ---
    // NOTE(review): the two `?` exits below (find_decoder, alloc_context3)
    // return without closing `input_ctx`, unlike every other error path in
    // this function — looks like a leak on these paths; confirm.
    let vid_codec_id = (*video_codecpar).codec_id;
    let vid_decoder = ff_sys::avcodec::find_decoder(vid_codec_id)
        .ok_or_else(|| ffmpeg_err_msg("no video decoder available for input stream"))?;
    let mut vid_dec_ctx = ff_sys::avcodec::alloc_context3(vid_decoder).map_err(ffmpeg_err)?;
    ff_sys::avcodec::parameters_to_context(vid_dec_ctx, video_codecpar).map_err(|e| {
        ff_sys::avcodec::free_context(&mut vid_dec_ctx as *mut *mut _);
        ff_sys::avformat::close_input(&mut input_ctx);
        ffmpeg_err(e)
    })?;
    ff_sys::avcodec::open2(vid_dec_ctx, vid_decoder, ptr::null_mut()).map_err(|e| {
        ff_sys::avcodec::free_context(&mut vid_dec_ctx as *mut *mut _);
        ff_sys::avformat::close_input(&mut input_ctx);
        ffmpeg_err(e)
    })?;
    // --- Audio decoder (best-effort). Defaults are only used if the decoder
    // opens; otherwise audio_stream_idx is reset to -1 and audio is skipped.
    let mut aud_dec_ctx: *mut AVCodecContext = ptr::null_mut();
    let mut aud_sample_rate: i32 = 44100;
    let mut aud_nb_channels: i32 = 2;
    if audio_stream_idx >= 0 {
        let audio_stream = *(*input_ctx).streams.add(audio_stream_idx as usize);
        let audio_codecpar = (*audio_stream).codecpar;
        let aud_codec_id = (*audio_codecpar).codec_id;
        if let Some(aud_decoder) = ff_sys::avcodec::find_decoder(aud_codec_id) {
            if let Ok(ctx) = ff_sys::avcodec::alloc_context3(aud_decoder) {
                aud_dec_ctx = ctx;
                if ff_sys::avcodec::parameters_to_context(aud_dec_ctx, audio_codecpar).is_ok()
                    && ff_sys::avcodec::open2(aud_dec_ctx, aud_decoder, ptr::null_mut()).is_ok()
                {
                    // Real rate/channel count from the opened decoder.
                    aud_sample_rate = (*aud_dec_ctx).sample_rate;
                    aud_nb_channels = (*aud_dec_ctx).ch_layout.nb_channels;
                    log::info!(
                        "hls audio decoder opened sample_rate={aud_sample_rate} \
                         channels={aud_nb_channels}"
                    );
                } else {
                    ff_sys::avcodec::free_context(&mut aud_dec_ctx as *mut *mut _);
                    aud_dec_ctx = ptr::null_mut();
                    audio_stream_idx = -1;
                    log::warn!("hls audio decoder open failed, skipping audio");
                }
            } else {
                audio_stream_idx = -1;
                log::warn!("hls audio decoder alloc failed, skipping audio");
            }
        } else {
            audio_stream_idx = -1;
            log::warn!("hls no audio decoder found, skipping audio");
        }
    }
    // --- Output context for the `hls` muxer, keyed by the playlist path. ---
    let playlist_path = format!("{output_dir}/playlist.m3u8");
    let c_playlist = CString::new(playlist_path.as_str())
        .map_err(|_| ffmpeg_err_msg("playlist path contains null byte"))?;
    let c_hls = c"hls";
    let mut out_ctx: *mut AVFormatContext = ptr::null_mut();
    let ret = avformat_alloc_output_context2(
        &mut out_ctx,
        ptr::null_mut(),
        c_hls.as_ptr(),
        c_playlist.as_ptr(),
    );
    if ret < 0 || out_ctx.is_null() {
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        return Err(ffmpeg_err(ret));
    }
    // HLS muxer private options. Failures here are logged but non-fatal:
    // the muxer falls back to its defaults.
    let seg_time_str = format!("{}", segment_duration_secs as u32);
    let use_fmp4 = segment_format == crate::hls::HlsSegmentFormat::Fmp4;
    let seg_ext = if use_fmp4 { "m4s" } else { "ts" };
    let seg_filename = format!("{output_dir}/segment%03d.{seg_ext}");
    if let (Ok(c_seg_time), Ok(c_seg_file)) = (
        CString::new(seg_time_str.as_str()),
        CString::new(seg_filename.as_str()),
    ) {
        let ret = av_opt_set(
            (*out_ctx).priv_data,
            c"hls_time".as_ptr(),
            c_seg_time.as_ptr(),
            0,
        );
        if ret < 0 {
            log::warn!(
                "hls_time option not supported, using default \
                 requested={seg_time_str} error={}",
                ff_sys::av_error_string(ret)
            );
        }
        let ret = av_opt_set(
            (*out_ctx).priv_data,
            c"hls_segment_filename".as_ptr(),
            c_seg_file.as_ptr(),
            0,
        );
        if ret < 0 {
            log::warn!(
                "hls_segment_filename option not supported, using default \
                 requested={seg_filename} error={}",
                ff_sys::av_error_string(ret)
            );
        }
        if use_fmp4 {
            let ret = av_opt_set(
                (*out_ctx).priv_data,
                c"hls_segment_type".as_ptr(),
                c"fmp4".as_ptr(),
                0,
            );
            if ret < 0 {
                log::warn!(
                    "hls_segment_type fmp4 option not supported error={}",
                    ff_sys::av_error_string(ret)
                );
            }
        }
    }
    // --- Video encoder: H.264, YUV420P, fixed fps time base. ---
    let vid_enc_codec = crate::codec_utils::select_h264_encoder("hls").ok_or_else(|| {
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        ffmpeg_err_msg("no H.264 encoder available (tried h264_nvenc, h264_qsv, h264_amf, h264_videotoolbox, libx264, mpeg4)")
    })?;
    let mut vid_enc_ctx = ff_sys::avcodec::alloc_context3(vid_enc_codec).map_err(|e| {
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        ffmpeg_err(e)
    })?;
    (*vid_enc_ctx).width = enc_width;
    (*vid_enc_ctx).height = enc_height;
    (*vid_enc_ctx).time_base.num = 1;
    (*vid_enc_ctx).time_base.den = fps_int;
    (*vid_enc_ctx).framerate.num = fps_int;
    (*vid_enc_ctx).framerate.den = 1;
    (*vid_enc_ctx).pix_fmt = AVPixelFormat_AV_PIX_FMT_YUV420P;
    // Non-positive target bitrate falls back to 2 Mbit/s.
    (*vid_enc_ctx).bit_rate = if target_bitrate > 0 {
        target_bitrate
    } else {
        2_000_000
    };
    ff_sys::avcodec::open2(vid_enc_ctx, vid_enc_codec, ptr::null_mut()).map_err(|e| {
        ff_sys::avcodec::free_context(&mut vid_enc_ctx as *mut *mut _);
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        ffmpeg_err(e)
    })?;
    // Output video stream mirroring the encoder parameters.
    let vid_out_stream = avformat_new_stream(out_ctx, vid_enc_codec);
    if vid_out_stream.is_null() {
        ff_sys::avcodec::free_context(&mut vid_enc_ctx as *mut *mut _);
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        return Err(ffmpeg_err_msg("cannot create video output stream"));
    }
    (*vid_out_stream).time_base = (*vid_enc_ctx).time_base;
    let vid_out_stream_idx = ((*out_ctx).nb_streams - 1) as i32;
    ff_sys::avcodec::parameters_from_context((*vid_out_stream).codecpar, vid_enc_ctx).map_err(
        |e| {
            ff_sys::avcodec::free_context(&mut vid_enc_ctx as *mut *mut _);
            cleanup_output_ctx(out_ctx);
            cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
            ffmpeg_err(e)
        },
    )?;
    // --- Audio encoder (AAC @ 192k) + resampler, best-effort like the
    // decoder above: any failure resets audio_stream_idx to -1.
    let mut aud_enc_ctx: *mut AVCodecContext = ptr::null_mut();
    let mut aud_out_stream_idx: i32 = -1;
    let mut swr_ctx: *mut SwrContext = ptr::null_mut();
    if audio_stream_idx >= 0 {
        match crate::codec_utils::open_aac_encoder(aud_sample_rate, aud_nb_channels, 192_000, "hls")
        {
            Ok(ctx) => {
                aud_enc_ctx = ctx;
                let aud_out_stream = avformat_new_stream(out_ctx, ptr::null());
                if aud_out_stream.is_null() {
                    ff_sys::avcodec::free_context(&mut aud_enc_ctx as *mut *mut _);
                    log::warn!("hls cannot create audio output stream, skipping audio");
                    audio_stream_idx = -1;
                } else {
                    // Audio stream time base is 1/sample_rate (sample ticks).
                    (*aud_out_stream).time_base.num = 1;
                    (*aud_out_stream).time_base.den = aud_sample_rate;
                    aud_out_stream_idx = ((*out_ctx).nb_streams - 1) as i32;
                    if ff_sys::avcodec::parameters_from_context(
                        (*aud_out_stream).codecpar,
                        aud_enc_ctx,
                    )
                    .is_err()
                    {
                        log::warn!("hls audio stream codecpar copy failed");
                    }
                    // Resampler: decoder layout/format/rate -> encoder's.
                    let enc_ch_layout = &(*aud_enc_ctx).ch_layout;
                    let enc_sample_fmt = (*aud_enc_ctx).sample_fmt;
                    let enc_sample_rate = (*aud_enc_ctx).sample_rate;
                    let dec_ch_layout = &(*aud_dec_ctx).ch_layout;
                    let dec_sample_fmt = (*aud_dec_ctx).sample_fmt;
                    let dec_sample_rate = (*aud_dec_ctx).sample_rate;
                    if let Ok(ctx) = ff_sys::swresample::alloc_set_opts2(
                        enc_ch_layout,
                        enc_sample_fmt,
                        enc_sample_rate,
                        dec_ch_layout,
                        dec_sample_fmt,
                        dec_sample_rate,
                    ) {
                        if ff_sys::swresample::init(ctx).is_ok() {
                            swr_ctx = ctx;
                        } else {
                            let mut swr_tmp = ctx;
                            ff_sys::swresample::free(&mut swr_tmp);
                            ff_sys::avcodec::free_context(&mut aud_enc_ctx as *mut *mut _);
                            log::warn!("hls swr init failed, skipping audio");
                            audio_stream_idx = -1;
                        }
                    } else {
                        ff_sys::avcodec::free_context(&mut aud_enc_ctx as *mut *mut _);
                        log::warn!("hls swr alloc failed, skipping audio");
                        audio_stream_idx = -1;
                    }
                }
            }
            Err(e) => {
                log::warn!("hls aac encoder unavailable: {e}, skipping audio");
                audio_stream_idx = -1;
            }
        }
    }
    // --- Open the playlist I/O and write the container header. ---
    let pb = ff_sys::avformat::open_output(
        Path::new(&playlist_path),
        ff_sys::avformat::avio_flags::WRITE,
    )
    .map_err(|e| {
        cleanup_encoders(vid_enc_ctx, aud_enc_ctx, swr_ctx);
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        ffmpeg_err(e)
    })?;
    (*out_ctx).pb = pb;
    let ret = avformat_write_header(out_ctx, ptr::null_mut());
    if ret < 0 {
        ff_sys::avformat::close_output(&mut (*out_ctx).pb);
        cleanup_encoders(vid_enc_ctx, aud_enc_ctx, swr_ctx);
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        return Err(ffmpeg_err(ret));
    }
    // NOTE(review): pb is closed immediately after the header, yet packets
    // and the trailer are still written below — presumably the hls muxer
    // manages its own segment/playlist I/O from here on and close_output
    // tolerates repeated calls; confirm against the ff_sys wrappers.
    ff_sys::avformat::close_output(&mut (*out_ctx).pb);
    log::info!(
        "hls output context ready width={enc_width} height={enc_height} fps={video_fps:.1} \
         bit_rate={} audio={}",
        (*vid_enc_ctx).bit_rate,
        audio_stream_idx >= 0,
    );
    // --- Working packet and frames for the transcode loop. ---
    let mut pkt = av_packet_alloc();
    if pkt.is_null() {
        av_write_trailer(out_ctx);
        ff_sys::avformat::close_output(&mut (*out_ctx).pb);
        cleanup_encoders(vid_enc_ctx, aud_enc_ctx, swr_ctx);
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        return Err(ffmpeg_err_msg("cannot allocate packet"));
    }
    let vid_dec_frame = av_frame_alloc();
    let vid_enc_frame = av_frame_alloc();
    let aud_dec_frame = av_frame_alloc();
    let aud_enc_frame = av_frame_alloc();
    if vid_dec_frame.is_null()
        || vid_enc_frame.is_null()
        || aud_dec_frame.is_null()
        || aud_enc_frame.is_null()
    {
        free_frames(vid_dec_frame, vid_enc_frame, aud_dec_frame, aud_enc_frame);
        av_packet_free(&mut pkt);
        av_write_trailer(out_ctx);
        ff_sys::avformat::close_output(&mut (*out_ctx).pb);
        cleanup_encoders(vid_enc_ctx, aud_enc_ctx, swr_ctx);
        cleanup_output_ctx(out_ctx);
        cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
        return Err(ffmpeg_err_msg("cannot allocate frame"));
    }
    // Video PTS counter (frames) and audio PTS counter (samples).
    let mut video_frame_count: u64 = 0;
    let mut audio_sample_count: i64 = 0;
    // Scaler is (re)created lazily when the decoded frame geometry changes.
    let mut sws_ctx: *mut SwsContext = ptr::null_mut();
    let mut last_src_fmt: Option<AVPixelFormat> = None;
    let mut last_src_w: Option<i32> = None;
    let mut last_src_h: Option<i32> = None;
    // Per-frame durations handed to drain_encoder for timestamping.
    let vid_frame_period = AVRational {
        num: 1,
        den: fps_int,
    };
    let aud_frame_period = if !aud_enc_ctx.is_null() {
        AVRational {
            num: (*aud_enc_ctx).frame_size,
            den: (*aud_enc_ctx).sample_rate,
        }
    } else {
        // Unused when audio is disabled; placeholder value.
        AVRational { num: 1, den: 48000 }
    };
    // --- Main demux/transcode loop. Read errors other than EOF skip the
    // packet and continue; EOF breaks out to the flush phase.
    loop {
        match ff_sys::avformat::read_frame(input_ctx, pkt) {
            Err(e) if e == ff_sys::error_codes::EOF => break,
            Err(_e) => {
                av_packet_unref(pkt);
                continue;
            }
            Ok(()) => {}
        }
        let stream_idx = (*pkt).stream_index;
        if stream_idx == video_stream_idx {
            // Video path: decode -> force keyframe cadence -> scale -> encode.
            if ff_sys::avcodec::send_packet(vid_dec_ctx, pkt).is_err() {
                av_packet_unref(pkt);
                continue;
            }
            av_packet_unref(pkt);
            loop {
                match ff_sys::avcodec::receive_frame(vid_dec_ctx, vid_dec_frame) {
                    Err(e) if e == ff_sys::error_codes::EAGAIN || e == ff_sys::error_codes::EOF => {
                        break;
                    }
                    Err(_) => break,
                    Ok(()) => {}
                }
                // Force an I-frame every `keyframe_interval` frames so
                // segment boundaries can land on keyframes.
                (*vid_dec_frame).pict_type =
                    if video_frame_count.is_multiple_of(u64::from(keyframe_interval)) {
                        AVPictureType_AV_PICTURE_TYPE_I
                    } else {
                        AVPictureType_AV_PICTURE_TYPE_NONE
                    };
                // Rebuild the scaler if the source geometry/format changed.
                let src_fmt = (*vid_dec_frame).format;
                let src_w = (*vid_dec_frame).width;
                let src_h = (*vid_dec_frame).height;
                if last_src_fmt != Some(src_fmt)
                    || last_src_w != Some(src_w)
                    || last_src_h != Some(src_h)
                {
                    if !sws_ctx.is_null() {
                        ff_sys::swscale::free_context(sws_ctx);
                        sws_ctx = ptr::null_mut();
                    }
                    if let Ok(ctx) = ff_sys::swscale::get_context(
                        src_w,
                        src_h,
                        src_fmt,
                        enc_width,
                        enc_height,
                        AVPixelFormat_AV_PIX_FMT_YUV420P,
                        ff_sys::swscale::scale_flags::BILINEAR,
                    ) {
                        sws_ctx = ctx;
                        last_src_fmt = Some(src_fmt);
                        last_src_w = Some(src_w);
                        last_src_h = Some(src_h);
                    } else {
                        // Cannot scale this frame; drop it.
                        av_frame_unref(vid_dec_frame);
                        continue;
                    }
                }
                // Prepare the encode frame and derive its PTS from the
                // frame counter in the encoder time base.
                (*vid_enc_frame).format = AVPixelFormat_AV_PIX_FMT_YUV420P;
                (*vid_enc_frame).width = enc_width;
                (*vid_enc_frame).height = enc_height;
                (*vid_enc_frame).pts = av_rescale_q(
                    video_frame_count as i64,
                    AVRational {
                        num: 1,
                        den: fps_int,
                    },
                    (*vid_enc_ctx).time_base,
                );
                let buf_ret = av_frame_get_buffer(vid_enc_frame, 0);
                if buf_ret < 0 {
                    av_frame_unref(vid_dec_frame);
                    continue;
                }
                let scale_ok = ff_sys::swscale::scale(
                    sws_ctx,
                    (*vid_dec_frame).data.as_ptr() as *const *const u8,
                    (*vid_dec_frame).linesize.as_ptr(),
                    0,
                    src_h,
                    (*vid_enc_frame).data.as_mut_ptr().cast_const(),
                    (*vid_enc_frame).linesize.as_mut_ptr(),
                );
                if scale_ok.is_ok()
                    && ff_sys::avcodec::send_frame(vid_enc_ctx, vid_enc_frame).is_ok()
                {
                    crate::codec_utils::drain_encoder(
                        vid_enc_ctx,
                        out_ctx,
                        vid_out_stream_idx,
                        "hls",
                        vid_frame_period,
                    );
                }
                av_frame_unref(vid_enc_frame);
                av_frame_unref(vid_dec_frame);
                video_frame_count += 1;
            }
        } else if stream_idx == audio_stream_idx && !aud_dec_ctx.is_null() {
            // Audio path: decode -> resample to encoder format -> encode.
            if ff_sys::avcodec::send_packet(aud_dec_ctx, pkt).is_err() {
                av_packet_unref(pkt);
                continue;
            }
            av_packet_unref(pkt);
            loop {
                match ff_sys::avcodec::receive_frame(aud_dec_ctx, aud_dec_frame) {
                    Err(e) if e == ff_sys::error_codes::EAGAIN || e == ff_sys::error_codes::EOF => {
                        break;
                    }
                    Err(_) => break,
                    Ok(()) => {}
                }
                // frame_size == 0 means the encoder accepts variable sizes;
                // mirror the decoded frame's sample count in that case.
                let enc_frame_size = if (*aud_enc_ctx).frame_size > 0 {
                    (*aud_enc_ctx).frame_size
                } else {
                    (*aud_dec_frame).nb_samples
                };
                (*aud_enc_frame).format = (*aud_enc_ctx).sample_fmt;
                (*aud_enc_frame).sample_rate = (*aud_enc_ctx).sample_rate;
                (*aud_enc_frame).nb_samples = enc_frame_size;
                let _ = ff_sys::swresample::channel_layout::copy(
                    &mut (*aud_enc_frame).ch_layout,
                    &(*aud_enc_ctx).ch_layout,
                );
                let buf_ret = av_frame_get_buffer(aud_enc_frame, 0);
                if buf_ret < 0 {
                    av_frame_unref(aud_dec_frame);
                    continue;
                }
                let in_data = (*aud_dec_frame).data.as_ptr() as *const *const u8;
                let in_samples = (*aud_dec_frame).nb_samples;
                let samples_out = ff_sys::swresample::convert(
                    swr_ctx,
                    (*aud_enc_frame).data.as_mut_ptr(),
                    enc_frame_size,
                    in_data,
                    in_samples,
                );
                if let Ok(n) = samples_out
                    && n > 0
                {
                    // Audio PTS is the running sample count (time base
                    // 1/sample_rate on the output stream).
                    (*aud_enc_frame).nb_samples = n;
                    (*aud_enc_frame).pts = audio_sample_count;
                    if ff_sys::avcodec::send_frame(aud_enc_ctx, aud_enc_frame).is_ok() {
                        crate::codec_utils::drain_encoder(
                            aud_enc_ctx,
                            out_ctx,
                            aud_out_stream_idx,
                            "hls",
                            aud_frame_period,
                        );
                    }
                    audio_sample_count += i64::from(n);
                }
                av_frame_unref(aud_enc_frame);
                av_frame_unref(aud_dec_frame);
            }
        } else {
            // Packet from a stream we are not transcoding; discard.
            av_packet_unref(pkt);
        }
    }
    // --- Flush phase: drain the video encoder... ---
    let _ = ff_sys::avcodec::send_frame(vid_enc_ctx, ptr::null());
    crate::codec_utils::drain_encoder(
        vid_enc_ctx,
        out_ctx,
        vid_out_stream_idx,
        "hls",
        vid_frame_period,
    );
    // ...then flush any samples buffered in the resampler, and finally the
    // audio encoder itself.
    if !aud_enc_ctx.is_null() {
        if !swr_ctx.is_null() {
            let enc_frame_size = if (*aud_enc_ctx).frame_size > 0 {
                (*aud_enc_ctx).frame_size
            } else {
                1024
            };
            (*aud_enc_frame).format = (*aud_enc_ctx).sample_fmt;
            (*aud_enc_frame).sample_rate = (*aud_enc_ctx).sample_rate;
            (*aud_enc_frame).nb_samples = enc_frame_size;
            let _ = ff_sys::swresample::channel_layout::copy(
                &mut (*aud_enc_frame).ch_layout,
                &(*aud_enc_ctx).ch_layout,
            );
            if av_frame_get_buffer(aud_enc_frame, 0) == 0 {
                // Null input drains the resampler's internal buffer.
                if let Ok(n) = ff_sys::swresample::convert(
                    swr_ctx,
                    (*aud_enc_frame).data.as_mut_ptr(),
                    enc_frame_size,
                    ptr::null(),
                    0,
                ) && n > 0
                {
                    (*aud_enc_frame).nb_samples = n;
                    (*aud_enc_frame).pts = audio_sample_count;
                    if ff_sys::avcodec::send_frame(aud_enc_ctx, aud_enc_frame).is_ok() {
                        crate::codec_utils::drain_encoder(
                            aud_enc_ctx,
                            out_ctx,
                            aud_out_stream_idx,
                            "hls",
                            aud_frame_period,
                        );
                    }
                }
                av_frame_unref(aud_enc_frame);
            }
        }
        let _ = ff_sys::avcodec::send_frame(aud_enc_ctx, ptr::null());
        crate::codec_utils::drain_encoder(
            aud_enc_ctx,
            out_ctx,
            aud_out_stream_idx,
            "hls",
            aud_frame_period,
        );
    }
    // --- Finalize the container and release everything. ---
    av_write_trailer(out_ctx);
    free_frames(vid_dec_frame, vid_enc_frame, aud_dec_frame, aud_enc_frame);
    av_packet_free(&mut pkt);
    if !sws_ctx.is_null() {
        ff_sys::swscale::free_context(sws_ctx);
    }
    cleanup_encoders(vid_enc_ctx, aud_enc_ctx, swr_ctx);
    cleanup_output_ctx(out_ctx);
    cleanup_decoders(vid_dec_ctx, aud_dec_ctx, &mut input_ctx);
    log::info!(
        "hls write complete video_frames={video_frame_count} \
         audio_samples={audio_sample_count}"
    );
    Ok(())
}
#[allow(clippy::cast_precision_loss)]
unsafe fn detect_fps(stream: *mut ff_sys::AVStream, fmt_ctx: *mut AVFormatContext) -> f64 {
const MIN_FPS: f64 = 1.0;
const MAX_FPS: f64 = 240.0;
let try_rational = |num: i32, den: i32| -> Option<f64> {
if den <= 0 || num <= 0 {
return None;
}
let fps = num as f64 / den as f64;
if (MIN_FPS..=MAX_FPS).contains(&fps) {
Some(fps)
} else {
None
}
};
let avg = (*stream).avg_frame_rate;
if let Some(fps) = try_rational(avg.num, avg.den) {
return fps;
}
let rfr = (*stream).r_frame_rate;
if let Some(fps) = try_rational(rfr.num, rfr.den) {
return fps;
}
let nb = (*stream).nb_frames;
let dur = (*fmt_ctx).duration; if nb > 0 && dur > 0 {
let fps = nb as f64 / (dur as f64 / 1_000_000.0);
if (MIN_FPS..=MAX_FPS).contains(&fps) {
return fps;
}
}
25.0 }
/// Frees both decoder contexts (when allocated) and closes the input
/// demuxer. Null decoder pointers are tolerated.
unsafe fn cleanup_decoders(
    mut vid_dec_ctx: *mut AVCodecContext,
    mut aud_dec_ctx: *mut AVCodecContext,
    input_ctx: *mut *mut AVFormatContext,
) {
    for ctx in [&mut vid_dec_ctx, &mut aud_dec_ctx] {
        if !ctx.is_null() {
            ff_sys::avcodec::free_context(ctx as *mut *mut _);
        }
    }
    ff_sys::avformat::close_input(input_ctx);
}
/// Frees both encoder contexts and the resampler. Every pointer may be
/// null, in which case it is skipped.
unsafe fn cleanup_encoders(
    mut vid_enc_ctx: *mut AVCodecContext,
    mut aud_enc_ctx: *mut AVCodecContext,
    mut swr_ctx: *mut SwrContext,
) {
    for ctx in [&mut vid_enc_ctx, &mut aud_enc_ctx] {
        if !ctx.is_null() {
            ff_sys::avcodec::free_context(ctx as *mut *mut _);
        }
    }
    if !swr_ctx.is_null() {
        ff_sys::swresample::free(&mut swr_ctx);
    }
}
/// Frees the muxer context allocated by `avformat_alloc_output_context2`.
///
/// Accepts a null pointer so error paths can call it unconditionally.
/// (The previous re-assignment of the by-value parameter to null after
/// freeing was dead code — it never reached the caller — and is removed.)
unsafe fn cleanup_output_ctx(out_ctx: *mut AVFormatContext) {
    if !out_ctx.is_null() {
        avformat_free_context(out_ctx);
    }
}
/// Releases up to four working frames; any of the pointers may be null
/// (e.g. when allocation failed part-way) and is then skipped.
unsafe fn free_frames(
    mut vid_dec: *mut AVFrame,
    mut vid_enc: *mut AVFrame,
    mut aud_dec: *mut AVFrame,
    mut aud_enc: *mut AVFrame,
) {
    for frame in [&mut vid_dec, &mut vid_enc, &mut aud_dec, &mut aud_enc] {
        if !frame.is_null() {
            av_frame_free(frame as *mut *mut _);
        }
    }
}