#![deny(non_camel_case_types)]
#![deny(unused_parens)]
#![deny(non_upper_case_globals)]
#![deny(unused_qualifications)]
#![deny(missing_docs)]
#![deny(unused_results)]
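
//! A simple MPEG video encoder built on top of the raw `ffmpeg-sys` bindings.
//!
//! The sketch below shows the intended usage. It assumes this crate is published under the
//! name `mpeg_encoder` and that FFmpeg is available at link time, so it is not run as a
//! doc-test.
//!
//! ```ignore
//! use mpeg_encoder::Encoder;
//!
//! // A 320x240 output written to /tmp/out.mpeg with the default parameters.
//! let mut encoder = Encoder::new("/tmp/out.mpeg", 320, 240);
//!
//! // A dummy, entirely black, RGB frame (3 bytes per pixel).
//! let frame = vec![0u8; 320 * 240 * 3];
//!
//! // Feed the same frame a few times; the file is flushed when the encoder is dropped.
//! for _ in 0 .. 60 {
//!     encoder.encode_rgb(320, 240, &frame, false);
//! }
//! ```
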
extern crate libc;
extern crate ffmpeg_sys;
use libc::c_void;
use ffmpeg_sys::{SwsContext, AVCodec, AVCodecContext, AVPacket, AVFormatContext, AVStream,
AVFrame, AVRational, AVPixelFormat, AVPicture, AVCodecID};
use std::ptr;
use std::mem;
use std::iter;
use std::path::{Path, PathBuf};
use std::ffi::CString;
use std::iter::FromIterator;
use std::sync::{Once, ONCE_INIT};

// Ensures FFmpeg's global registration (`av_register_all`) runs exactly once.
static AVFORMAT_INIT: Once = ONCE_INIT;

/// An MPEG video encoder.
///
/// Frames are fed one at a time with `encode_rgb` or `encode_rgba`; any frames still buffered
/// by the codec are flushed and the FFmpeg resources are released when the encoder is dropped.
pub struct Encoder {
    tmp_frame_buf: Vec<u8>,
    frame_buf: Vec<u8>,
    curr_frame_index: usize,
    initialized: bool,
    bit_rate: usize,
    target_width: usize,
    target_height: usize,
    time_base: (usize, usize),
    gop_size: usize,
    max_b_frames: usize,
    pix_fmt: AVPixelFormat,
    tmp_frame: *mut AVFrame,
    frame: *mut AVFrame,
    context: *mut AVCodecContext,
    format_context: *mut AVFormatContext,
    video_st: *mut AVStream,
    scale_context: *mut SwsContext,
    path: PathBuf
}

impl Encoder {
    /// Creates a new encoder writing to the file at `path`, using the default parameters.
    pub fn new<P: AsRef<Path>>(path: P, width: usize, height: usize) -> Encoder {
        Encoder::new_with_params(path, width, height, None, None, None, None, None)
    }
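
    /// Creates a new encoder with custom encoding parameters.
    ///
    /// Any parameter left to `None` falls back to its default: a bit rate of 400000, a time
    /// base of `(1, 60)`, a GOP size of 10, at most 1 B-frame, and the `AV_PIX_FMT_YUV420P`
    /// pixel format. Odd dimensions are rounded up to the next even value.
    ///
    /// The example below is a sketch assuming the crate name `mpeg_encoder`:
    ///
    /// ```ignore
    /// use mpeg_encoder::Encoder;
    ///
    /// // Encode at 30 frames per second with a higher bit rate; keep the other defaults.
    /// let mut encoder = Encoder::new_with_params("/tmp/out.mpeg", 640, 480,
    ///                                            Some(800000),  // bit rate
    ///                                            Some((1, 30)), // time base: 1/30 s per frame
    ///                                            None,          // GOP size
    ///                                            None,          // max B-frames
    ///                                            None);         // pixel format
    /// ```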
    pub fn new_with_params<P: AsRef<Path>>(path: P,
                                           width: usize,
                                           height: usize,
                                           bit_rate: Option<usize>,
                                           time_base: Option<(usize, usize)>,
                                           gop_size: Option<usize>,
                                           max_b_frames: Option<usize>,
                                           pix_fmt: Option<AVPixelFormat>)
                                           -> Encoder {
        // Register all the muxers and codecs exactly once.
        AVFORMAT_INIT.call_once(|| unsafe {
            ffmpeg_sys::av_register_all();
        });

        let bit_rate = bit_rate.unwrap_or(400000);
        let time_base = time_base.unwrap_or((1, 60));
        let gop_size = gop_size.unwrap_or(10);
        let max_b_frames = max_b_frames.unwrap_or(1);
        let pix_fmt = pix_fmt.unwrap_or(AVPixelFormat::AV_PIX_FMT_YUV420P);

        // The dimensions of the encoded video must be even.
        let width = if width % 2 == 0 { width } else { width + 1 };
        let height = if height % 2 == 0 { height } else { height + 1 };

        let mut pathbuf = PathBuf::new();
        pathbuf.push(path);

        Encoder {
            initialized: false,
            curr_frame_index: 0,
            bit_rate: bit_rate,
            target_width: width,
            target_height: height,
            time_base: time_base,
            gop_size: gop_size,
            max_b_frames: max_b_frames,
            pix_fmt: pix_fmt,
            frame: ptr::null_mut(),
            tmp_frame: ptr::null_mut(),
            context: ptr::null_mut(),
            scale_context: ptr::null_mut(),
            format_context: ptr::null_mut(),
            video_st: ptr::null_mut(),
            path: pathbuf,
            frame_buf: Vec::new(),
            tmp_frame_buf: Vec::new()
        }
    }
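
    /// Encodes one RGB frame; `data` must contain exactly `width * height * 3` bytes, row-major.
    ///
    /// Set `vertical_flip` to `true` when the rows are stored bottom-up (e.g. a buffer read
    /// back from OpenGL). A minimal sketch, assuming the crate name `mpeg_encoder`:
    ///
    /// ```ignore
    /// use mpeg_encoder::Encoder;
    ///
    /// let mut encoder = Encoder::new("/tmp/out.mpeg", 4, 4);
    /// let frame = vec![255u8; 4 * 4 * 3]; // a single all-white 4x4 frame
    /// encoder.encode_rgb(4, 4, &frame, false);
    /// ```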
    pub fn encode_rgb(&mut self, width: usize, height: usize, data: &[u8], vertical_flip: bool) {
        assert!(data.len() == width * height * 3);
        self.encode(width, height, data, false, vertical_flip)
    }

    /// Encodes one RGBA frame; `data` must contain exactly `width * height * 4` bytes,
    /// row-major. The alpha channel is ignored.
    pub fn encode_rgba(&mut self, width: usize, height: usize, data: &[u8], vertical_flip: bool) {
        assert!(data.len() == width * height * 4);
        self.encode(width, height, data, true, vertical_flip)
    }

    fn encode(&mut self, width: usize, height: usize, data: &[u8], rgba: bool, vertical_flip: bool) {
        assert!((rgba && data.len() == width * height * 4) ||
                (!rgba && data.len() == width * height * 3));

        self.init();

        let mut pkt: AVPacket = unsafe { mem::uninitialized() };

        unsafe {
            ffmpeg_sys::av_init_packet(&mut pkt);
        }

        pkt.data = ptr::null_mut();
        pkt.size = 0;

        // Repack the input into a tightly packed RGB24 buffer.
        self.tmp_frame_buf.resize(width * height * 3, 0);

        if rgba {
            for (i, pixel) in data.chunks(4).enumerate() {
                self.tmp_frame_buf[i * 3 + 0] = pixel[0];
                self.tmp_frame_buf[i * 3 + 1] = pixel[1];
                self.tmp_frame_buf[i * 3 + 2] = pixel[2];
            }
        }
        else {
            self.tmp_frame_buf.clone_from_slice(data);
        }

        if vertical_flip {
            vflip(self.tmp_frame_buf.as_mut_slice(), width * 3, height);
        }

        unsafe {
            // Advance the presentation timestamp by one frame, expressed in the stream time base.
            (*self.frame).pts += ffmpeg_sys::av_rescale_q(1, (*self.context).time_base, (*self.video_st).time_base);
        }

        self.curr_frame_index += 1;

        unsafe {
            // Wrap the RGB24 buffer into the temporary frame.
            (*self.tmp_frame).width = width as i32;
            (*self.tmp_frame).height = height as i32;

            let _ = ffmpeg_sys::avpicture_fill(self.tmp_frame as *mut AVPicture,
                                               self.tmp_frame_buf.get(0).unwrap(),
                                               AVPixelFormat::AV_PIX_FMT_RGB24,
                                               width as i32,
                                               height as i32);
        }

        unsafe {
            // Rescale and convert the input to the output size and pixel format.
            self.scale_context = ffmpeg_sys::sws_getCachedContext(
                self.scale_context, width as i32, height as i32, AVPixelFormat::AV_PIX_FMT_RGB24,
                self.target_width as i32, self.target_height as i32, self.pix_fmt,
                ffmpeg_sys::SWS_BICUBIC as i32, ptr::null_mut(), ptr::null_mut(), ptr::null());

            let _ = ffmpeg_sys::sws_scale(self.scale_context,
                                          mem::transmute(&(*self.tmp_frame).data[0]), &(*self.tmp_frame).linesize[0],
                                          0, height as i32,
                                          mem::transmute(&(*self.frame).data[0]), &mut (*self.frame).linesize[0]);
        }

        let mut got_output = 0;
        let ret;

        unsafe {
            ret = ffmpeg_sys::avcodec_encode_video2(self.context, &mut pkt, self.frame, &mut got_output);
        }

        if ret < 0 {
            panic!("Error encoding frame.");
        }

        if got_output != 0 {
            unsafe {
                let _ = ffmpeg_sys::av_interleaved_write_frame(self.format_context, &mut pkt);
                ffmpeg_sys::av_free_packet(&mut pkt);
            }
        }
    }

    /// Initializes the encoder: allocates the output context, codec, stream and frames, and
    /// writes the container header.
    ///
    /// This is called automatically by the first call to `encode_rgb` or `encode_rgba`.
    pub fn init(&mut self) {
        if self.initialized {
            return;
        }

        let path_str = CString::new(self.path.to_str().unwrap()).unwrap();

        unsafe {
            // Guess the container format from the file name, falling back to MPEG.
            let mut fmt = ptr::null_mut();

            let _ = ffmpeg_sys::avformat_alloc_output_context2(&mut fmt, ptr::null_mut(), ptr::null(), path_str.as_ptr());

            if fmt.is_null() {
                let mpeg = CString::new(&b"mpeg"[..]).unwrap();

                let _ = ffmpeg_sys::avformat_alloc_output_context2(&mut fmt, ptr::null_mut(), mpeg.as_ptr(), path_str.as_ptr());
            }

            self.format_context = fmt;

            if self.format_context.is_null() {
                panic!("Unable to create the output context.");
            }

            let fmt = (*self.format_context).oformat;

            if (*fmt).video_codec == AVCodecID::AV_CODEC_ID_NONE {
                panic!("The selected output container does not support video encoding.")
            }

            let codec: *mut AVCodec = ffmpeg_sys::avcodec_find_encoder((*fmt).video_codec);

            if codec.is_null() {
                panic!("Codec not found.");
            }

            self.video_st = ffmpeg_sys::avformat_new_stream(self.format_context, codec);

            if self.video_st.is_null() {
                panic!("Failed to allocate the video stream.");
            }

            (*self.video_st).id = ((*self.format_context).nb_streams - 1) as i32;

            self.context = (*self.video_st).codec;

            let _ = ffmpeg_sys::avcodec_get_context_defaults3(self.context, codec);

            if self.context.is_null() {
                panic!("Could not allocate video codec context.");
            }

            // The scaling context converting the RGB24 input to the output pixel format.
            self.scale_context = ffmpeg_sys::sws_getContext(
                self.target_width as i32, self.target_height as i32, AVPixelFormat::AV_PIX_FMT_RGB24,
                self.target_width as i32, self.target_height as i32, self.pix_fmt,
                ffmpeg_sys::SWS_BICUBIC as i32, ptr::null_mut(), ptr::null_mut(), ptr::null());

            // Codec parameters.
            (*self.context).bit_rate = self.bit_rate as i32;
            (*self.context).width = self.target_width as i32;
            (*self.context).height = self.target_height as i32;

            // One tick per frame, each tick lasting `tnum / tdenum` seconds.
            let (tnum, tdenum) = self.time_base;
            (*self.context).time_base = AVRational { num: tnum as i32, den: tdenum as i32 };
            (*self.video_st).time_base = (*self.context).time_base;

            // Emit one intra frame every `gop_size` frames at most.
            (*self.context).gop_size = self.gop_size as i32;
            (*self.context).max_b_frames = self.max_b_frames as i32;
            (*self.context).pix_fmt = self.pix_fmt;

            if (*self.context).codec_id == AVCodecID::AV_CODEC_ID_MPEG1VIDEO {
                // Needed to avoid using macroblocks in which some coefficients overflow.
                (*self.context).mb_decision = 2;
            }

            if ffmpeg_sys::avcodec_open2(self.context, codec, ptr::null_mut()) < 0 {
                panic!("Could not open the codec.");
            }

            // Allocate the output frame and its backing buffer.
            self.frame = ffmpeg_sys::avcodec_alloc_frame();

            if self.frame.is_null() {
                panic!("Could not allocate the video frame.");
            }

            (*self.frame).format = (*self.context).pix_fmt as i32;
            (*self.frame).width = (*self.context).width;
            (*self.frame).height = (*self.context).height;
            (*self.frame).pts = 0;

            let nframe_bytes = ffmpeg_sys::avpicture_get_size(self.pix_fmt,
                                                              self.target_width as i32,
                                                              self.target_height as i32);

            let reps = iter::repeat(0u8).take(nframe_bytes as usize);
            self.frame_buf = Vec::<u8>::from_iter(reps);

            if ffmpeg_sys::avpicture_fill(self.frame as *mut AVPicture,
                                          self.frame_buf.get(0).unwrap(),
                                          self.pix_fmt,
                                          self.target_width as i32,
                                          self.target_height as i32) < 0 {
                panic!("Could not allocate raw picture buffer.");
            }

            // The temporary frame wrapping the raw RGB24 input; its width, height and data
            // pointers are filled by `encode` for every input image.
            self.tmp_frame = ffmpeg_sys::avcodec_alloc_frame();

            if self.tmp_frame.is_null() {
                panic!("Could not allocate the video frame.");
            }
            (*self.tmp_frame).format = AVPixelFormat::AV_PIX_FMT_RGB24 as i32;

            // Open the output file and write the container header.
            static AVIO_FLAG_WRITE: i32 = 2;

            if ffmpeg_sys::avio_open(&mut (*self.format_context).pb, path_str.as_ptr(), AVIO_FLAG_WRITE) < 0 {
                panic!("Failed to open the output file.");
            }

            if ffmpeg_sys::avformat_write_header(self.format_context, ptr::null_mut()) < 0 {
                panic!("Failed to write the output file header.");
            }
        }

        self.initialized = true;
    }
}
impl Drop for Encoder {
    fn drop(&mut self) {
        if self.initialized {
            // Flush the frames still buffered by the encoder.
            let mut pkt: AVPacket = unsafe { mem::uninitialized() };
            let mut got_output = 1;

            while got_output != 0 {
                let ret;

                unsafe {
                    ffmpeg_sys::av_init_packet(&mut pkt);
                }

                pkt.data = ptr::null_mut();
                pkt.size = 0;

                unsafe {
                    ret = ffmpeg_sys::avcodec_encode_video2(self.context, &mut pkt, ptr::null(), &mut got_output);
                }

                if ret < 0 {
                    panic!("Error encoding frame.");
                }

                if got_output != 0 {
                    unsafe {
                        let _ = ffmpeg_sys::av_interleaved_write_frame(self.format_context, &mut pkt);
                        ffmpeg_sys::av_free_packet(&mut pkt);
                    }
                }
            }

            unsafe {
                // Finalize the container and release the FFmpeg resources. `av_write_trailer`
                // and `avio_close` are standard libavformat calls assumed to be exposed by
                // `ffmpeg_sys`; without them the output file would be left without a trailer.
                let _ = ffmpeg_sys::av_write_trailer(self.format_context);
                let _ = ffmpeg_sys::avio_close((*self.format_context).pb);

                let _ = ffmpeg_sys::avcodec_close(self.context);
                ffmpeg_sys::av_free(self.context as *mut c_void);
                ffmpeg_sys::avcodec_free_frame(&mut self.frame);
                ffmpeg_sys::avcodec_free_frame(&mut self.tmp_frame);
            }
        }
    }
}

fn vflip(vec: &mut [u8], width: usize, height: usize) {
    for j in 0 .. height / 2 {
        for i in 0 .. width {
            vec.swap((height - j - 1) * width + i, j * width + i);
        }
    }
}