mpeg_encoder/
lib.rs

/*!
 * MPEG video encoder.
 */

#![deny(non_camel_case_types)]
#![deny(unused_parens)]
#![deny(non_upper_case_globals)]
#![deny(unused_qualifications)]
#![deny(missing_docs)]
#![deny(unused_results)]

extern crate libc;
extern crate ffmpeg_sys;

// Inspired by the muxing sample: http://ffmpeg.org/doxygen/trunk/muxing_8c-source.html

use libc::c_void;
use ffmpeg_sys::{SwsContext, AVCodec, AVCodecContext, AVPacket, AVFormatContext, AVStream,
                 AVFrame, AVRational, AVPixelFormat, AVPicture, AVCodecID};
use std::ptr;
use std::mem;
use std::iter;
use std::path::{Path, PathBuf};
use std::ffi::CString;
use std::iter::FromIterator;
use std::sync::{Once, ONCE_INIT};

static mut AVFORMAT_INIT: Once = ONCE_INIT;

/// MPEG video recorder.
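///
/// # Example
///
/// A minimal usage sketch (the output path and the all-black 640x480 frames are
/// illustrative, and the snippet is not compiled as a doctest):
///
/// ```ignore
/// use mpeg_encoder::Encoder;
///
/// let mut encoder = Encoder::new("out.mpg", 640, 480);
/// let frame = vec![0u8; 640 * 480 * 3]; // one black RGB frame
/// for _ in 0 .. 60 {
///     encoder.encode_rgb(640, 480, &frame, false);
/// }
/// // Dropping the encoder flushes any delayed frames and finalizes the file.
/// ```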
pub struct Encoder {
    tmp_frame_buf:    Vec<u8>,
    frame_buf:        Vec<u8>,
    curr_frame_index: usize,
    initialized:      bool,
    bit_rate:         usize,
    target_width:     usize,
    target_height:    usize,
    time_base:        (usize, usize),
    gop_size:         usize,
    max_b_frames:     usize,
    pix_fmt:          AVPixelFormat,
    tmp_frame:        *mut AVFrame,
    frame:            *mut AVFrame,
    context:          *mut AVCodecContext,
    format_context:   *mut AVFormatContext,
    video_st:         *mut AVStream,
    scale_context:    *mut SwsContext,
    path:             PathBuf
}

impl Encoder {
    /// Creates a new video recorder.
    ///
    /// # Arguments:
    /// * `path`   - path to the output file.
    /// * `width`  - width of the recorded video.
    /// * `height` - height of the recorded video.
    pub fn new<P: AsRef<Path>>(path: P, width: usize, height: usize) -> Encoder {
        Encoder::new_with_params(path, width, height, None, None, None, None, None)
    }

    /// Creates a new video recorder with custom recording parameters.
    ///
    /// An odd `width` or `height` is rounded up to the next even value, as the encoded
    /// resolution must be a multiple of two.
    ///
    /// # Arguments:
    /// * `path`         - path to the output file.
    /// * `width`        - width of the recorded video.
    /// * `height`       - height of the recorded video.
    /// * `bit_rate`     - the average bit rate. Default value: 400000.
    /// * `time_base`    - this is the fundamental unit of time (in seconds) in terms of which
    ///                    frame timestamps are represented. Default value: (1, 60), i.e. 60 fps.
    /// * `gop_size`     - the number of pictures in a group of pictures. Default value: 10.
    /// * `max_b_frames` - maximum number of B-frames between non-B-frames. Default value: 1.
    /// * `pix_fmt`      - pixel format. Default value: `AVPixelFormat::AV_PIX_FMT_YUV420P`.
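    ///
    /// # Example
    ///
    /// A sketch of overriding a few parameters (the values are illustrative, not
    /// recommendations; the snippet is not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut encoder = Encoder::new_with_params(
    ///     "out.mpg", 1280, 720,
    ///     Some(2_000_000),   // bit_rate
    ///     Some((1, 30)),     // time_base: 30 fps
    ///     None, None, None); // gop_size, max_b_frames, pix_fmt: keep the defaults
    /// ```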
    pub fn new_with_params<P: AsRef<Path>>(path:         P,
                                           width:        usize,
                                           height:       usize,
                                           bit_rate:     Option<usize>,
                                           time_base:    Option<(usize, usize)>,
                                           gop_size:     Option<usize>,
                                           max_b_frames: Option<usize>,
                                           pix_fmt:      Option<AVPixelFormat>)
                                           -> Encoder {
        unsafe {
            AVFORMAT_INIT.call_once(|| {
                ffmpeg_sys::av_register_all();
            });
        }

        let bit_rate     = bit_rate.unwrap_or(400000); // FIXME
        let time_base    = time_base.unwrap_or((1, 60));
        let gop_size     = gop_size.unwrap_or(10);
        let max_b_frames = max_b_frames.unwrap_or(1);
        let pix_fmt      = pix_fmt.unwrap_or(AVPixelFormat::AV_PIX_FMT_YUV420P);
        // Width and height must be a multiple of two.
        let width        = if width  % 2 == 0 { width }  else { width + 1 };
        let height       = if height % 2 == 0 { height } else { height + 1 };

        let mut pathbuf = PathBuf::new();
        pathbuf.push(path);

        Encoder {
            initialized:      false,
            curr_frame_index: 0,
            bit_rate:         bit_rate,
            target_width:     width,
            target_height:    height,
            time_base:        time_base,
            gop_size:         gop_size,
            max_b_frames:     max_b_frames,
            pix_fmt:          pix_fmt,
            frame:            ptr::null_mut(),
            tmp_frame:        ptr::null_mut(),
            context:          ptr::null_mut(),
            scale_context:    ptr::null_mut(),
            format_context:   ptr::null_mut(),
            video_st:         ptr::null_mut(),
            path:             pathbuf,
            frame_buf:        Vec::new(),
            tmp_frame_buf:    Vec::new()
        }
    }

    /// Adds an image with an RGB pixel format to the video.
    ///
    /// The `data` slice must contain exactly `width * height * 3` bytes of tightly
    /// packed, row-major RGB pixels.
    pub fn encode_rgb(&mut self, width: usize, height: usize, data: &[u8], vertical_flip: bool) {
        assert!(data.len() == width * height * 3);
        self.encode(width, height, data, false, vertical_flip)
    }

    /// Adds an image with an RGBA pixel format to the video.
    ///
    /// The `data` slice must contain exactly `width * height * 4` bytes of tightly
    /// packed, row-major RGBA pixels; the alpha channel is discarded.
    pub fn encode_rgba(&mut self, width: usize, height: usize, data: &[u8], vertical_flip: bool) {
        assert!(data.len() == width * height * 4);
        self.encode(width, height, data, true, vertical_flip)
    }

    fn encode(&mut self, width: usize, height: usize, data: &[u8], rgba: bool, vertical_flip: bool) {
        assert!((rgba && data.len() == width * height * 4) || (!rgba && data.len() == width * height * 3));

        self.init();

        let mut pkt: AVPacket = unsafe { mem::uninitialized() };

        unsafe {
            ffmpeg_sys::av_init_packet(&mut pkt);
        }

        pkt.data = ptr::null_mut();  // packet data will be allocated by the encoder
        pkt.size = 0;

        /*
         *
         * Fill the snapshot frame.
         *
         */
        self.tmp_frame_buf.resize(width * height * 3, 0);

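        // The scaler input is packed RGB24, so an RGBA snapshot has its alpha channel
        // dropped here, while an RGB snapshot is copied as-is.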
        if rgba {
            for (i, pixel) in data.chunks(4).enumerate() {
                self.tmp_frame_buf[i * 3 + 0] = pixel[0];
                self.tmp_frame_buf[i * 3 + 1] = pixel[1];
                self.tmp_frame_buf[i * 3 + 2] = pixel[2];
            }
        }
        else {
            self.tmp_frame_buf.clone_from_slice(data);
        }

        if vertical_flip {
            vflip(self.tmp_frame_buf.as_mut_slice(), width * 3, height);
        }

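        // Advance the presentation timestamp by one frame, rescaled from the codec
        // time base to the stream time base.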
        unsafe {
            (*self.frame).pts += ffmpeg_sys::av_rescale_q(1, (*self.context).time_base, (*self.video_st).time_base);
            self.curr_frame_index += 1;
        }

        unsafe {
            (*self.tmp_frame).width  = width as i32;
            (*self.tmp_frame).height = height as i32;

            let _ = ffmpeg_sys::avpicture_fill(self.tmp_frame as *mut AVPicture,
                                               self.tmp_frame_buf.get(0).unwrap(),
                                               AVPixelFormat::AV_PIX_FMT_RGB24,
                                               width as i32,
                                               height as i32);
        }

        /*
         * Convert the snapshot frame to the right format for the destination frame.
         * `sws_getCachedContext` reuses the existing scaling context when the snapshot
         * and target parameters have not changed.
         */
        unsafe {
            self.scale_context = ffmpeg_sys::sws_getCachedContext(
                self.scale_context, width as i32, height as i32, AVPixelFormat::AV_PIX_FMT_RGB24,
                self.target_width as i32, self.target_height as i32, self.pix_fmt,
                ffmpeg_sys::SWS_BICUBIC as i32, ptr::null_mut(), ptr::null_mut(), ptr::null());

            let _ = ffmpeg_sys::sws_scale(self.scale_context,
                                          mem::transmute(&(*self.tmp_frame).data[0]), &(*self.tmp_frame).linesize[0],
                                          0, height as i32,
                                          mem::transmute(&(*self.frame).data[0]), &mut (*self.frame).linesize[0]);
        }

        // Encode the image.
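        // `got_output` is nonzero only when the encoder actually emits a packet; with
        // B-frames enabled it may buffer the input and produce nothing for this call.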
        let mut got_output = 0;
        let ret;

        unsafe {
            ret = ffmpeg_sys::avcodec_encode_video2(self.context, &mut pkt, self.frame, &mut got_output);
        }

        if ret < 0 {
            panic!("Error encoding frame.");
        }

        if got_output != 0 {
            unsafe {
                let _ = ffmpeg_sys::av_interleaved_write_frame(self.format_context, &mut pkt);
                ffmpeg_sys::av_free_packet(&mut pkt);
            }
        }
    }

    /// Initializes the recorder if needed.
    ///
    /// This is called automatically when the first snapshot is made. Call it explicitly
    /// beforehand if you want to avoid that extra overhead at the time of the first snapshot.
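    ///
    /// A sketch of paying the setup cost up front (illustrative, not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut encoder = Encoder::new("out.mpg", 640, 480);
    /// encoder.init(); // opens the output file and the codec immediately
    /// ```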
    pub fn init(&mut self) {
        if self.initialized {
            return;
        }

        let path_str = CString::new(self.path.to_str().unwrap()).unwrap();

        unsafe {
            // Try to guess the container type from the path.
            let mut fmt = ptr::null_mut();

            let _ = ffmpeg_sys::avformat_alloc_output_context2(&mut fmt, ptr::null_mut(), ptr::null(), path_str.as_ptr());

            if fmt.is_null() {
                // Could not guess; default to MPEG.
                let mpeg = CString::new(&b"mpeg"[..]).unwrap();

                let _ = ffmpeg_sys::avformat_alloc_output_context2(&mut fmt, ptr::null_mut(), mpeg.as_ptr(), path_str.as_ptr());
            }

            self.format_context = fmt;

            if self.format_context.is_null() {
                panic!("Unable to create the output context.");
            }

            let fmt = (*self.format_context).oformat;

            if (*fmt).video_codec == AVCodecID::AV_CODEC_ID_NONE {
                panic!("The selected output container does not support video encoding.");
            }

            let codec: *mut AVCodec = ffmpeg_sys::avcodec_find_encoder((*fmt).video_codec);

            if codec.is_null() {
                panic!("Codec not found.");
            }

            self.video_st = ffmpeg_sys::avformat_new_stream(self.format_context, codec);

            if self.video_st.is_null() {
                panic!("Failed to allocate the video stream.");
            }

            (*self.video_st).id = ((*self.format_context).nb_streams - 1) as i32;

            self.context = (*self.video_st).codec;

            if self.context.is_null() {
                panic!("Could not allocate video codec context.");
            }

            let _ = ffmpeg_sys::avcodec_get_context_defaults3(self.context, codec);

            // Create the initial swscale context (RGB24 -> target pixel format).
            self.scale_context = ffmpeg_sys::sws_getContext(
                self.target_width as i32, self.target_height as i32, AVPixelFormat::AV_PIX_FMT_RGB24,
                self.target_width as i32, self.target_height as i32, self.pix_fmt,
                ffmpeg_sys::SWS_BICUBIC as i32, ptr::null_mut(), ptr::null_mut(), ptr::null());
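            // Note: `encode` swaps this for a `sws_getCachedContext` sized to the actual
            // snapshot, so this context mostly covers snapshots that already match the
            // target dimensions.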

            // Put sample parameters.
            (*self.context).bit_rate = self.bit_rate as i32;

            // Resolution must be a multiple of two.
            (*self.context).width    = self.target_width  as i32;
            (*self.context).height   = self.target_height as i32;

            // Frames per second.
            let (tnum, tdenum)           = self.time_base;
            (*self.context).time_base    = AVRational { num: tnum as i32, den: tdenum as i32 };
            (*self.video_st).time_base   = (*self.context).time_base;
            (*self.context).gop_size     = self.gop_size as i32;
            (*self.context).max_b_frames = self.max_b_frames as i32;
            (*self.context).pix_fmt      = self.pix_fmt;

            if (*self.context).codec_id == AVCodecID::AV_CODEC_ID_MPEG1VIDEO {
                // Needed to avoid using macroblocks in which some coeffs overflow.
                // This does not happen with normal video, it just happens here as
                // the motion of the chroma plane does not match the luma plane.
                (*self.context).mb_decision = 2;
            }

            /*
            if (*fmt).flags & ffmpeg_sys::AVFMT_GLOBALHEADER != 0 {
                (*self.context).flags = (*self.context).flags | CODEC_FLAG_GLOBAL_HEADER;
            }
            */

            // Open the codec.
            if ffmpeg_sys::avcodec_open2(self.context, codec, ptr::null_mut()) < 0 {
                panic!("Could not open the codec.");
            }

            /*
             * Init the destination video frame.
             */
            self.frame = ffmpeg_sys::avcodec_alloc_frame();

            if self.frame.is_null() {
                panic!("Could not allocate the video frame.");
            }

            (*self.frame).format = (*self.context).pix_fmt as i32;
            (*self.frame).width  = (*self.context).width;
            (*self.frame).height = (*self.context).height;
            (*self.frame).pts    = 0;

            // Alloc the buffer for the destination frame.
            let nframe_bytes = ffmpeg_sys::avpicture_get_size(self.pix_fmt,
                                                              self.target_width as i32,
                                                              self.target_height as i32);

            let reps = iter::repeat(0u8).take(nframe_bytes as usize);
            self.frame_buf = Vec::<u8>::from_iter(reps);
            //self.frame_buf = Vec::from_elem(nframe_bytes as usize, 0u8);

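            // Point the destination frame's data planes and line sizes into `frame_buf`.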
            let _ = ffmpeg_sys::avpicture_fill(self.frame as *mut AVPicture,
                                               self.frame_buf.get(0).unwrap(),
                                               self.pix_fmt,
                                               self.target_width as i32,
                                               self.target_height as i32);

            /*
             * Init the temporary video frame.
             */
            self.tmp_frame = ffmpeg_sys::avcodec_alloc_frame();

            if self.tmp_frame.is_null() {
                panic!("Could not allocate the temporary video frame.");
            }

            // The temporary frame holds the RGB24 snapshot before scaling.
            (*self.tmp_frame).format = AVPixelFormat::AV_PIX_FMT_RGB24 as i32;
            // The rest (width, height, data, linesize) are set at the moment of the snapshot.

            // Open the output file.
            static AVIO_FLAG_WRITE: i32 = 2; // XXX: this should be defined by the bindings.
            if ffmpeg_sys::avio_open(&mut (*self.format_context).pb, path_str.as_ptr(), AVIO_FLAG_WRITE) < 0 {
                panic!("Failed to open the output file.");
            }

            if ffmpeg_sys::avformat_write_header(self.format_context, ptr::null_mut()) < 0 {
                panic!("Failed to write the output file header.");
            }
        }

        self.initialized = true;
    }
}

impl Drop for Encoder {
    fn drop(&mut self) {
        if self.initialized {
            // Get the delayed frames.
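            // Passing a null frame to `avcodec_encode_video2` puts the encoder into flush
            // mode so that any buffered frames (e.g. pending B-frames) are emitted.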
            let mut pkt: AVPacket = unsafe { mem::uninitialized() };
            let mut got_output = 1;
            while got_output != 0 {
                let ret;

                unsafe {
                    ffmpeg_sys::av_init_packet(&mut pkt);
                }

                pkt.data = ptr::null_mut();  // packet data will be allocated by the encoder
                pkt.size = 0;

                unsafe {
                    ret = ffmpeg_sys::avcodec_encode_video2(self.context, &mut pkt, ptr::null(), &mut got_output);
                }

                if ret < 0 {
                    panic!("Error encoding frame.");
                }

                if got_output != 0 {
                    unsafe {
                        let _ = ffmpeg_sys::av_interleaved_write_frame(self.format_context, &mut pkt);
                        ffmpeg_sys::av_free_packet(&mut pkt);
                    }
                }
            }

            unsafe {
                // Finalize the container (write the trailer) and close the output file,
                // matching the teardown of the FFmpeg muxing sample referenced above.
                let _ = ffmpeg_sys::av_write_trailer(self.format_context);
                let _ = ffmpeg_sys::avio_close((*self.format_context).pb);

                // Free the codec context and the frames.
                let _ = ffmpeg_sys::avcodec_close(self.context);
                ffmpeg_sys::av_free(self.context as *mut c_void);
                // ffmpeg_sys::av_freep((*self.frame).data[0] as *mut c_void);
                ffmpeg_sys::avcodec_free_frame(&mut self.frame);
                ffmpeg_sys::avcodec_free_frame(&mut self.tmp_frame);
            }
        }
    }
}

/// Flips an image stored as a flat byte buffer upside down, in place.
///
/// `width` is the row stride in bytes (pixel width times bytes per pixel).
fn vflip(vec: &mut [u8], width: usize, height: usize) {
    for j in 0 .. height / 2 {
        for i in 0 .. width {
            vec.swap((height - j - 1) * width + i, j * width + i);
        }
    }
}