    /*
     * Copyright (c) 2000-2003 Fabrice Bellard
     *
     * This file is part of FFmpeg.
     *
     * FFmpeg is free software; you can redistribute it and/or
     * modify it under the terms of the GNU Lesser General Public
     * License as published by the Free Software Foundation; either
     * version 2.1 of the License, or (at your option) any later version.
     *
     * FFmpeg is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     * Lesser General Public License for more details.
     *
     * You should have received a copy of the GNU Lesser General Public
     * License along with FFmpeg; if not, write to the Free Software
     * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
     */
    
    /**
     * @file
     * multimedia converter based on the FFmpeg libraries
     */
    
    
    #include "config.h"
    #include <ctype.h>
    #include <string.h>
    #include <math.h>
    #include <stdlib.h>
    #include <errno.h>
    
    #include <limits.h>
    
    #if HAVE_IO_H
    #include <io.h>
    #endif
    #if HAVE_UNISTD_H
    #include <unistd.h>
    #endif

    #include "libavformat/avformat.h"
    #include "libavdevice/avdevice.h"
    #include "libswscale/swscale.h"
    
    #include "libswresample/swresample.h"
    
    #include "libavutil/opt.h"
    
    #include "libavutil/channel_layout.h"
    
    #include "libavutil/parseutils.h"
    #include "libavutil/samplefmt.h"
    
    #include "libavutil/colorspace.h"
    
    #include "libavutil/fifo.h"
    
    #include "libavutil/intreadwrite.h"
    
    #include "libavutil/dict.h"
    
    #include "libavutil/mathematics.h"
    
    #include "libavutil/avstring.h"
    
    #include "libavutil/libm.h"
    
    #include "libavutil/imgutils.h"
    
    #include "libavutil/timestamp.h"
    
    #include "libavutil/bprint.h"
    
    #include "libavutil/time.h"
    
    #include "libavformat/os_support.h"
    
    #include "libavformat/ffm.h" // not public API
    
    
    # include "libavfilter/avcodec.h"
    
    # include "libavfilter/avfilter.h"
    # include "libavfilter/avfiltergraph.h"
    
    # include "libavfilter/buffersrc.h"
    
    Clément Bœsch's avatar
    Clément Bœsch committed
    # include "libavfilter/buffersink.h"
    
    #if HAVE_SYS_RESOURCE_H
    
    #include <sys/time.h>
    
    #include <sys/resource.h>
    
    #elif HAVE_GETPROCESSTIMES
    
    #include <windows.h>
    #endif
    
    #if HAVE_GETPROCESSMEMORYINFO
    #include <windows.h>
    #include <psapi.h>
    #endif
    
    #if HAVE_SYS_SELECT_H
    
    #include <sys/select.h>
    #endif
    
    
    #if HAVE_TERMIOS_H
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/time.h>
    #include <termios.h>
    #elif HAVE_KBHIT
    
    #include <conio.h>
    
    #endif
    
    #if HAVE_PTHREADS
    #include <pthread.h>
    #endif
    
    
    
    
    #include "ffmpeg.h"
    
    #include "cmdutils.h"
    
    
    #include "libavutil/avassert.h"
    
    const char program_name[] = "ffmpeg";
    
    const int program_birth_year = 2000;
    
    static FILE *vstats_file;
    
    static void do_video_stats(OutputStream *ost, int frame_size);
    
    static int64_t getutime(void);
    
    static int run_as_daemon  = 0;
    
    static int64_t video_size = 0;
    static int64_t audio_size = 0;
    
    static int64_t subtitle_size = 0;
    
    static int64_t extra_size = 0;
    
    static int nb_frames_dup = 0;
    static int nb_frames_drop = 0;
    
    static int current_time;
    
    AVIOContext *progress_avio = NULL;
    
    static uint8_t *subtitle_out;
    
    
    #if HAVE_PTHREADS
    
    /* signal to input threads that they should exit; set by the main thread */
    static int transcoding_finished;
    #endif
    
    
    #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
    
    
    
    InputStream **input_streams = NULL;
    int        nb_input_streams = 0;
    InputFile   **input_files   = NULL;
    int        nb_input_files   = 0;
    
    OutputStream **output_streams = NULL;
    int         nb_output_streams = 0;
    OutputFile   **output_files   = NULL;
    int         nb_output_files   = 0;
    
    
    
    FilterGraph **filtergraphs;
    int        nb_filtergraphs;
    
    
    
    #if HAVE_TERMIOS_H
    
    /* init terminal so that we can grab keys */
    static struct termios oldtty;
    
    static int restore_tty;
    #endif
    
    /* sub2video hack:
       Convert subtitles to video with alpha to insert them in filter graphs.
       This is a temporary solution until libavfilter gets real subtitles support.
     */
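    /* Example of the intended use (an assumed, typical invocation, not taken
       from this file): ffmpeg -i in.mkv -filter_complex "[0:v][0:s]overlay" out.mkv
       feeds the bitmap subtitle stream to the overlay filter; sub2video turns
       each decoded AVSubtitle into a BGRA video frame so the graph can consume it. */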
    
    
    
    static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                    AVSubtitleRect *r)
    {
        uint32_t *pal, *dst2;
        uint8_t *src, *src2;
        int x, y;
    
        if (r->type != SUBTITLE_BITMAP) {
            av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
            return;
        }
        if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
            av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
            return;
        }
    
        dst += r->y * dst_linesize + r->x * 4;
        src = r->pict.data[0];
        pal = (uint32_t *)r->pict.data[1];
        for (y = 0; y < r->h; y++) {
            dst2 = (uint32_t *)dst;
            src2 = src;
            for (x = 0; x < r->w; x++)
                *(dst2++) = pal[*(src2++)];
            dst += dst_linesize;
            src += r->pict.linesize[0];
        }
    }
    
    static void sub2video_push_ref(InputStream *ist, int64_t pts)
    {
        AVFilterBufferRef *ref = ist->sub2video.ref;
        int i;
    
        ist->sub2video.last_pts = ref->pts = pts;
        for (i = 0; i < ist->nb_filters; i++)
            av_buffersrc_add_ref(ist->filters[i]->filter,
                                 avfilter_ref_buffer(ref, ~0),
                                 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
                                 AV_BUFFERSRC_FLAG_NO_COPY |
                                 AV_BUFFERSRC_FLAG_PUSH);
    }

    static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    {
        int w = ist->sub2video.w, h = ist->sub2video.h;
        AVFilterBufferRef *ref = ist->sub2video.ref;
        uint8_t *dst;
        int     dst_linesize;
        int i;
    
        int64_t pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ist->st->time_base);
    
    
        if (!ref)
            return;
        dst          = ref->data    [0];
        dst_linesize = ref->linesize[0];
        memset(dst, 0, h * dst_linesize);
        for (i = 0; i < sub->num_rects; i++)
            sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
        sub2video_push_ref(ist, pts);
    }
    
    static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    {
        InputFile *infile = input_files[ist->file_index];
        int i, j, nb_reqs;
        int64_t pts2;
    
        /* When a frame is read from a file, examine all sub2video streams in
           the same file and send the sub2video frame again. Otherwise, decoded
           video frames could be accumulating in the filter graph while a filter
           (possibly overlay) is desperately waiting for a subtitle frame. */
        for (i = 0; i < infile->nb_streams; i++) {
            InputStream *ist2 = input_streams[infile->ist_index + i];
            if (!ist2->sub2video.ref)
                continue;
            /* subtitles seem to be usually muxed ahead of other streams;
               if not, subtracting a larger time here is necessary */
            pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
            /* do not send the heartbeat frame if the subtitle is already ahead */
            if (pts2 <= ist2->sub2video.last_pts)
                continue;
            for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
                nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            if (nb_reqs)
                sub2video_push_ref(ist2, pts2);
        }
    }
    
    static void sub2video_flush(InputStream *ist)
    {
        int i;
    
        for (i = 0; i < ist->nb_filters; i++)
            av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
    }
    
    /* end of sub2video hack */
    
    
    void term_exit(void)
    {
        av_log(NULL, AV_LOG_QUIET, "%s", "");
    #if HAVE_TERMIOS_H
        if(restore_tty)
            tcsetattr (0, TCSANOW, &oldtty);
    #endif
    }

    static volatile int received_sigterm = 0;
    
    static volatile int received_nb_signals = 0;
    
    static void
    sigterm_handler(int sig)
    
    {
        received_sigterm = sig;
        received_nb_signals++;
        term_exit();
    
        if(received_nb_signals > 3)
            exit(123);
    }

    void term_init(void)
    {
    #if HAVE_TERMIOS_H
        if(!run_as_daemon){
            struct termios tty;
            int istty = 1;
    #if HAVE_ISATTY
            istty = isatty(0) && isatty(2);
    #endif
            if (istty && tcgetattr (0, &tty) == 0) {
                oldtty = tty;
                restore_tty = 1;
                atexit(term_exit);
    
                tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                                 |INLCR|IGNCR|ICRNL|IXON);
                tty.c_oflag |= OPOST;
                tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
                tty.c_cflag &= ~(CSIZE|PARENB);
                tty.c_cflag |= CS8;
                tty.c_cc[VMIN] = 1;
                tty.c_cc[VTIME] = 0;
    
                tcsetattr (0, TCSANOW, &tty);
            }
            signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
        }
    #endif
    
    
        signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    
        signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    #ifdef SIGXCPU
        signal(SIGXCPU, sigterm_handler);
    #endif
    }

    /* read a key without blocking */
    static int read_key(void)
    {
        unsigned char ch;
    #if HAVE_TERMIOS_H
        int n = 1;
        struct timeval tv;
        fd_set rfds;
    
        FD_ZERO(&rfds);
        FD_SET(0, &rfds);
        tv.tv_sec = 0;
        tv.tv_usec = 0;
        n = select(1, &rfds, NULL, NULL, &tv);
        if (n > 0) {
            n = read(0, &ch, 1);
            if (n == 1)
                return ch;
    
            return n;
        }
    #elif HAVE_KBHIT
    #    if HAVE_PEEKNAMEDPIPE
        static int is_pipe;
        static HANDLE input_handle;
        DWORD dw, nchars;
        if(!input_handle){
            input_handle = GetStdHandle(STD_INPUT_HANDLE);
            is_pipe = !GetConsoleMode(input_handle, &dw);
        }
    
        if (stdin->_cnt > 0) {
            read(0, &ch, 1);
            return ch;
        }
        if (is_pipe) {
            /* When running under a GUI, you will end here. */
            if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL))
                return -1;
            //Read it
            if(nchars != 0) {
                read(0, &ch, 1);
                return ch;
            }else{
                return -1;
            }
        }
    #    endif
        if(kbhit())
            return(getch());
    #endif
        return -1;
    }

    static int decode_interrupt_cb(void *ctx)
    {
        return received_nb_signals > 1;
    }

    const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
    
    static void exit_program(void)
    {
        int i, j;
    
        for (i = 0; i < nb_filtergraphs; i++) {
            avfilter_graph_free(&filtergraphs[i]->graph);
    
            for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
                av_freep(&filtergraphs[i]->inputs[j]->name);
    
                av_freep(&filtergraphs[i]->inputs[j]);
            }
            av_freep(&filtergraphs[i]->inputs);
    
            for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
                av_freep(&filtergraphs[i]->outputs[j]->name);
    
                av_freep(&filtergraphs[i]->outputs[j]);
            }
            av_freep(&filtergraphs[i]->outputs);
            av_freep(&filtergraphs[i]);
        }
        av_freep(&filtergraphs);
    
        av_freep(&subtitle_out);
    
        for (i = 0; i < nb_output_files; i++) {
    
            AVFormatContext *s = output_files[i]->ctx;
    
            if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
                avio_close(s->pb);
            avformat_free_context(s);
    
            av_dict_free(&output_files[i]->opts);
            av_freep(&output_files[i]);
        }
        for (i = 0; i < nb_output_streams; i++) {
    
            AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
    
            while (bsfc) {
                AVBitStreamFilterContext *next = bsfc->next;
                av_bitstream_filter_close(bsfc);
                bsfc = next;
            }
    
            output_streams[i]->bitstream_filters = NULL;
    
            avcodec_free_frame(&output_streams[i]->filtered_frame);
    
            av_freep(&output_streams[i]->forced_keyframes);
    
            av_freep(&output_streams[i]->avfilter);
    
            av_freep(&output_streams[i]->logfile_prefix);
            av_freep(&output_streams[i]);
        }
        for (i = 0; i < nb_input_files; i++) {
    
            avformat_close_input(&input_files[i]->ctx);
            av_freep(&input_files[i]);
        }
        for (i = 0; i < nb_input_streams; i++) {
    
            avcodec_free_frame(&input_streams[i]->decoded_frame);
    
            av_dict_free(&input_streams[i]->opts);
    
            free_buffer_pool(&input_streams[i]->buffer_pool);
    
            avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
    
            av_freep(&input_streams[i]->filters);
            av_freep(&input_streams[i]);
        }

        if (vstats_file)
            fclose(vstats_file);
        av_free(vstats_filename);
    
        av_freep(&input_streams);
        av_freep(&input_files);
    
        avformat_network_deinit();
    
        if (received_sigterm) {
    
            av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
                   (int) received_sigterm);
        }
    }

    void assert_avoptions(AVDictionary *m)
    {
        AVDictionaryEntry *t;
        if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
    
            av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
    
    static void abort_codec_experimental(AVCodec *c, int encoder)
    
    static void update_benchmark(const char *fmt, ...)
    
        if (do_benchmark_all) {
            int64_t t = getutime();
            va_list va;
            char buf[1024];
    
            if (fmt) {
                va_start(va, fmt);
                vsnprintf(buf, sizeof(buf), fmt, va);
                va_end(va);
                printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
    
    static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    
        AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
        AVCodecContext          *avctx = ost->st->codec;
    
        if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
            pkt->pts = pkt->dts = AV_NOPTS_VALUE;
    
        if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE) {
    
            int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
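            /* 'max' is the smallest dts this muxer should accept: the previous
               dts plus one tick, except for AVFMT_TS_NONSTRICT muxers, which
               tolerate equal timestamps. */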
            if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE &&  max > pkt->dts) {
    
                av_log(s, max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG,
    
                       "st:%d PTS: %"PRId64" DTS: %"PRId64" < %"PRId64" invalid, clipping\n", pkt->stream_index, pkt->pts, pkt->dts, max);
    
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }

        /*
         * Audio encoders may split the packets --  #frames in != #packets out.
         * But there is no reordering, so we can limit the number of output packets
         * by simply dropping them here.
         * Counting encoded video frames needs to be done separately because of
         * reordering, see do_video_out()
         */
        if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
    
            if (ost->frame_number >= ost->max_frames) {
                av_free_packet(pkt);
                return;
            }
            ost->frame_number++;
        }

        while (bsfc) {
            AVPacket new_pkt = *pkt;
            int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
                                               &new_pkt.data, &new_pkt.size,
                                               pkt->data, pkt->size,
                                               pkt->flags & AV_PKT_FLAG_KEY);
    
            if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
                uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
                if(t) {
                    memcpy(t, new_pkt.data, new_pkt.size);
                    memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                    new_pkt.data = t;
                    a = 1;
                } else
                    a = AVERROR(ENOMEM);
            }
    
            if (a > 0) {
    
                av_free_packet(pkt);
    
                new_pkt.destruct = av_destruct_packet;
            } else if (a < 0) {
    
                av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
    
                       bsfc->filter->name, pkt->stream_index,
                       avctx->codec ? avctx->codec->name : "copy");
    
                print_error("", a);
                if (exit_on_error)
                    exit(1);
            }
            *pkt = new_pkt;
    
            bsfc = bsfc->next;
        }

        pkt->stream_index = ost->index;
    
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
    
                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
    
                    av_get_media_type_string(ost->st->codec->codec_type),
                    av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
    
                    av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
                    pkt->size
                    );
        }

        ret = av_interleaved_write_frame(s, pkt);
        if (ret < 0) {
    
            print_error("av_interleaved_write_frame()", ret);
    
    static void close_output_stream(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
    
        ost->finished = 1;
        if (of->shortest) {
            int i;
            for (i = 0; i < of->ctx->nb_streams; i++)
                output_streams[of->ost_index + i]->finished = 1;
        }
    }
    
    
    static int check_recording_time(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
    
        if (of->recording_time != INT64_MAX &&
            av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
                          AV_TIME_BASE_Q) >= 0) {
    
            close_output_stream(ost);
            return 0;
        }
        return 1;
    }

    static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                             AVFrame *frame)
    
    {
        AVCodecContext *enc = ost->st->codec;
        AVPacket pkt;
    
        int got_packet = 0;
    
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
    
        if (!check_recording_time(ost))
            return;
    
        if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
    
            frame->pts = ost->sync_opts;
    
        ost->sync_opts = frame->pts + frame->nb_samples;
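        /* sync_opts tracks the audio position in samples: frame->pts is assumed
           to be in enc->time_base units (normally 1/sample_rate), so the next
           frame's fallback pts follows this one back to back. */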
    
        av_assert0(pkt.size || !pkt.data);
        update_benchmark(NULL);
    
        if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
    
            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
    
        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
    
        if (got_packet) {
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
    
            if (pkt.dts != AV_NOPTS_VALUE)
                pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
    
            if (pkt.duration > 0)
                pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
    
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
            }
    
            audio_size += pkt.size;
    
            write_frame(s, &pkt, ost);
    
            av_free_packet(&pkt);
        }
    }

    static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
    {
        AVCodecContext *dec;
        AVPicture *picture2;
        AVPicture picture_tmp;
        uint8_t *buf = 0;
    
        dec = ist->st->codec;
    
        /* deinterlace : must be done before any resize */
        if (do_deinterlace) {
            int size;
    
            /* create temporary picture */
            size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
    
            buf  = av_malloc(size);
    
            picture2 = &picture_tmp;
            avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
    
            if (avpicture_deinterlace(picture2, picture,
    
                                     dec->pix_fmt, dec->width, dec->height) < 0) {
                /* if error, do not deinterlace */
    
                av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
    
                av_free(buf);
                buf = NULL;
                picture2 = picture;
            }
        } else {
            picture2 = picture;
        }

        if (picture != picture2)
            *picture = *picture2;
        *bufp = buf;
    }

    static void do_subtitle_out(AVFormatContext *s,
                                OutputStream *ost,
                                InputStream *ist,
                                AVSubtitle *sub)
    {
        int subtitle_out_max_size = 1024 * 1024;
        int subtitle_out_size, nb, i;
        AVCodecContext *enc;
        AVPacket pkt;
        int64_t pts;

        if (sub->pts == AV_NOPTS_VALUE) {
    
            av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
    
        enc = ost->st->codec;
    
        if (!subtitle_out) {
            subtitle_out = av_malloc(subtitle_out_max_size);
        }

        /* Note: DVB subtitles need one packet to draw them and one other
           packet to clear them */
        /* XXX: signal it in the codec context ? */
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
            nb = 2;
        else
            nb = 1;

        /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    
        pts = sub->pts - output_files[ost->file_index]->start_time;
    
        for (i = 0; i < nb; i++) {
    
            ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
    
            if (!check_recording_time(ost))
                return;
    
            // start_display_time is required to be 0
    
            sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
            sub->end_display_time  -= sub->start_display_time;
    
            sub->start_display_time = 0;
    
            if (i == 1)
                sub->num_rects = 0;
    
            subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                        subtitle_out_max_size, sub);
            if (subtitle_out_size < 0) {
    
                av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
    
            }
    
            av_init_packet(&pkt);
            pkt.data = subtitle_out;
            pkt.size = subtitle_out_size;
    
            pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
    
            pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
    
            if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
    
                /* XXX: the pts correction is handled here. Maybe handling
                   it in the codec would be better */
                if (i == 0)
                    pkt.pts += 90 * sub->start_display_time;
                else
                    pkt.pts += 90 * sub->end_display_time;
            }
    
            subtitle_size += pkt.size;
    
            write_frame(s, &pkt, ost);
        }
    }

    static void do_video_out(AVFormatContext *s,
                             OutputStream *ost,
                             AVFrame *in_picture)
    {
        int ret, format_video_sync;
        AVPacket pkt;
        AVCodecContext *enc = ost->st->codec;
    
        int nb_frames, i;
        double sync_ipts, delta;
        double duration = 0;
        int frame_size = 0;
        InputStream *ist = NULL;
    
        if (ost->source_index >= 0)
            ist = input_streams[ost->source_index];
    
        if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
            duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
    
        sync_ipts = in_picture->pts;
        delta = sync_ipts - ost->sync_opts + duration;
    
        /* by default, we output a single frame */
        nb_frames = 1;
    
        format_video_sync = video_sync_method;
    
        if (format_video_sync == VSYNC_AUTO)
    
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : 1;
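        /* i.e. muxers that accept a variable frame rate get VFR (or plain
           passthrough when the format carries no timestamps at all); everything
           else falls back to constant frame rate (the literal 1 is VSYNC_CFR). */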
    
        switch (format_video_sync) {
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1)
                nb_frames = lrintf(delta);
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = lrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    
        nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
        if (nb_frames == 0) {
    
            nb_frames_drop++;
            av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
    
        } else if (nb_frames > 1) {
            if (nb_frames > dts_error_threshold * 30) {
                av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
                nb_frames_drop++;
                return;
            }
            nb_frames_dup += nb_frames - 1;
            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        }

      /* duplicates frame if needed */
      for (i = 0; i < nb_frames; i++) {
    
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
    
        in_picture->pts = ost->sync_opts;
    
        if (!check_recording_time(ost))
    
            return;
    
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
    
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
    
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
            enc->coded_frame->top_field_first  = in_picture->top_field_first;
    
            if (enc->coded_frame->interlaced_frame)
                enc->field_order = enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            else
                enc->field_order = AV_FIELD_PROGRESSIVE;
    
            pkt.data   = (uint8_t *)in_picture;
            pkt.size   =  sizeof(AVPicture);
            pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;
    
            video_size += pkt.size;
    
            write_frame(s, &pkt, ost);
    
        } else {
            int got_packet;
            AVFrame big_picture;
    
    
    
            big_picture = *in_picture;
            /* better than nothing: use input picture interlaced
               settings */
            big_picture.interlaced_frame = in_picture->interlaced_frame;
            if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
                if (ost->top_field_first == -1)
                    big_picture.top_field_first = in_picture->top_field_first;
                else
                    big_picture.top_field_first = !!ost->top_field_first;
            }
    
            if (big_picture.interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    enc->field_order = big_picture.top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc->field_order = big_picture.top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc->field_order = AV_FIELD_PROGRESSIVE;
    
    
            big_picture.quality = ost->st->codec->global_quality;
    
            if (!enc->me_threshold)
                big_picture.pict_type = 0;
            if (ost->forced_kf_index < ost->forced_kf_count &&
                big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                big_picture.pict_type = AV_PICTURE_TYPE_I;
                ost->forced_kf_index++;
            }
    
            update_benchmark(NULL);
    
            ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
    
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
    
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            }
    
            if (got_packet) {
    
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;
    
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                        "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                        av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                        av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
                }
    
                frame_size = pkt.size;
    
                video_size += pkt.size;
    
                write_frame(s, &pkt, ost);
    
                av_free_packet(&pkt);
    
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
      }

        if (vstats_filename && frame_size)
    
            do_video_stats(ost, frame_size);
    
    }
    
    
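    /* Convert a (normalized) mean squared error into PSNR in dB.
       Worked example, assuming 8-bit samples as in do_video_stats() below,
       where the error sum is divided by width*height*255*255: an MSE ratio
       of 0.0001 gives -10 * log10(0.0001) = 40 dB. */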
    static double psnr(double d)
    {
        return -10.0 * log(d) / log(10.0);
    }

    static void do_video_stats(OutputStream *ost, int frame_size)
    {
        AVCodecContext *enc;
        int frame_number;
        double ti1, bitrate, avg_bitrate;
    
        /* this is executed just the first time do_video_stats is called */
        if (!vstats_file) {
            vstats_file = fopen(vstats_filename, "w");
            if (!vstats_file) {
                perror("fopen");
    
        enc = ost->st->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
            frame_number = ost->st->nb_frames;
    
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
    
            if (enc->flags&CODEC_FLAG_PSNR)
    
                fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
    
            fprintf(vstats_file,"f_size= %6d ", frame_size);
            /* compute pts value */
    
            ti1 = ost->st->pts.val * av_q2d(enc->time_base);
    
            if (ti1 < 0.01)
                ti1 = 0.01;
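            /* Rough worked example: with enc->time_base == 1/25 (25 fps), a
               25000-byte frame gives (25000*8) / (1/25) / 1000 = 5000 kbits/s
               for the instantaneous bitrate computed below. */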
    
            bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
    
            avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
            fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
    
                   (double)video_size / 1024, ti1, bitrate, avg_bitrate);
    
            fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
    
     * Get and encode new output from any of the filtergraphs, without causing
     * activity.
     *
     * @return  0 for success, <0 for severe errors
     */
    static int reap_filters(void)
    
    {
        AVFilterBufferRef *picref;
        AVFrame *filtered_frame = NULL;
        int i;

        /* Reap all buffers present in the buffer sinks */
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            OutputFile    *of = output_files[ost->file_index];
            int ret = 0;
    
            if (!ost->filter)
                continue;