    /*
     * avconv main
     * Copyright (c) 2000-2011 The libav developers.
     *
     * This file is part of Libav.
     *
     * Libav is free software; you can redistribute it and/or
     * modify it under the terms of the GNU Lesser General Public
     * License as published by the Free Software Foundation; either
     * version 2.1 of the License, or (at your option) any later version.
     *
     * Libav is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     * Lesser General Public License for more details.
     *
     * You should have received a copy of the GNU Lesser General Public
     * License along with Libav; if not, write to the Free Software
     * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
     */
    
    #include "config.h"
    #include <ctype.h>
    #include <string.h>
    #include <math.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <signal.h>
    #include <limits.h>
    #include <unistd.h>
    #include "libavformat/avformat.h"
    #include "libavdevice/avdevice.h"
    #include "libswscale/swscale.h"
    
    #include "libavresample/avresample.h"
    
    #include "libavutil/opt.h"
    #include "libavutil/audioconvert.h"
    #include "libavutil/parseutils.h"
    #include "libavutil/samplefmt.h"
    #include "libavutil/colorspace.h"
    #include "libavutil/fifo.h"
    #include "libavutil/intreadwrite.h"
    #include "libavutil/dict.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/pixdesc.h"
    #include "libavutil/avstring.h"
    #include "libavutil/libm.h"
    
    #include "libavutil/imgutils.h"
    
    #include "libavformat/os_support.h"
    
    # include "libavfilter/avfilter.h"
    # include "libavfilter/avfiltergraph.h"
    
    # include "libavfilter/buffersrc.h"
    
    # include "libavfilter/buffersink.h"
    
    # include "libavfilter/vsrc_buffer.h"
    
    #if HAVE_SYS_RESOURCE_H
    #include <sys/types.h>
    #include <sys/time.h>
    #include <sys/resource.h>
    #elif HAVE_GETPROCESSTIMES
    #include <windows.h>
    #endif
    #if HAVE_GETPROCESSMEMORYINFO
    #include <windows.h>
    #include <psapi.h>
    #endif
    
    #if HAVE_SYS_SELECT_H
    #include <sys/select.h>
    #endif
    
    #include <time.h>
    
    #include "cmdutils.h"
    
    #include "libavutil/avassert.h"
    
    
    #define VSYNC_AUTO       -1
    #define VSYNC_PASSTHROUGH 0
    #define VSYNC_CFR         1
    #define VSYNC_VFR         2
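
    /* Frame sync modes selected with -vsync: PASSTHROUGH forwards each frame
     * with its original timestamp, CFR duplicates or drops frames to produce a
     * constant frame rate, VFR only drops frames so that no two frames share a
     * timestamp, and AUTO picks between CFR and VFR based on the output muxer. */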
    
    
    const char program_name[] = "avconv";
    const int program_birth_year = 2000;
    
    /* select an input stream for an output stream */
    typedef struct StreamMap {
    
        int disabled;           /** 1 if this mapping is disabled by a negative map */
    
        int file_index;
        int stream_index;
        int sync_file_index;
        int sync_stream_index;
    
        char *linklabel;       /** name of an output link, for mapping lavfi outputs */
    
    } StreamMap;
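
    /* Filled from -map options; e.g. "-map 1:3" selects stream 3 of input
     * file 1, a leading '-' as in "-map -0:1" disables matching mappings by
     * setting the disabled flag, and mapping a labeled filtergraph output
     * (e.g. "-map [out]") stores the label in linklabel instead of a
     * file/stream pair. */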
    
    /**
     * select an input file for an output file
     */
    typedef struct MetadataMap {
    
        int  file;      ///< file index
        char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
        int  index;     ///< stream/chapter/program number
    
    } MetadataMap;
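
    /* Populated from -map_metadata; for example "-map_metadata 0" copies the
     * global metadata of input file 0 into the output file, while stream,
     * chapter and program metadata are selected with the corresponding
     * specifier characters listed above. */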
    
    static const OptionDef options[];
    
    static int video_discard = 0;
    
    static int same_quant = 0;
    
    static int do_deinterlace = 0;
    static int intra_dc_precision = 8;
    static int qp_hist = 0;
    
    static int file_overwrite = 0;
    static int do_benchmark = 0;
    static int do_hex_dump = 0;
    static int do_pkt_dump = 0;
    static int do_pass = 0;
    static char *pass_logfilename_prefix = NULL;
    
    static int video_sync_method = VSYNC_AUTO;
    
    static int audio_sync_method = 0;
    static float audio_drift_threshold = 0.1;
    static int copy_ts = 0;
    
    static int copy_tb = 1;
    
    static int opt_shortest = 0;
    static char *vstats_filename;
    static FILE *vstats_file;
    
    static int audio_volume = 256;
    
    static int exit_on_error = 0;
    static int using_stdin = 0;
    static int64_t video_size = 0;
    static int64_t audio_size = 0;
    static int64_t extra_size = 0;
    static int nb_frames_dup = 0;
    static int nb_frames_drop = 0;
    static int input_sync;
    
    static float dts_delta_threshold = 10;
    
    
    #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
    
    
    typedef struct InputFilter {
        AVFilterContext    *filter;
        struct InputStream *ist;
        struct FilterGraph *graph;
    } InputFilter;
    
    typedef struct OutputFilter {
        AVFilterContext     *filter;
        struct OutputStream *ost;
        struct FilterGraph  *graph;
    
    
        /* temporary storage until stream maps are processed */
        AVFilterInOut       *out_tmp;
    
    } OutputFilter;
    
    typedef struct FilterGraph {
    
        int            index;
        const char    *graph_desc;
    
    
        AVFilterGraph *graph;
    
        InputFilter   **inputs;
        int          nb_inputs;
        OutputFilter **outputs;
        int         nb_outputs;
    } FilterGraph;
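
    /* A FilterGraph connects libavfilter to avconv's streams: each InputFilter
     * is fed decoded frames from its InputStream and each OutputFilter feeds an
     * encoder through its OutputStream.  A "simple" graph (see
     * init_simple_filtergraph() below) has exactly one input and one output;
     * graphs built from a user-supplied description in graph_desc may have
     * several of each, with unlabeled pads bound to streams in
     * init_input_filter(). */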
    
    
    typedef struct FrameBuffer {
        uint8_t *base[4];
        uint8_t *data[4];
        int  linesize[4];
    
        int h, w;
        enum PixelFormat pix_fmt;
    
        int refcount;
        struct InputStream *ist;
        struct FrameBuffer *next;
    } FrameBuffer;
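
    /* FrameBuffers form a per-InputStream free list rooted at buffer_pool:
     * codec_get_buffer() pops an entry (calling alloc_buffer() when the pool is
     * empty or the frame geometry changed) and raises refcount,
     * codec_release_buffer() and filter_release_buffer() drop references via
     * unref_buffer(), which returns the buffer to the pool, and
     * free_buffer_pool() finally frees the whole list. */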
    
    
    typedef struct InputStream {
        int file_index;
        AVStream *st;
        int discard;             /* true if stream data should be discarded */
        int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */
        AVCodec *dec;
    
        AVFrame *decoded_frame;
    
    
        int64_t       start;     /* time when read started */
    
        /* predicted dts of the next packet read for this stream or (when there are
         * several frames in a packet) of the next frame in current packet */
        int64_t       next_dts;
    
        /* dts of the last packet read for this stream */
        int64_t       last_dts;
    
        PtsCorrectionContext pts_ctx;
        double ts_scale;
        int is_start;            /* is 1 at the start and after a discontinuity */
        int showed_multi_packet_warning;
        AVDictionary *opts;
    
        int resample_height;
        int resample_width;
        int resample_pix_fmt;
    
    
        int      resample_sample_fmt;
        int      resample_sample_rate;
        int      resample_channels;
        uint64_t resample_channel_layout;
    
    
        /* a pool of free buffers for decoded data */
        FrameBuffer *buffer_pool;
    
    
        /* decoded data from this stream goes into all those filters
    
         * currently video and audio only */
    
        InputFilter **filters;
        int        nb_filters;
    
    } InputStream;
    
    typedef struct InputFile {
        AVFormatContext *ctx;
        int eof_reached;      /* true if eof reached */
        int ist_index;        /* index of first stream in ist_table */
        int buffer_size;      /* current total buffer size */
        int64_t ts_offset;
    
        int nb_streams;       /* number of streams that avconv is aware of; may be different
                                 from ctx.nb_streams if new streams appear during av_read_frame() */
    
    } InputFile;
    
    
    typedef struct OutputStream {
        int file_index;          /* file index */
        int index;               /* stream index in the output file */
        int source_index;        /* InputStream index */
        AVStream *st;            /* stream in the output file */
        int encoding_needed;     /* true if encoding needed for this stream */
        int frame_number;
        /* input pts and corresponding output pts
           for A/V sync */
    
        // double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
    
        struct InputStream *sync_ist; /* input stream to sync against */
    
        int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
    
        /* pts of the first frame encoded for this stream, used for limiting
         * recording time */
        int64_t first_pts;
    
        AVBitStreamFilterContext *bitstream_filters;
        AVCodec *enc;
    
        int64_t max_frames;
    
    
        /* video only */
        AVRational frame_rate;
    
        int top_field_first;
    
    
        float frame_aspect_ratio;
    
        float last_quality;
    
    
        /* forced key frames */
        int64_t *forced_kf_pts;
        int forced_kf_count;
        int forced_kf_index;
    
        FILE *logfile;
    
    
        OutputFilter *filter;
        char *avfilter;

        int64_t sws_flags;
        AVDictionary *opts;
        int is_past_recording_time;
        int stream_copy;
        const char *attachment_filename;
        int copy_initial_nonkeyframes;
    
    
        enum PixelFormat pix_fmts[2];
    } OutputStream;

    typedef struct OutputFile {
        AVFormatContext *ctx;
        AVDictionary *opts;
    
        int ost_index;       /* index of the first stream in output_streams */
    
        int64_t recording_time; /* desired length of the resulting file in microseconds */
    
        int64_t start_time;     /* start time in microseconds */
    
        uint64_t limit_filesize;
    } OutputFile;

    static InputStream **input_streams = NULL;
    static int        nb_input_streams = 0;
    static InputFile   **input_files   = NULL;
    static int        nb_input_files   = 0;
    
    static OutputStream **output_streams = NULL;
    static int         nb_output_streams = 0;
    static OutputFile   **output_files   = NULL;
    static int         nb_output_files   = 0;
    
    static FilterGraph **filtergraphs;
    int               nb_filtergraphs;
    
    
    typedef struct OptionsContext {
    
        /* input/output options */
        int64_t start_time;
    
        const char *format;
    
        SpecifierOpt *codec_names;
        int        nb_codec_names;
    
        SpecifierOpt *audio_channels;
        int        nb_audio_channels;
    
        SpecifierOpt *audio_sample_rate;
        int        nb_audio_sample_rate;
    
        SpecifierOpt *frame_rates;
        int        nb_frame_rates;
    
        SpecifierOpt *frame_sizes;
        int        nb_frame_sizes;
    
        SpecifierOpt *frame_pix_fmts;
        int        nb_frame_pix_fmts;
    
        /* input options */
        int64_t input_ts_offset;
    
        SpecifierOpt *ts_scale;
        int        nb_ts_scale;
    
        SpecifierOpt *dump_attachment;
        int        nb_dump_attachment;
    
        /* output options */
        StreamMap *stream_maps;
        int     nb_stream_maps;
    
        /* first item specifies output metadata, second is input */
        MetadataMap (*meta_data_maps)[2];
        int nb_meta_data_maps;
        int metadata_global_manual;
        int metadata_streams_manual;
        int metadata_chapters_manual;
    
        const char **attachments;
        int       nb_attachments;
    
        int chapters_input_file;
    
    
        uint64_t limit_filesize;
    
        float mux_preload;
        float mux_max_delay;
    
        int video_disable;
        int audio_disable;
        int subtitle_disable;
        int data_disable;
    
    
        /* indexed by output file stream index */
        int   *streamid_map;
        int nb_streamid_map;
    
    
        SpecifierOpt *metadata;
        int        nb_metadata;
    
        SpecifierOpt *max_frames;
        int        nb_max_frames;
    
        SpecifierOpt *bitstream_filters;
        int        nb_bitstream_filters;
    
        SpecifierOpt *codec_tags;
        int        nb_codec_tags;
    
        SpecifierOpt *sample_fmts;
        int        nb_sample_fmts;
    
        SpecifierOpt *qscale;
        int        nb_qscale;
    
        SpecifierOpt *forced_key_frames;
        int        nb_forced_key_frames;
    
        SpecifierOpt *force_fps;
        int        nb_force_fps;
    
        SpecifierOpt *frame_aspect_ratios;
        int        nb_frame_aspect_ratios;
    
        SpecifierOpt *rc_overrides;
        int        nb_rc_overrides;
    
        SpecifierOpt *intra_matrices;
        int        nb_intra_matrices;
        SpecifierOpt *inter_matrices;
        int        nb_inter_matrices;
    
        SpecifierOpt *top_field_first;
        int        nb_top_field_first;
    
        SpecifierOpt *metadata_map;
        int        nb_metadata_map;
    
        SpecifierOpt *presets;
        int        nb_presets;
    
        SpecifierOpt *copy_initial_nonkeyframes;
        int        nb_copy_initial_nonkeyframes;
    
        SpecifierOpt *filters;
        int        nb_filters;
    
    } OptionsContext;
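
    /* Per-stream options are stored as (SpecifierOpt *xxx, int nb_xxx) pairs:
     * cmdutils appends one SpecifierOpt per occurrence on the command line,
     * keeping the stream specifier (e.g. "a:0") next to the parsed value in the
     * u union, and MATCH_PER_STREAM_OPT below selects the last entry whose
     * specifier matches a given stream. */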
    
    
    #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
    {\
        int i, ret;\
        for (i = 0; i < o->nb_ ## name; i++) {\
            char *spec = o->name[i].specifier;\
            if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
                outvar = o->name[i].u.type;\
            else if (ret < 0)\
                exit_program(1);\
        }\
    }
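
    /* Typical use, assuming an OptionsContext *o in scope (the macro references
     * it directly) plus the output AVFormatContext *oc and AVStream *st:
     *
     *     char *frame_rate = NULL;
     *     MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
     *     // frame_rate now holds the -r value whose specifier matched st,
     *     // or stays NULL if none did.
     */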
    
    
    static void reset_options(OptionsContext *o)
    {
        const OptionDef *po = options;
        int i;
    
        /* all OPT_SPEC and OPT_STRING can be freed in generic way */
        while (po->name) {
            void *dst = (uint8_t*)o + po->u.off;
    
            if (po->flags & OPT_SPEC) {
                SpecifierOpt **so = dst;
                int i, *count = (int*)(so + 1);
                for (i = 0; i < *count; i++) {
                    av_freep(&(*so)[i].specifier);
                    if (po->flags & OPT_STRING)
                        av_freep(&(*so)[i].u.str);
                }
                av_freep(so);
                *count = 0;
            } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
                av_freep(dst);
            po++;
        }
    
    
        for (i = 0; i < o->nb_stream_maps; i++)
            av_freep(&o->stream_maps[i].linklabel);
    
        av_freep(&o->stream_maps);
    
        av_freep(&o->meta_data_maps);
    
        av_freep(&o->streamid_map);
    
    
        memset(o, 0, sizeof(*o));
    
        o->limit_filesize = UINT64_MAX;
    
        o->chapters_input_file = INT_MAX;
    
        uninit_opts();
        init_opts();
    }
    
    
    static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
    
    {
        FrameBuffer  *buf = av_mallocz(sizeof(*buf));
    
        int i, ret;
    
        const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
        int h_chroma_shift, v_chroma_shift;
        int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
        int w = s->width, h = s->height;
    
        if (!buf)
            return AVERROR(ENOMEM);
    
        if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
            w += 2*edge;
            h += 2*edge;
        }
    
        avcodec_align_dimensions(s, &w, &h);
        if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
                                  s->pix_fmt, 32)) < 0) {
            av_freep(&buf);
            return ret;
        }
        /* XXX this shouldn't be needed, but some tests break without this line
         * those decoders are buggy and need to be fixed.
         * the following tests fail:
         * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
         */
        memset(buf->base[0], 128, ret);
    
        avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
    
        for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
    
            const int h_shift = i==0 ? 0 : h_chroma_shift;
            const int v_shift = i==0 ? 0 : v_chroma_shift;
            if (s->flags & CODEC_FLAG_EMU_EDGE)
                buf->data[i] = buf->base[i];
            else
                buf->data[i] = buf->base[i] +
                               FFALIGN((buf->linesize[i]*edge >> v_shift) +
                                       (pixel_size*edge >> h_shift), 32);
        }
        buf->w       = s->width;
        buf->h       = s->height;
        buf->pix_fmt = s->pix_fmt;
        buf->ist     = ist;
    
        *pbuf = buf;
        return 0;
    }
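
    /* Worked example of the offset computation in alloc_buffer() when
     * CODEC_FLAG_EMU_EDGE is not set, for yuv420p (pixel_size 1, chroma shifts
     * 1/1) and edge = 32: the luma pointer is advanced by linesize[0]*32 + 32
     * bytes and each chroma pointer by linesize[i]*16 + 16 bytes, with both
     * offsets rounded up to a multiple of 32 by FFALIGN, so the visible picture
     * starts on an aligned address inside the edge-padded allocation. */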
    
    static void free_buffer_pool(InputStream *ist)
    {
        FrameBuffer *buf = ist->buffer_pool;
        while (buf) {
            ist->buffer_pool = buf->next;
            av_freep(&buf->base[0]);
            av_free(buf);
            buf = ist->buffer_pool;
        }
    }
    
    static void unref_buffer(InputStream *ist, FrameBuffer *buf)
    {
        av_assert0(buf->refcount);
        buf->refcount--;
        if (!buf->refcount) {
            buf->next = ist->buffer_pool;
            ist->buffer_pool = buf;
        }
    }
    
    static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
    {
        InputStream *ist = s->opaque;
        FrameBuffer *buf;
        int ret, i;
    
    
        if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
    
            return ret;
    
        buf              = ist->buffer_pool;
        ist->buffer_pool = buf->next;
        buf->next        = NULL;
        if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
            av_freep(&buf->base[0]);
            av_free(buf);
    
            if ((ret = alloc_buffer(ist, s, &buf)) < 0)
    
                return ret;
        }
        buf->refcount++;
    
        frame->opaque        = buf;
        frame->type          = FF_BUFFER_TYPE_USER;
        frame->extended_data = frame->data;
        frame->pkt_pts       = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
    
        frame->width         = buf->w;
        frame->height        = buf->h;
        frame->format        = buf->pix_fmt;
        frame->sample_aspect_ratio = s->sample_aspect_ratio;
    
    
        for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
            frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
            frame->data[i]     = buf->data[i];
            frame->linesize[i] = buf->linesize[i];
        }
    
        return 0;
    }
    
    static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
    {
        InputStream *ist = s->opaque;
        FrameBuffer *buf = frame->opaque;
        int i;
    
        for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
            frame->data[i] = NULL;
    
        unref_buffer(ist, buf);
    }
    
    
    static void filter_release_buffer(AVFilterBuffer *fb)
    {
        FrameBuffer *buf = fb->priv;
        av_free(fb);
        unref_buffer(buf->ist, buf);
    }
    
    
    /**
     * Define a function for building a string containing a list of
     * allowed formats.
     */
    #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
    static char *choose_ ## var ## s(OutputStream *ost)                             \
    {                                                                               \
        if (ost->st->codec->var != none) {                                          \
            get_name(ost->st->codec->var);                                          \
            return av_strdup(name);                                                 \
        } else if (ost->enc->supported_list) {                                      \
            const type *p;                                                          \
            AVIOContext *s = NULL;                                                  \
            uint8_t *ret;                                                           \
            int len;                                                                \
                                                                                    \
            if (avio_open_dyn_buf(&s) < 0)                                          \
                exit_program(1);                                                    \
                                                                                    \
            for (p = ost->enc->supported_list; *p != none; p++) {                   \
                get_name(*p);                                                       \
                avio_printf(s, "%s" separator, name);                               \
            }                                                                       \
            len = avio_close_dyn_buf(s, &ret);                                      \
            ret[len - 1] = 0;                                                       \
            return ret;                                                             \
        } else                                                                      \
            return NULL;                                                            \
    }
    
    #define GET_PIX_FMT_NAME(pix_fmt)\
        const char *name = av_get_pix_fmt_name(pix_fmt);
    
    DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
                      GET_PIX_FMT_NAME, ":")
    
    #define GET_SAMPLE_FMT_NAME(sample_fmt)\
        const char *name = av_get_sample_fmt_name(sample_fmt)
    
    DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
                      AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
    
    #define GET_SAMPLE_RATE_NAME(rate)\
        char name[16];\
        snprintf(name, sizeof(name), "%d", rate);
    
    DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
                      GET_SAMPLE_RATE_NAME, ",")
    
    #define GET_CH_LAYOUT_NAME(ch_layout)\
        char name[16];\
        snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
    
    DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
                      GET_CH_LAYOUT_NAME, ",")
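
    /* The generated choose_* helpers return either the single value already set
     * on the output codec context or a separator-joined list of everything the
     * encoder supports, e.g. choose_pix_fmts() might produce
     * "yuv420p:yuv422p:yuv444p" and choose_sample_rates() "44100,48000".  The
     * strings are heap-allocated and the callers below free them after building
     * the format/aformat filter arguments. */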
    
    static int configure_audio_filters(FilterGraph *fg, AVFilterContext **in_filter,
                                       AVFilterContext **out_filter)
    {
        InputStream  *ist = fg->inputs[0]->ist;
        OutputStream *ost = fg->outputs[0]->ost;
        AVCodecContext *codec  = ost->st->codec;
        AVCodecContext *icodec = ist->st->codec;
        char *sample_fmts, *sample_rates, *channel_layouts;
        char args[256];
        int ret;
    
        avfilter_graph_free(&fg->graph);
        if (!(fg->graph = avfilter_graph_alloc()))
            return AVERROR(ENOMEM);
    
        snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:"
                 "channel_layout=0x%"PRIx64, ist->st->time_base.num,
                 ist->st->time_base.den, icodec->sample_rate,
                 av_get_sample_fmt_name(icodec->sample_fmt), icodec->channel_layout);
        ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
                                           avfilter_get_by_name("abuffer"),
                                           "src", args, NULL, fg->graph);
        if (ret < 0)
            return ret;
    
        ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
                                           avfilter_get_by_name("abuffersink"),
                                           "out", NULL, NULL, fg->graph);
        if (ret < 0)
            return ret;

        *in_filter  = fg->inputs[0]->filter;
        *out_filter = fg->outputs[0]->filter;
    
        if (codec->channels && !codec->channel_layout)
            codec->channel_layout = av_get_default_channel_layout(codec->channels);
    
        sample_fmts     = choose_sample_fmts(ost);
        sample_rates    = choose_sample_rates(ost);
        channel_layouts = choose_channel_layouts(ost);
        if (sample_fmts || sample_rates || channel_layouts) {
            AVFilterContext *format;
            char args[256];
            int len = 0;
    
            if (sample_fmts)
                len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
                                sample_fmts);
            if (sample_rates)
                len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
                                sample_rates);
            if (channel_layouts)
                len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
                                channel_layouts);
            args[len - 1] = 0;
    
            av_freep(&sample_fmts);
            av_freep(&sample_rates);
            av_freep(&channel_layouts);
    
            ret = avfilter_graph_create_filter(&format,
                                               avfilter_get_by_name("aformat"),
                                               "aformat", args, NULL, fg->graph);
            if (ret < 0)
                return ret;
    
            ret = avfilter_link(format, 0, fg->outputs[0]->filter, 0);
            if (ret < 0)
                return ret;
    
            *out_filter = format;
        }
    
    
        if (audio_sync_method > 0) {
            AVFilterContext *async;
            char args[256];
            int  len = 0;
    
            av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Use the "
                   "asyncts audio filter instead.\n");
    
            if (audio_sync_method > 1)
                len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
                                "max_comp=%d:", audio_sync_method);
            snprintf(args + len, sizeof(args) - len, "min_delta=%f",
                     audio_drift_threshold);
    
            ret = avfilter_graph_create_filter(&async,
                                               avfilter_get_by_name("asyncts"),
                                               "async", args, NULL, fg->graph);
            if (ret < 0)
                return ret;
    
            ret = avfilter_link(*in_filter, 0, async, 0);
            if (ret < 0)
                return ret;
    
            *in_filter = async;
        }

        return 0;
    }

    static int configure_video_filters(FilterGraph *fg, AVFilterContext **in_filter,
                                       AVFilterContext **out_filter)
    {
        InputStream  *ist = fg->inputs[0]->ist;
        OutputStream *ost = fg->outputs[0]->ost;
    
        AVFilterContext *filter;
    
        AVCodecContext *codec = ost->st->codec;
        char *pix_fmts;
        AVRational sample_aspect_ratio;
        char args[255];
        int ret;
    
    
        if (ist->st->sample_aspect_ratio.num) {
    
            sample_aspect_ratio = ist->st->sample_aspect_ratio;
    
        } else
    
            sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
    
        snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
    
                 ist->st->codec->height, ist->st->codec->pix_fmt,
                 ist->st->time_base.num, ist->st->time_base.den,
    
                 sample_aspect_ratio.num, sample_aspect_ratio.den);
    
    
        ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
                                           avfilter_get_by_name("buffer"),
                                           "src", args, NULL, fg->graph);
    
        if (ret < 0)
            return ret;
    
        ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
                                           avfilter_get_by_name("buffersink"),
                                           "out", NULL, NULL, fg->graph);
    
        if (ret < 0)
            return ret;
    
        *in_filter  = fg->inputs[0]->filter;
        *out_filter = fg->outputs[0]->filter;
    
        if (codec->width || codec->height) {
    
            snprintf(args, 255, "%d:%d:flags=0x%X",
                     codec->width,
                     codec->height,
    
                     (unsigned)ost->sws_flags);
    
            if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                    NULL, args, NULL, fg->graph)) < 0)
                return ret;

            if ((ret = avfilter_link(*in_filter, 0, filter, 0)) < 0)
                return ret;

            *in_filter = filter;
        }

        if (ost->frame_rate.num) {
            snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                     ost->frame_rate.den);
            ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("fps"),
                                               "fps", args, NULL, fg->graph);
            if (ret < 0)
                return ret;
    
            ret = avfilter_link(filter, 0, *out_filter, 0);
            if (ret < 0)
                return ret;
            *out_filter = filter;
        }
    
    
        if ((pix_fmts = choose_pix_fmts(ost))) {
    
            if ((ret = avfilter_graph_create_filter(&filter,
                                                    avfilter_get_by_name("format"),
                                                    "format", pix_fmts, NULL,
                                                    fg->graph)) < 0)
                return ret;
    
            if ((ret = avfilter_link(filter, 0, *out_filter, 0)) < 0)
                return ret;

            *out_filter = filter;
            av_freep(&pix_fmts);
        }

        snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
    
        fg->graph->scale_sws_opts = av_strdup(args);
    
        return 0;
    }
    
    static int configure_simple_filtergraph(FilterGraph *fg)
    {
        OutputStream *ost = fg->outputs[0]->ost;
        AVFilterContext *in_filter, *out_filter;
        int ret;
    
        avfilter_graph_free(&fg->graph);
        fg->graph = avfilter_graph_alloc();
    
        switch (ost->st->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = configure_video_filters(fg, &in_filter, &out_filter);
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = configure_audio_filters(fg, &in_filter, &out_filter);
            break;
        default: av_assert0(0);
        }
        if (ret < 0)
            return ret;
    
    
        if (ost->avfilter) {
    
            AVFilterInOut *outputs = avfilter_inout_alloc();
            AVFilterInOut *inputs  = avfilter_inout_alloc();
    
    
            outputs->name    = av_strdup("in");
    
            outputs->filter_ctx = in_filter;
    
            outputs->pad_idx = 0;
            outputs->next    = NULL;
    
            inputs->name    = av_strdup("out");
    
            inputs->filter_ctx = out_filter;
    
            inputs->pad_idx = 0;
            inputs->next    = NULL;
    
    
            if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
                return ret;
        } else {
            if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
                return ret;
        }

        if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
            return ret;

        ost->filter = fg->outputs[0];

        return 0;
    }

    static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
    {
        FilterGraph *fg = av_mallocz(sizeof(*fg));
    
        if (!fg)
            exit_program(1);
    
        fg->index = nb_filtergraphs;
    
    
        fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
                                 fg->nb_outputs + 1);
        if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
            exit_program(1);
        fg->outputs[0]->ost   = ost;
        fg->outputs[0]->graph = fg;
    
        fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
                                fg->nb_inputs + 1);
        if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
            exit_program(1);
        fg->inputs[0]->ist   = ist;
        fg->inputs[0]->graph = fg;
    
        ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
                                  &ist->nb_filters, ist->nb_filters + 1);
        ist->filters[ist->nb_filters - 1] = fg->inputs[0];
    
        filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
                                  &nb_filtergraphs, nb_filtergraphs + 1);
        filtergraphs[nb_filtergraphs - 1] = fg;
    
        return fg;
    }
    
    
    static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
    {
        InputStream *ist;
        enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
        int i;
    
        // TODO: support other filter types
    
        if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
            av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
                   "currently.\n");
    
            exit_program(1);
        }
    
        if (in->name) {
            AVFormatContext *s;
            AVStream       *st = NULL;
            char *p;
            int file_idx = strtol(in->name, &p, 0);
    
    
            if (file_idx < 0 || file_idx >= nb_input_files) {
    
                av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
                       file_idx, fg->graph_desc);
                exit_program(1);
            }
            s = input_files[file_idx]->ctx;
    
            for (i = 0; i < s->nb_streams; i++) {
                if (s->streams[i]->codec->codec_type != type)
                    continue;
                if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                    st = s->streams[i];
                    break;
                }
            }
            if (!st) {
                av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                       "matches no streams.\n", p, fg->graph_desc);
                exit_program(1);
            }
            ist = input_streams[input_files[file_idx]->ist_index + st->index];
        } else {
            /* find the first unused stream of corresponding type */
            for (i = 0; i < nb_input_streams; i++) {
                ist = input_streams[i];
                if (ist->st->codec->codec_type == type && ist->discard)
                    break;
            }
            if (i == nb_input_streams) {
                av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                       "unlabeled input pad %d on filter %s", in->pad_idx,
                       in->filter_ctx->name);
                exit_program(1);
            }
        }
        ist->discard         = 0;
        ist->decoding_needed = 1;
        ist->st->discard = AVDISCARD_NONE;
    
        fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
                                &fg->nb_inputs, fg->nb_inputs + 1);
        if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
            exit_program(1);
        fg->inputs[fg->nb_inputs - 1]->ist   = ist;
        fg->inputs[fg->nb_inputs - 1]->graph = fg;
    
        ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
                                  &ist->nb_filters, ist->nb_filters + 1);
        ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
    }
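
    /* A labeled filtergraph input such as "[0:v]" is resolved above by parsing
     * the file index and an optional stream specifier, while an unlabeled pad
     * is bound to the first input stream of the matching type that is not
     * already in use; either way the chosen stream is switched from discard to
     * decode and attached to this graph. */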
    
    
    static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
    {
        AVCodecContext *codec = ofilter->ost->st->codec;
        AVFilterContext *last_filter = out->filter_ctx;
        AVFilterContext *filter;
        char *pix_fmts = choose_pix_fmts(ofilter->ost);
        int pad_idx = out->pad_idx;
        int ret;

        ret = avfilter_graph_create_filter(&ofilter->filter,
                                           avfilter_get_by_name("buffersink"),
                                           "out", NULL, pix_fmts, fg->graph);
    
        if (ret < 0)
            return ret;
    
        if (codec->width || codec->height) {
            char args[255];
    
            snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
                     codec->width,
                     codec->height,
                     (unsigned)ofilter->ost->sws_flags);
    
            if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
    
                                                    NULL, args, NULL, fg->graph)) < 0)
                return ret;
    
            if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)