/*
 * Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
    
    /**
     * @file
     * multimedia converter based on the FFmpeg libraries
     */
    
    
    #include "config.h"
    #include <ctype.h>
    #include <string.h>
    #include <math.h>
    #include <stdlib.h>
    #include <errno.h>
    
    #include <limits.h>
    
    #if HAVE_IO_H
    #include <io.h>
    #endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

    #include "libavformat/avformat.h"
    #include "libavdevice/avdevice.h"
    
    Clément Bœsch's avatar
    Clément Bœsch committed
    #include "libswresample/swresample.h"
    
    #include "libavutil/opt.h"
    
    #include "libavutil/channel_layout.h"
    
    #include "libavutil/parseutils.h"
    #include "libavutil/samplefmt.h"
    
    #include "libavutil/fifo.h"
    
    #include "libavutil/internal.h"
    
    #include "libavutil/intreadwrite.h"
    
    #include "libavutil/dict.h"
    
    #include "libavutil/mathematics.h"
    
    #include "libavutil/avstring.h"
    
    #include "libavutil/libm.h"
    
    #include "libavutil/imgutils.h"
    
    #include "libavutil/timestamp.h"
    
    #include "libavutil/bprint.h"
    
    #include "libavutil/time.h"
    
    #include "libavutil/threadmessage.h"
    
    #include "libavcodec/mathops.h"
    
    #include "libavformat/os_support.h"
    
    # include "libavfilter/avfilter.h"
    
    # include "libavfilter/buffersrc.h"
    
    Clément Bœsch's avatar
    Clément Bœsch committed
    # include "libavfilter/buffersink.h"
    
    #if HAVE_SYS_RESOURCE_H
    
    #include <sys/time.h>
    
    #include <sys/resource.h>
    
    #elif HAVE_GETPROCESSTIMES
    
    #include <windows.h>
    #endif
    
    #if HAVE_GETPROCESSMEMORYINFO
    #include <windows.h>
    #include <psapi.h>
    #endif
    
    #if HAVE_SETCONSOLECTRLHANDLER
    #include <windows.h>
    #endif
    
    
    #if HAVE_SYS_SELECT_H
    
    #include <sys/select.h>
    #endif
    
    
    #if HAVE_TERMIOS_H
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/time.h>
    #include <termios.h>
    #elif HAVE_KBHIT
    
    #include <conio.h>
    
    #endif
    
    #if HAVE_PTHREADS
    #include <pthread.h>
    #endif
    
    
#include "ffmpeg.h"
#include "cmdutils.h"
    
    
    #include "libavutil/avassert.h"
    
    const char program_name[] = "ffmpeg";
    
    const int program_birth_year = 2000;
    
    static FILE *vstats_file;
    
    const char *const forced_keyframes_const_names[] = {
        "n",
        "n_forced",
        "prev_forced_n",
        "prev_forced_t",
        "t",
        NULL
    };
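
/* Variable names available to the -force_key_frames expression; each is
   evaluated per output video frame when deciding whether to force a
   keyframe. */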
    
    
    static void do_video_stats(OutputStream *ost, int frame_size);
    
    static int64_t getutime(void);
    
    static int64_t getmaxrss(void);
    
    static int run_as_daemon  = 0;
    
    static int nb_frames_dup = 0;
    
    static unsigned dup_warning = 1000;
    
    static int nb_frames_drop = 0;
    
    static int64_t decode_error_stat[2];
    
    static int current_time;
    
    AVIOContext *progress_avio = NULL;
    
    static uint8_t *subtitle_out;
    
    
    InputStream **input_streams = NULL;
    int        nb_input_streams = 0;
    InputFile   **input_files   = NULL;
    int        nb_input_files   = 0;
    
    OutputStream **output_streams = NULL;
    int         nb_output_streams = 0;
    OutputFile   **output_files   = NULL;
    int         nb_output_files   = 0;
    
FilterGraph **filtergraphs;
int        nb_filtergraphs;
    
    
    #if HAVE_TERMIOS_H
    
    /* init terminal so that we can grab keys */
    static struct termios oldtty;
    
static int restore_tty;
#endif

#if HAVE_PTHREADS
static void free_input_threads(void);
#endif
    
    /* sub2video hack:
       Convert subtitles to video with alpha to insert them in filter graphs.
       This is a temporary solution until libavfilter gets real subtitles support.
     */
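
/* In outline: each subtitle stream keeps one persistent AVFrame
   (ist->sub2video.frame). sub2video_update() paints the current AVSubtitle
   onto a blank RGB32 canvas, sub2video_push_ref() feeds that frame to every
   filtergraph input attached to the stream, and sub2video_heartbeat()
   re-sends the last frame so video filters (e.g. overlay) are not starved
   while waiting for the next subtitle. */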
    
    
    static int sub2video_get_blank_frame(InputStream *ist)
    {
        int ret;
        AVFrame *frame = ist->sub2video.frame;
    
        av_frame_unref(frame);
    
        ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
        ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    
        ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
        if ((ret = av_frame_get_buffer(frame, 32)) < 0)
            return ret;
        memset(frame->data[0], 0, frame->height * frame->linesize[0]);
        return 0;
    }
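
/* Blit one palettized (PAL8) subtitle rectangle onto the RGB32 canvas,
   expanding each palette index through r->data[1]; rectangles that do not
   fit inside the canvas are skipped with a warning. */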
    
    
    static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                    AVSubtitleRect *r)
    {
        uint32_t *pal, *dst2;
        uint8_t *src, *src2;
        int x, y;
    
        if (r->type != SUBTITLE_BITMAP) {
            av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
            return;
        }
        if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
    
            av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
                r->x, r->y, r->w, r->h, w, h
            );
    
            return;
        }
    
        dst += r->y * dst_linesize + r->x * 4;
    
        src = r->data[0];
        pal = (uint32_t *)r->data[1];
    
        for (y = 0; y < r->h; y++) {
            dst2 = (uint32_t *)dst;
            src2 = src;
            for (x = 0; x < r->w; x++)
                *(dst2++) = pal[*(src2++)];
            dst += dst_linesize;
    
            src += r->linesize[0];
    
        }
    }
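
/* Send the current sub2video frame to all filter inputs fed by this stream.
   AV_BUFFERSRC_FLAG_KEEP_REF keeps ownership of the frame so the same canvas
   can be re-sent later by the heartbeat. */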
    
    static void sub2video_push_ref(InputStream *ist, int64_t pts)
    {
    
    AVFrame *frame = ist->sub2video.frame;
    int i;

        av_assert1(frame->data[0]);
        ist->sub2video.last_pts = frame->pts = pts;
    
        for (i = 0; i < ist->nb_filters; i++)
    
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
}

static void sub2video_update(InputStream *ist, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    
        int8_t *dst;
        int     dst_linesize;
    
        int num_rects, i;
        int64_t pts, end_pts;
    
    if (!frame)
        return;
    if (sub) {
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
    
                                     AV_TIME_BASE_Q, ist->st->time_base);
    
            end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
    
                                     AV_TIME_BASE_Q, ist->st->time_base);
            num_rects = sub->num_rects;
        } else {
            pts       = ist->sub2video.end_pts;
            end_pts   = INT64_MAX;
            num_rects = 0;
        }
    
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
            return;
        }
        dst          = frame->data    [0];
        dst_linesize = frame->linesize[0];
    
        for (i = 0; i < num_rects; i++)
    
            sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    
        sub2video_push_ref(ist, pts);
    
        ist->sub2video.end_pts = end_pts;
    
    }
    
    static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    {
        InputFile *infile = input_files[ist->file_index];
        int i, j, nb_reqs;
        int64_t pts2;
    
        /* When a frame is read from a file, examine all sub2video streams in
           the same file and send the sub2video frame again. Otherwise, decoded
           video frames could be accumulating in the filter graph while a filter
           (possibly overlay) is desperately waiting for a subtitle frame. */
        for (i = 0; i < infile->nb_streams; i++) {
            InputStream *ist2 = input_streams[infile->ist_index + i];
    
            if (!ist2->sub2video.frame)
    
                continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
    
            pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
            /* do not send the heartbeat frame if the subtitle is already ahead */
            if (pts2 <= ist2->sub2video.last_pts)
                continue;
    
            if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
    
                sub2video_update(ist2, NULL);
    
            for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
                nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            if (nb_reqs)
                sub2video_push_ref(ist2, pts2);
        }
    }
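
/* Flush the sub2video state at end of stream: send a final clearing update
   if the last subtitle is still displayed, then pass NULL to each buffersrc
   to signal EOF. */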
    
    static void sub2video_flush(InputStream *ist)
    {
        int i;
    
    
        if (ist->sub2video.end_pts < INT64_MAX)
            sub2video_update(ist, NULL);
    
        for (i = 0; i < ist->nb_filters; i++)
    
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
}

static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
        if(restore_tty)
            tcsetattr (0, TCSANOW, &oldtty);
#endif
}
    
    void term_exit(void)
    {
        av_log(NULL, AV_LOG_QUIET, "%s", "");
        term_exit_sigsafe();
    }
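
/* Signal state shared with the handlers below; written from the signal and
   console handlers and polled by the rest of the program. */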
    
    
    static volatile int received_sigterm = 0;
    
    static volatile int received_nb_signals = 0;
    
    static volatile int transcode_init_done = 0;
    
    static volatile int ffmpeg_exited = 0;
    
    static void
    sigterm_handler(int sig)
    
    {
        received_sigterm = sig;
    received_nb_signals++;
    term_exit_sigsafe();
    if (received_nb_signals > 3) {
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                           strlen("Received > 3 system signals, hard exiting\n"));
        exit(123);
    }
}
    
    
    #if HAVE_SETCONSOLECTRLHANDLER
    static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    {
        av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
    
        switch (fdwCtrlType)
        {
        case CTRL_C_EVENT:
        case CTRL_BREAK_EVENT:
            sigterm_handler(SIGINT);
            return TRUE;
    
        case CTRL_CLOSE_EVENT:
        case CTRL_LOGOFF_EVENT:
        case CTRL_SHUTDOWN_EVENT:
            sigterm_handler(SIGTERM);
            /* Basically, with these 3 events, when we return from this method the
               process is hard terminated, so stall as long as we need to
               to try and let the main thread(s) clean up and gracefully terminate
               (we have at most 5 seconds, but should be done far before that). */
            while (!ffmpeg_exited) {
                Sleep(0);
            }
            return TRUE;
    
        default:
            av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
            return FALSE;
        }
    }
    #endif
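
/* Install the signal/console handlers and, when stdin interaction is enabled,
   switch the controlling terminal to unbuffered, no-echo mode so single
   keypresses can be read by read_key(). */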
    
    
void term_init(void)
{
#if HAVE_TERMIOS_H
    if (!run_as_daemon && stdin_interaction) {
    
            struct termios tty;
    
            if (tcgetattr (0, &tty) == 0) {
    
                oldtty = tty;
                restore_tty = 1;
    
                tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                                 |INLCR|IGNCR|ICRNL|IXON);
                tty.c_oflag |= OPOST;
                tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
                tty.c_cflag &= ~(CSIZE|PARENB);
                tty.c_cflag |= CS8;
                tty.c_cc[VMIN] = 1;
                tty.c_cc[VTIME] = 0;
    
            tcsetattr (0, TCSANOW, &tty);
        }
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    
        signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    #ifdef SIGXCPU
        signal(SIGXCPU, sigterm_handler);
    #endif
    
    #if HAVE_SETCONSOLECTRLHANDLER
        SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
    
    /* read a key without blocking */
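/* POSIX: poll stdin with a zero-timeout select() and read one byte if ready.
   Windows: use PeekNamedPipe() when stdin is a pipe, kbhit()/getch() on a
   console. Returns the key, or -1 if no input is available. */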
static int read_key(void)
{
    unsigned char ch;
    #if HAVE_TERMIOS_H
        int n = 1;
        struct timeval tv;
        fd_set rfds;
    
        FD_ZERO(&rfds);
        FD_SET(0, &rfds);
        tv.tv_sec = 0;
        tv.tv_usec = 0;
        n = select(1, &rfds, NULL, NULL, &tv);
        if (n > 0) {
            n = read(0, &ch, 1);
            if (n == 1)
                return ch;
    
            return n;
        }
    #elif HAVE_KBHIT
    #    if HAVE_PEEKNAMEDPIPE
        static int is_pipe;
        static HANDLE input_handle;
        DWORD dw, nchars;
        if(!input_handle){
            input_handle = GetStdHandle(STD_INPUT_HANDLE);
            is_pipe = !GetConsoleMode(input_handle, &dw);
        }
    
        if (is_pipe) {
            /* When running under a GUI, you will end here. */
    
            if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
                // input pipe may have been closed by the program that ran ffmpeg
                return -1;
            }
    
            //Read it
            if(nchars != 0) {
                read(0, &ch, 1);
                return ch;
            }else{
                return -1;
            }
        }
    #    endif
        if(kbhit())
            return(getch());
    #endif
    return -1;
}

static int decode_interrupt_cb(void *ctx)
{
    return received_nb_signals > transcode_init_done;
}

const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
    
static void ffmpeg_cleanup(int ret)
{
    int i, j;
    
        if (do_benchmark) {
            int maxrss = getmaxrss() / 1024;
    
            av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    
        for (i = 0; i < nb_filtergraphs; i++) {
    
            FilterGraph *fg = filtergraphs[i];
            avfilter_graph_free(&fg->graph);
            for (j = 0; j < fg->nb_inputs; j++) {
    
                av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
    
                av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
            for (j = 0; j < fg->nb_outputs; j++) {
                av_freep(&fg->outputs[j]->name);
    
                av_freep(&fg->outputs[j]->formats);
                av_freep(&fg->outputs[j]->channel_layouts);
                av_freep(&fg->outputs[j]->sample_rates);
    
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
            av_freep(&fg->graph_desc);
    
    
            av_freep(&filtergraphs[i]);
        }
        av_freep(&filtergraphs);
    
        av_freep(&subtitle_out);
    
        for (i = 0; i < nb_output_files; i++) {
    
            OutputFile *of = output_files[i];
    
            AVFormatContext *s;
            if (!of)
                continue;
            s = of->ctx;
    
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_freep(&output_files[i]);
    }

    for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            for (j = 0; j < ost->nb_bitstream_filters; j++)
                av_bsf_free(&ost->bsf_ctx[j]);
            av_freep(&ost->bsf_ctx);
            av_freep(&ost->bsf_extradata_updated);
    
    
            av_frame_free(&ost->filtered_frame);
    
            av_frame_free(&ost->last_frame);
    
            av_dict_free(&ost->encoder_opts);
    
    
            av_parser_close(ost->parser);
    
            avcodec_free_context(&ost->parser_avctx);
    
            av_freep(&ost->forced_keyframes);
    
            av_expr_free(ost->forced_keyframes_pexpr);
    
            av_freep(&ost->avfilter);
            av_freep(&ost->logfile_prefix);
    
            av_freep(&ost->audio_channels_map);
            ost->audio_channels_mapped = 0;
    
    
            av_dict_free(&ost->sws_dict);
    
    
            avcodec_free_context(&ost->enc_ctx);
    
            avcodec_parameters_free(&ost->ref_par);
    
            while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
    
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
    
        av_fifo_freep(&ost->muxing_queue);

        av_freep(&output_streams[i]);
    }

#if HAVE_PTHREADS
        free_input_threads();
    #endif
    
        for (i = 0; i < nb_input_files; i++) {
    
            avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }

    for (i = 0; i < nb_input_streams; i++) {
    
            InputStream *ist = input_streams[i];
    
            av_frame_free(&ist->decoded_frame);
            av_frame_free(&ist->filter_frame);
    
            av_dict_free(&ist->decoder_opts);
    
            avsubtitle_free(&ist->prev_sub.subtitle);
            av_frame_free(&ist->sub2video.frame);
    
            av_freep(&ist->filters);
            av_freep(&ist->hwaccel_device);
    
            av_freep(&ist->dts_buffer);
    
        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
            if (fclose(vstats_file))
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing vstats file, loss of information possible: %s\n",
                       av_err2str(AVERROR(errno)));
        }
    
        av_freep(&input_streams);
        av_freep(&input_files);
    
        avformat_network_deinit();
    
        if (received_sigterm) {
    
            av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
    
                   (int) received_sigterm);
    
        } else if (ret && transcode_init_done) {
    
            av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    
    void remove_avoptions(AVDictionary **a, AVDictionary *b)
    {
        AVDictionaryEntry *t = NULL;
    
        while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
            av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
        }
    }
    
    
void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *t;
        if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
    
            av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
    
        exit_program(1);
    }
}

static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
    
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
            int64_t t = getutime();
            va_list va;
            char buf[1024];
    
            if (fmt) {
                va_start(va, fmt);
                vsnprintf(buf, sizeof(buf), fmt, va);
                va_end(va);
    
                av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
    
    static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    
    {
        int i;
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost2 = output_streams[i];
            ost2->finished |= ost == ost2 ? this_stream : others;
        }
    }
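
/* Hand one encoded/copied packet to the muxer. Until the output header has
   been written the packet is buffered in ost->muxing_queue (grown up to
   ost->max_muxing_queue_size); afterwards timestamps are sanitized and the
   packet is sent through av_interleaved_write_frame(). */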
    
    
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;
    
        if (!of->header_written) {
            AVPacket tmp_pkt;
            /* the muxer is not initialized yet, buffer the packet */
            if (!av_fifo_space(ost->muxing_queue)) {
                int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                     ost->max_muxing_queue_size);
                if (new_size <= av_fifo_size(ost->muxing_queue)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Too many packets buffered for output stream %d:%d.\n",
                           ost->file_index, ost->st->index);
                    exit_program(1);
                }
                ret = av_fifo_realloc2(ost->muxing_queue, new_size);
                if (ret < 0)
                    exit_program(1);
            }
            av_packet_move_ref(&tmp_pkt, pkt);
            av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
            return;
        }
    
    
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
            (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
    
            pkt->pts = pkt->dts = AV_NOPTS_VALUE;
    
        /*
         * Audio encoders may split the packets --  #frames in != #packets out.
         * But there is no reordering, so we can limit the number of output packets
         * by simply dropping them here.
         * Counting encoded video frames needs to be done separately because of
         * reordering, see do_video_out()
         */
    
        if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
    
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
    
            ost->quality = sd ? AV_RL32(sd) : -1;
    
            ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
    
    
            for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                if (sd && i < sd[5])
                    ost->error[i] = AV_RL64(sd + 8 + 8*i);
                else
                    ost->error[i] = -1;
            }
    
            if (ost->frame_rate.num && ost->is_cfr) {
    
                if (pkt->duration > 0)
                    av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
    
                pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                             ost->st->time_base);
        }
    }

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
    
            if (pkt->dts != AV_NOPTS_VALUE &&
                pkt->pts != AV_NOPTS_VALUE &&
                pkt->dts > pkt->pts) {
                av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                       pkt->dts, pkt->pts,
                       ost->file_index, ost->st->index);
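            /* Recover by replacing both timestamps with the median of
               {pts, dts, last_mux_dts + 1}: adding all three and subtracting
               the minimum and the maximum leaves the middle value. */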
                pkt->pts =
                pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                         - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                         - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
            }
    
            if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
    
                pkt->dts != AV_NOPTS_VALUE &&
    
                !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
    
                ost->last_mux_dts != AV_NOPTS_VALUE) {
                int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
                if (pkt->dts < max) {
    
                    int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
    
                    av_log(s, loglevel, "Non-monotonous DTS in output stream "
                           "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                           ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    if (exit_on_error) {
                        av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                        exit_program(1);
                    }
                    av_log(s, loglevel, "changing to %"PRId64". This may result "
                           "in incorrect timestamps in the output file.\n",
                           max);
                    if (pkt->pts >= pkt->dts)
                        pkt->pts = FFMAX(pkt->pts, max);
                    pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    
        ost->packets_written++;
    
        pkt->stream_index = ost->index;
    
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
    
                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
    
                    av_get_media_type_string(ost->enc_ctx->codec_type),
    
                    av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
    
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
                pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
        if (ret < 0) {
    
            print_error("av_interleaved_write_frame()", ret);
    
            close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    
    static void close_output_stream(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
    
    
        ost->finished |= ENCODER_FINISHED;
    
        if (of->shortest) {
    
            int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
    
        of->recording_time = FFMIN(of->recording_time, end);
    }
}

static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
    
    {
        int ret = 0;
    
        /* apply the output bitstream filters, if any */
        if (ost->nb_bitstream_filters) {
            int idx;
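
        /* Feed the packet to the first bitstream filter, then drain the chain:
           idx is the filter we are currently receiving from (1-based); output
           of filter idx-1 is sent on to filter idx, or to the muxer after the
           last one, and EAGAIN steps back up the chain until idx reaches 0. */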
    
    
            av_packet_split_side_data(pkt);
    
            ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
            if (ret < 0)
                goto finish;
    
            idx = 1;
            while (idx) {
                /* get a packet from the previous filter up the chain */
                ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
    
                if (ret == AVERROR(EAGAIN)) {
                    ret = 0;
                    idx--;
                    continue;
                } else if (ret < 0)
                    goto finish;
    
                /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
                 * the api states this shouldn't happen after init(). Propagate it here to the
                 * muxer and to the next filters in the chain to workaround this.
                 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
                 * par_out->extradata and adapt muxers accordingly to get rid of this. */
                if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
                    ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
                    if (ret < 0)
                        goto finish;
                    ost->bsf_extradata_updated[idx - 1] |= 1;
                }
    
                /* send it to the next filter down the chain or to the muxer */
                if (idx < ost->nb_bitstream_filters) {
                    /* HACK/FIXME! - See above */
                    if (!(ost->bsf_extradata_updated[idx] & 2)) {
                        ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
                        if (ret < 0)
                            goto finish;
                        ost->bsf_extradata_updated[idx] |= 2;
                    }
                    ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                    if (ret < 0)
                        goto finish;
                    idx++;
                } else
    
                write_packet(of, pkt, ost);
        }
    } else
        write_packet(of, pkt, ost);
    
    
    finish:
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
                   "packet for stream #%d:%d.\n", ost->file_index, ost->index);
            if(exit_on_error)
                exit_program(1);
        }
    }
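
/* Returns 1 while the stream is within the output file's -t/recording_time
   limit; once the limit is reached the stream is closed and 0 is returned. */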
    
    
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    
        if (of->recording_time != INT64_MAX &&
    
            av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
    
                          AV_TIME_BASE_Q) >= 0) {
    
        close_output_stream(ost);
        return 0;
    }
    return 1;
}

static void do_audio_out(OutputFile *of, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    
        AVPacket pkt;
    
        int ret;
    
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
    
        if (!check_recording_time(ost))
            return;
    
        if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
    
            frame->pts = ost->sync_opts;
    
        ost->sync_opts = frame->pts + frame->nb_samples;
    
        ost->samples_encoded += frame->nb_samples;
        ost->frames_encoded++;
    
        av_assert0(pkt.size || !pkt.data);
        update_benchmark(NULL);
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }
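
    /* Encode with the send/receive API: submit the frame once, then fetch
       every packet the encoder has ready (EAGAIN means it needs more input). */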
    
        ret = avcodec_send_frame(enc, frame);
        if (ret < 0)
            goto error;
    
        while (1) {
            ret = avcodec_receive_packet(enc, &pkt);
            if (ret == AVERROR(EAGAIN))
                break;
            if (ret < 0)
                goto error;
    
            update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
    
            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
    
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
            }
    
        output_packet(of, &pkt, ost);
    }

    return;
    error:
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}

static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;
    
        if (sub->pts == AV_NOPTS_VALUE) {
    
            av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
    
                exit_program(1);
    
        if (!subtitle_out) {
            subtitle_out = av_malloc(subtitle_out_max_size);
    
            if (!subtitle_out) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
                exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
           packet to clear them */
        /* XXX: signal it in the codec context ? */
    
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    
        pts = sub->pts;
        if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
            pts -= output_files[ost->file_index]->start_time;
    
        for (i = 0; i < nb; i++) {
    
            unsigned save_num_rects = sub->num_rects;
    
    
            ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
    
            if (!check_recording_time(ost))
                return;
    
        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
            sub->end_display_time  -= sub->start_display_time;
    
            sub->start_display_time = 0;
    
            if (i == 1)
                sub->num_rects = 0;
    
            subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                        subtitle_out_max_size, sub);
    
            if (i == 1)
                sub->num_rects = save_num_rects;
    
            if (subtitle_out_size < 0) {
    
                av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
    
                exit_program(1);
    
            }
    
            av_init_packet(&pkt);
            pkt.data = subtitle_out;
            pkt.size = subtitle_out_size;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
    
            pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
    
            if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
    
                /* XXX: the pts correction is handled here. Maybe handling
                   it in the codec would be better */
                if (i == 0)
                    pkt.pts += 90 * sub->start_display_time;
                else