ffmpeg.c
                snprintf(error, sizeof(error),
                         "Encoder (codec %s) not found for output stream #%d:%d",
                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);

                ret = AVERROR(EINVAL);
                goto dump_format;
    
                set_encoder_id(output_files[ost->file_index], ost);
    
    
                if (!ost->filter &&
    
                    (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                     enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
    
                        FilterGraph *fg;
                        fg = init_simple_filtergraph(ist, ost);
    
                        if (configure_filtergraph(fg)) {
    
                            av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
    
                            exit_program(1);
                        }
                }

                if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
    
                        ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
    
                    if (ist && !ost->frame_rate.num)
                        ost->frame_rate = ist->framerate;
    
                    if (ist && !ost->frame_rate.num)
    
                        ost->frame_rate = ist->st->r_frame_rate;
                    if (ist && !ost->frame_rate.num) {
                        ost->frame_rate = (AVRational){25, 1};
                        av_log(NULL, AV_LOG_WARNING,
                               "No information "
                               "about the input framerate is available. Falling "
                               "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                               "if you want a different framerate.\n",
                               ost->file_index, ost->index);
                    }
    
    //                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
                    if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                        int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                        ost->frame_rate = ost->enc->supported_framerates[idx];
                    }
    
                    // reduce frame rate for mpeg4 to be within the spec limits
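                    // (MPEG-4 part 2 stores the time base denominator in 16 bits,
                    //  so the rate is reduced to keep the denominator below 65536)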
    
                    if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
    
                        av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                                  ost->frame_rate.num, ost->frame_rate.den, 65535);
                    }
                }

                switch (enc_ctx->codec_type) {
    
                case AVMEDIA_TYPE_AUDIO:
    
                    enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
                    enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
                    enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
    
                    enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
    
                    enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
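                    /* with a 1/sample_rate time base one tick corresponds to one
                     * audio sample, e.g. 1/48000 s per tick at 48 kHz */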
    
                    break;
                case AVMEDIA_TYPE_VIDEO:
    
                    enc_ctx->time_base = av_inv_q(ost->frame_rate);
    
                    if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
    
                        enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
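                    /* av_inv_q() turns e.g. a 30000/1001 fps rate into a 1001/30000
                     * time base; if the frame rate is unknown (0/0), the time base of
                     * the filter output link is used instead */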
                    if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
    
                       && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
    
                        av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                                   "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
                    }
                for (j = 0; j < ost->forced_kf_count; j++)
                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                         AV_TIME_BASE_Q,
                                                         enc_ctx->time_base);

                    enc_ctx->width  = ost->filter->filter->inputs[0]->w;
                    enc_ctx->height = ost->filter->filter->inputs[0]->h;
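                    /* -aspect specifies the display aspect ratio (DAR); since
                     * DAR = SAR * width / height, the stored sample aspect ratio is
                     * DAR * height / width, e.g. 16:9 at 1440x1080 gives SAR 4:3 */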
                    enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
    
                        ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
    
                        av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
    
                        ost->filter->filter->inputs[0]->sample_aspect_ratio;
    
                    if (!strncmp(ost->enc->name, "libx264", 7) &&
    
                        enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
    
                        ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
    
                               "No pixel format specified, %s for H.264 encoding chosen.\n"
                               "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                               av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
    
                    if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
    
                        enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
    
                        ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                        av_log(NULL, AV_LOG_WARNING,
                               "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                               "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                               av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
    
                    enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
    
                    ost->st->avg_frame_rate = ost->frame_rate;
    
    
                        enc_ctx->width   != dec_ctx->width  ||
                        enc_ctx->height  != dec_ctx->height ||
                        enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
                        enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
    
                    if (ost->forced_keyframes) {
                        if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                            ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                                forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                            if (ret < 0) {
                                av_log(NULL, AV_LOG_ERROR,
                                       "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                                return ret;
                            }
                            ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                            ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                            ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                            ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
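                            /* the expression may reference n, n_forced, prev_forced_n,
                             * prev_forced_t and t; the prev_* values stay NAN until a
                             * keyframe has actually been forced */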
    
    
                            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                            // parse it only for static kf timings
                        } else if(strncmp(ost->forced_keyframes, "source", 6)) {
    
                            parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
                        }
                    }

                    break;
                case AVMEDIA_TYPE_SUBTITLE:
    
                    enc_ctx->time_base = (AVRational){1, 1000};
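                    /* subtitle packets are timestamped in milliseconds, hence the
                     * fixed 1/1000 time base */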
    
                if (!enc_ctx->width) {
                    enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
                    enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
                }
                break;
            case AVMEDIA_TYPE_DATA:
                    break;
    
    
            if (ost->disposition) {
                static const AVOption opts[] = {
                    { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
                    { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
                    { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
                    { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
                    { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
                    { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
                    { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
                    { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
                    { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
                    { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
                    { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
                    { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
                    { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
                    { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
                    { NULL },
                };
                static const AVClass class = {
                    .class_name = "",
                    .item_name  = av_default_item_name,
                    .option     = opts,
                    .version    = LIBAVUTIL_VERSION_INT,
                };
                const AVClass *pclass = &class;
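            /* the throwaway AVClass/AVOption table above lets av_opt_eval_flags()
             * parse a string such as "default+forced" into the corresponding
             * AV_DISPOSITION_* bits */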
    
                ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
                if (ret < 0)
                    goto dump_format;
            }
    
        /* open each encoder */
    
        for (i = 0; i < nb_output_streams; i++) {
    
        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

        /* init input streams */
        for (i = 0; i < nb_input_streams; i++)
    
            if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
                for (i = 0; i < nb_output_streams; i++) {
                    ost = output_streams[i];
    
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

        /* discard unused programs */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            for (j = 0; j < ifile->ctx->nb_programs; j++) {
                AVProgram *p = ifile->ctx->programs[j];
                int discard  = AVDISCARD_ALL;
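            /* a program is kept (AVDISCARD_DEFAULT) as soon as at least one of its
             * streams is mapped to an output; otherwise the whole program is discarded */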
    
                for (k = 0; k < p->nb_stream_indexes; k++)
    
                    if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
    
                        discard = AVDISCARD_DEFAULT;
                        break;
                    }
            p->discard = discard;
        }
    }

    /* open files and write file headers */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        oc->interrupt_callback = int_cb;
    
            if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
    
                snprintf(error, sizeof(error),
                         "Could not write header for output file #%d "
                         "(incorrect codec parameters ?): %s",
    
                         i, av_err2str(ret));
    
                ret = AVERROR(EINVAL);
                goto dump_format;
    
    //         assert_avoptions(output_files[i]->opts);
    
            if (strcmp(oc->oformat->name, "rtp")) {
    
     dump_format:
        /* dump the file output parameters - cannot be done before in case
           of stream copy */
    
        for (i = 0; i < nb_output_files; i++) {
    
            av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
    
        }
    
    
        /* dump the stream mapping */
    
        av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
    
            for (j = 0; j < ist->nb_filters; j++) {
                if (ist->filters[j]->graph->graph_desc) {
                    av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                    if (nb_filtergraphs > 1)
                        av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                    av_log(NULL, AV_LOG_INFO, "\n");
                }
            }
        }

        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];

            if (ost->attachment_filename) {
                /* an attached file */
                av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                       ost->attachment_filename, ost->file_index, ost->index);
                continue;
            }

            if (ost->filter && ost->filter->graph->graph_desc) {
                /* output from a complex graph */
    
                av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
    
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
    
                av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                       ost->index, ost->enc ? ost->enc->name : "?");
                continue;
            }

            av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
    
                   input_streams[ost->source_index]->file_index,
                   input_streams[ost->source_index]->st->index,
    
                   ost->file_index,
                   ost->index);
    
            if (ost->sync_ist != input_streams[ost->source_index])
    
                av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
    
                       ost->sync_ist->file_index,
                       ost->sync_ist->st->index);
    
            if (ost->stream_copy)
    
                av_log(NULL, AV_LOG_INFO, " (copy)");
    
            else {
                const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
                const AVCodec *out_codec   = ost->enc;
                const char *decoder_name   = "?";
                const char *in_codec_name  = "?";
                const char *encoder_name   = "?";
                const char *out_codec_name = "?";
    
                const AVCodecDescriptor *desc;
    
    
                if (in_codec) {
                    decoder_name  = in_codec->name;
    
                    desc = avcodec_descriptor_get(in_codec->id);
                    if (desc)
                        in_codec_name = desc->name;
    
                    if (!strcmp(decoder_name, in_codec_name))
                        decoder_name = "native";
                }
    
                if (out_codec) {
                    encoder_name   = out_codec->name;
    
                    desc = avcodec_descriptor_get(out_codec->id);
                    if (desc)
                        out_codec_name = desc->name;
    
                    if (!strcmp(encoder_name, out_codec_name))
    
                        encoder_name = "native";
                }
    
                av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                       in_codec_name, decoder_name,
                       out_codec_name, encoder_name);
            }
    
            av_log(NULL, AV_LOG_INFO, "\n");
    
            av_log(NULL, AV_LOG_ERROR, "%s\n", error);
    
        if (sdp_filename || want_sdp) {
    
    /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
    static int need_output(void)
    {
        int i;

        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost    = output_streams[i];
            OutputFile *of       = output_files[ost->file_index];
            AVFormatContext *os  = output_files[ost->file_index]->ctx;
    
            if (ost->finished ||
                (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                continue;
    
            if (ost->frame_number >= ost->max_frames) {
    
                int j;
                for (j = 0; j < of->ctx->nb_streams; j++)
    
                    close_output_stream(output_streams[of->ost_index + j]);
                continue;
            }

            return 1;
        }

        return 0;
    }

    /**
     * Select the output stream to process.
     *
     * @return  selected output stream, or NULL if none available
     */
    static OutputStream *choose_output(void)
    
    {
    
        int i;
        int64_t opts_min = INT64_MAX;
        OutputStream *ost_min = NULL;
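        /* pick the stream whose current dts, converted to a common microsecond time
         * base, is furthest behind; this keeps the outputs progressing in sync */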
    
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                        AV_TIME_BASE_Q);
    
    static int check_keyboard_interaction(int64_t cur_time)
    {
        int i, ret, key;
        static int64_t last_time;
        if (received_nb_signals)
            return AVERROR_EXIT;
        /* read_key() returns 0 on EOF */
        if(cur_time - last_time >= 100000 && !run_as_daemon){
            key =  read_key();
            last_time = cur_time;
        }else
            key = -1;
        if (key == 'q')
            return AVERROR_EXIT;
        if (key == '+') av_log_set_level(av_log_get_level()+10);
        if (key == '-') av_log_set_level(av_log_get_level()-10);
        if (key == 's') qp_hist     ^= 1;
        if (key == 'h'){
            if (do_hex_dump){
                do_hex_dump = do_pkt_dump = 0;
            } else if(do_pkt_dump){
                do_hex_dump = 1;
            } else
                do_pkt_dump = 1;
            av_log_set_level(AV_LOG_DEBUG);
        }
        if (key == 'c' || key == 'C'){
            char buf[4096], target[64], command[256], arg[256] = {0};
            double time;
            int k, n = 0;
    
            fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
    
            i = 0;
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            if (k > 0 &&
                (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
                av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                       target, time, command, arg);
                for (i = 0; i < nb_filtergraphs; i++) {
                    FilterGraph *fg = filtergraphs[i];
                    if (fg->graph) {
                        if (time < 0) {
                            ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                              key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
    
                            fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
    
                        } else if (key == 'c') {
                            fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
                            ret = AVERROR_PATCHWELCOME;
    
                        } else {
                            ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
    
                            if (ret < 0)
                                fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
    
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "Parse error, at least 3 arguments were expected, "
                       "only %d given in string '%s'\n", n, buf);
    
        }
        if (key == 'd' || key == 'D'){
            int debug=0;
            if(key == 'D') {
                debug = input_streams[0]->st->codec->debug<<1;
                if(!debug) debug = 1;
                while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                    debug += debug;
    
            }else{
                char buf[32];
                int k = 0;
                i = 0;
                while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                    if (k > 0)
                        buf[i++] = k;
                buf[i] = 0;
                if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
    
                    fprintf(stderr,"error parsing debug value\n");
    
            for(i=0;i<nb_input_streams;i++) {
                input_streams[i]->st->codec->debug = debug;
    
            for(i=0;i<nb_output_streams;i++) {
                OutputStream *ost = output_streams[i];
    
            }
            if(debug) av_log_set_level(AV_LOG_DEBUG);
            fprintf(stderr,"debug=%d\n", debug);
    
        if (key == '?'){
            fprintf(stderr, "key    function\n"
                            "?      show this help\n"
                            "+      increase verbosity\n"
                            "-      decrease verbosity\n"
    
                            "c      Send command to first matching filter supporting it\n"
                            "C      Send/Que command to all matching filters\n"
    
                            "D      cycle through available debug modes\n"
                            "h      dump packets/hex press to cycle through the 3 states\n"
                            "q      quit\n"
                            "s      Show QP histogram\n"
            );
        }
        return 0;
    
    #if HAVE_PTHREADS
    
    static void *input_thread(void *arg)
    {
        InputFile *f = arg;
    
        unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    
        int ret = 0;
    
        while (1) {
    
            AVPacket pkt;
            ret = av_read_frame(f->ctx, &pkt);
    
            if (ret == AVERROR(EAGAIN)) {
    
                av_usleep(10000);
    
                continue;
    
            }
            if (ret < 0) {
                av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
                break;
            }
            av_dup_packet(&pkt);
    
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
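            /* if a non-blocking send fails because the queue is full, retry once in
             * blocking mode so the packet is not lost, and suggest raising
             * -thread_queue_size */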
            if (flags && ret == AVERROR(EAGAIN)) {
                flags = 0;
                ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
                av_log(f->ctx, AV_LOG_WARNING,
                       "Thread message queue blocking; consider raising the "
                       "thread_queue_size option (current value: %d)\n",
                       f->thread_queue_size);
            }
    
            if (ret < 0) {
                if (ret != AVERROR_EOF)
                    av_log(f->ctx, AV_LOG_ERROR,
                           "Unable to send packet to main thread: %s\n",
                           av_err2str(ret));
                av_free_packet(&pkt);
                av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
                break;
            }
    
        }

        return NULL;
    }

    static void free_input_threads(void)
    {
        int i;

        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
            AVPacket pkt;
    
            if (!f->in_thread_queue)
                continue;
    
            av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
            while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
    
                av_free_packet(&pkt);
    
            pthread_join(f->thread, NULL);
            f->joined = 1;
    
            av_thread_message_queue_free(&f->in_thread_queue);
        }
    }

    static int init_input_threads(void)
    {
        int i, ret;
    
        if (nb_input_files == 1)
            return 0;
    
        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
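            /* non-seekable inputs (pipes, network streams) other than lavfi are read
             * in non-blocking mode so that one stalled input cannot block the others */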
    
            if (f->ctx->pb ? !f->ctx->pb->seekable :
                strcmp(f->ctx->iformat->name, "lavfi"))
                f->non_blocking = 1;
    
            ret = av_thread_message_queue_alloc(&f->in_thread_queue,
    
                                                f->thread_queue_size, sizeof(AVPacket));
    
            if (ret < 0)
                return ret;
    
            if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
                av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
                av_thread_message_queue_free(&f->in_thread_queue);
    
                return AVERROR(ret);
            }
        }
        return 0;
    }

    static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
    {
        return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                            f->non_blocking ?
                                            AV_THREAD_MESSAGE_NONBLOCK : 0);
    }

    static int get_input_packet(InputFile *f, AVPacket *pkt)
    {
    
        if (f->rate_emu) {
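            /* -re: emulate a live source by pacing reads against the wall clock; a
             * packet whose dts (in microseconds) is still ahead of the elapsed real
             * time is held back with EAGAIN */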
            int i;
            for (i = 0; i < f->nb_streams; i++) {
                InputStream *ist = input_streams[f->ist_index + i];
    
                int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
    
                int64_t now = av_gettime_relative() - ist->start;
    
                if (pts > now)
                    return AVERROR(EAGAIN);
            }
        }
    
    
    #if HAVE_PTHREADS
    
        if (nb_input_files > 1)
            return get_input_packet_mt(f, pkt);
    #endif
        return av_read_frame(f->ctx, pkt);
    }

    static int got_eagain(void)
    {
        int i;
        for (i = 0; i < nb_output_streams; i++)
            if (output_streams[i]->unavailable)
                return 1;
        return 0;
    }
    
    static void reset_eagain(void)
    {
        int i;
        for (i = 0; i < nb_input_files; i++)
            input_files[i]->eagain = 0;
    
        for (i = 0; i < nb_output_streams; i++)
            output_streams[i]->unavailable = 0;
    }
    
    
    /*
     * Return
     * - 0 -- one packet was read and processed
     * - AVERROR(EAGAIN) -- no packets were available for selected file,
     *   this function should be called again
     * - AVERROR_EOF -- this function should not be called again
     */
    
    static int process_input(int file_index)
    {
        InputFile *ifile = input_files[file_index];
    
        AVFormatContext *is;
        InputStream *ist;
        AVPacket pkt;
        int ret, i, j;
    
        is  = ifile->ctx;
        ret = get_input_packet(ifile, &pkt);
    
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF) {
                print_error(is->filename, ret);
                if (exit_on_error)
    
                    exit_program(1);
    
            }
    
            for (i = 0; i < ifile->nb_streams; i++) {
                ist = input_streams[ifile->ist_index + i];
    
                if (ist->decoding_needed) {
    
                    ret = process_input_packet(ist, NULL);
                }

                /* mark all outputs that don't go through lavfi as finished */
                for (j = 0; j < nb_output_streams; j++) {
                    OutputStream *ost = output_streams[j];
    
                    if (ost->source_index == ifile->ist_index + i &&
                        (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                        finish_output_stream(ost);
                }
            }

            ifile->eof_reached = 1;
            return AVERROR(EAGAIN);
    
        }
    
        reset_eagain();
    
        if (do_pkt_dump) {
            av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                             is->streams[pkt.stream_index]);
        }
        /* the following test is needed in case new streams appear
           dynamically in stream : we ignore them */
        if (pkt.stream_index >= ifile->nb_streams) {
            report_new_stream(file_index, &pkt);
            goto discard_packet;
        }
    
        ist = input_streams[ifile->ist_index + pkt.stream_index];
    
        if (ist->discard)
            goto discard_packet;
    
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
    
                   "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
    
                   ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
    
                   av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
                   av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                   av_ts2str(input_files[ist->file_index]->ts_offset),
                   av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    
        if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
    
            int64_t stime, stime2;
    
            // Correcting starttime based on the enabled streams
            // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
            //       so we instead do it here as part of discontinuity handling
            if (   ist->next_dts == AV_NOPTS_VALUE
                && ifile->ts_offset == -is->start_time
                && (is->iformat->flags & AVFMT_TS_DISCONT)) {
                int64_t new_start_time = INT64_MAX;
                for (i=0; i<is->nb_streams; i++) {
                    AVStream *st = is->streams[i];
                    if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                        continue;
                    new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
                }
                if (new_start_time > is->start_time) {
                    av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                    ifile->ts_offset = -new_start_time;
                }
            }
    
    
            stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
            stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
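            /* timestamps are stored modulo 2^pts_wrap_bits (e.g. 33 bits of 90 kHz
             * ticks in MPEG-TS, about 26.5 hours); a dts/pts more than half a wrap
             * above the start time is assumed to predate the wraparound and is
             * shifted down by one full wrap */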
    
            ist->wrap_correction_done = 1;
    
    
            if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
                pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
    
                ist->wrap_correction_done = 0;
            }
    
            if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
                pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
    
                ist->wrap_correction_done = 0;
            }
        }
    
    
        /* add the stream-global side data to the first packet */
    
            if (ist->st->nb_side_data)
                av_packet_split_side_data(&pkt);
    
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *src_sd = &ist->st->side_data[i];
                uint8_t *dst_data;
    
                if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                    continue;
    
                if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    continue;
    
    
                dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
                if (!dst_data)
                    exit_program(1);
    
                memcpy(dst_data, src_sd->data, src_sd->size);
            }
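        /* the per-file ts_offset (-itsoffset / start-time correction) is applied in
         * the stream time base first, then -itsscale scales the timestamps */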
    
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts *= ist->ts_scale;
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts *= ist->ts_scale;
    
    
        if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
            pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
    
            && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
            int64_t delta   = pkt_dts - ifile->last_ts;
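            /* a dts jump larger than dts_delta_threshold seconds relative to the last
             * timestamp seen in this file is treated as a discontinuity: the jump is
             * folded into the file's ts_offset so output timestamps stay continuous */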
    
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
    
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        }
    
    
        if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
             pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
    
            !copy_ts) {
            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
            int64_t delta   = pkt_dts - ist->next_dts;
            if (is->iformat->flags & AVFMT_TS_DISCONT) {
    
                if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
    
                    delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
    
                    pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                    ifile->ts_offset -= delta;
                    av_log(NULL, AV_LOG_DEBUG,
                           "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                           delta, ifile->ts_offset);
                    pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                    if (pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                }
    
            } else {
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
    
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
    
                    av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                    pkt.dts = AV_NOPTS_VALUE;
                }
                if (pkt.pts != AV_NOPTS_VALUE){
                    int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                    delta   = pkt_pts - ist->next_dts;
                    if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
    
                         delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
    
                        av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                        pkt.pts = AV_NOPTS_VALUE;
                    }
                }
            }
        }
    
    
        if (pkt.dts != AV_NOPTS_VALUE)
            ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
    
    
            av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
    
                   ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
    
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                   av_ts2str(input_files[ist->file_index]->ts_offset),
                   av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    
        sub2video_heartbeat(ist, pkt.pts);
    
    
        process_input_packet(ist, &pkt);
    
    
    discard_packet:
        av_free_packet(&pkt);
    
        return 0;
    }
    
    
    /**
     * Perform a step of transcoding for the specified filter graph.
     *
     * @param[in]  graph     filter graph to consider
     * @param[out] best_ist  input stream where a frame would allow to continue
     * @return  0 for success, <0 for error
     */
    static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
    {
        int i, ret;
        int nb_requests, nb_requests_max = 0;
        InputFilter *ifilter;
        InputStream *ist;
    
        *best_ist = NULL;
        ret = avfilter_graph_request_oldest(graph->graph);
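        /* request_oldest asks the graph to process its oldest queued frame; EAGAIN
         * means the graph needs more input, in which case the loop below feeds the
         * input stream whose buffersrc reported the most failed requests */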
        if (ret >= 0)
    
            return reap_filters(0);

        if (ret == AVERROR_EOF) {
            ret = reap_filters(1);
            for (i = 0; i < graph->nb_outputs; i++)
                close_output_stream(graph->outputs[i]->ost);
            return ret;
        }
        if (ret != AVERROR(EAGAIN))
            return ret;
    
        for (i = 0; i < graph->nb_inputs; i++) {
            ifilter = graph->inputs[i];
            ist = ifilter->ist;
            if (input_files[ist->file_index]->eagain ||
                input_files[ist->file_index]->eof_reached)
                continue;
            nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
            if (nb_requests > nb_requests_max) {
                nb_requests_max = nb_requests;
                *best_ist = ist;
            }
        }
    
        if (!*best_ist)
            for (i = 0; i < graph->nb_outputs; i++)
                graph->outputs[i]->ost->unavailable = 1;
    
        return 0;
    }
    
    /**
     * Run a single step of transcoding.
     *
     * @return  0 for success, <0 for error
     */
    static int transcode_step(void)
    {
        OutputStream *ost;
        InputStream  *ist;
        int ret;
    
        ost = choose_output();
        if (!ost) {
            if (got_eagain()) {
                reset_eagain();
                av_usleep(10000);
                return 0;
            }
            av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
            return AVERROR_EOF;
        }
    
        if (ost->filter) {
            if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
                return ret;
            if (!ist)
                return 0;
        } else {
            av_assert0(ost->source_index >= 0);
            ist = input_streams[ost->source_index];
        }
    
        ret = process_input(ist->file_index);
        if (ret == AVERROR(EAGAIN)) {
            if (input_files[ist->file_index]->eagain)
                ost->unavailable = 1;
            return 0;
        }
    
        if (ret < 0)
            return ret == AVERROR_EOF ? 0 : ret;
    
    
        return reap_filters(0);
    }

    /*
     * The following code is the main loop of the file converter
     */
    
    static int transcode(void)
    {
        int ret, i;
        AVFormatContext *os;
    
        OutputStream *ost;
    
        InputStream *ist;
        int64_t timer_start;
    
    
    
        if (stdin_interaction) {
            av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    
        timer_start = av_gettime_relative();
    
    
    
    #if HAVE_PTHREADS
    
        if ((ret = init_input_threads()) < 0)
            goto fail;
    #endif
    
    
    
        while (!received_sigterm) {
    
            int64_t cur_time= av_gettime_relative();
    
            /* if 'q' pressed, exits */
            if (stdin_interaction)
                if (check_keyboard_interaction(cur_time) < 0)
                    break;
    
            /* check if there's any stream where output is still needed */
    
            if (!need_output()) {
                av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
                break;
            }

            ret = transcode_step();
            if (ret < 0) {
                if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
                    continue;
                } else {
                    char errbuf[128];
                    av_strerror(ret, errbuf, sizeof(errbuf));
    
                    av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
                    break;
                }
            }

    
            /* dump report by using the output first video and audio streams */
    
            print_report(0, timer_start, cur_time);
    
        }
    
    #if HAVE_PTHREADS
    
        free_input_threads();
    #endif
    
    
    
        /* at the end of stream, we must flush the decoder buffers */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
    
                process_input_packet(ist, NULL);