Skip to content
Snippets Groups Projects
ffmpeg.c 164 KiB
Newer Older
  • Learn to ignore specific revisions
  •         close_output_stream(ost);
    
        if (f->recording_time != INT64_MAX) {
            start_time = f->ctx->start_time;
    
            if (f->start_time != AV_NOPTS_VALUE && copy_ts)
    
                start_time += f->start_time;
    
            if (ist->pts >= f->recording_time + start_time) {
                close_output_stream(ost);
    
        /* force the input stream PTS */
    
        if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        if (pkt->pts != AV_NOPTS_VALUE)
    
            opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    
        if (pkt->dts == AV_NOPTS_VALUE)
    
            opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    
            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    
        opkt.dts -= ost_tb_start_time;
    
        if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
    
            int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
    
            opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
    
                                                   (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
    
                                                   ost->mux_timebase) - ost_tb_start_time;
    
        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
    
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
        // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    
        if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
           && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
           && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
           && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
    
            int ret = av_parser_change(ost->parser, ost->parser_avctx,
    
                                 &opkt.data, &opkt.size,
                                 pkt->data, pkt->size,
    
                                 pkt->flags & AV_PKT_FLAG_KEY);
            if (ret < 0) {
    
                av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                       av_err2str(ret));
    
                opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
                if (!opkt.buf)
    
                    exit_program(1);
    
        } else {
            opkt.data = pkt->data;
            opkt.size = pkt->size;
    
        av_copy_packet_side_data(&opkt, pkt);
    
        if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
            ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
    
            (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
    
            /* store AVPicture in AVPacket, as expected by the output format */
    
            int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
    
                av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                       av_err2str(ret));
    
            opkt.data = (uint8_t *)&pict;
            opkt.size = sizeof(AVPicture);
            opkt.flags |= AV_PKT_FLAG_KEY;
        }
    
        output_packet(of, &opkt, ost);
    
    /* Guess a default channel layout for an input stream whose decoder did not
     * report one.
     *
     * Returns 1 when the stream already has a layout or one could be guessed
     * from the channel count, 0 when no layout could be determined (too many
     * channels, above ist->guess_layout_max, or no default for that count).
     *
     * NOTE(review): the scraped source was missing the function braces and the
     * final "return 1;" — restored here to match the function's visible logic
     * (both early exits return 0, success falls through). */
    int guess_input_channel_layout(InputStream *ist)
    {
        AVCodecContext *dec = ist->dec_ctx;

        if (!dec->channel_layout) {
            char layout_name[256];

            /* Refuse to guess for very high channel counts (user-tunable cap). */
            if (dec->channels > ist->guess_layout_max)
                return 0;
            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
            /* Warn so the user knows the layout is a guess, not demuxed data. */
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
        }
        return 1;
    }
    /* Record decode success/failure statistics and enforce -xerror policy.
     *
     * ist may be NULL (e.g. for subtitles); the corrupt-frame check is then
     * skipped. *got_output must be valid. ret is the decoder's return value.
     *
     * NOTE(review): the scraped source was missing the function's closing
     * brace — restored here. */
    static void check_decode_result(InputStream *ist, int *got_output, int ret)
    {
        /* decode_error_stat[0] counts successes, [1] counts failures. */
        if (*got_output || ret<0)
            decode_error_stat[ret<0] ++;

        if (ret < 0 && exit_on_error)
            exit_program(1);

        /* With -xerror, a frame flagged corrupt by the decoder is fatal too. */
        if (exit_on_error && *got_output && ist) {
            if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
                av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
                exit_program(1);
            }
        }
    }
    // Filters can be configured only if the formats of all inputs are known.
    static int ifilter_has_all_input_formats(FilterGraph *fg)
    {
        int idx;

        for (idx = 0; idx < fg->nb_inputs; idx++) {
            InputFilter *in = fg->inputs[idx];
            int needs_format = in->type == AVMEDIA_TYPE_AUDIO ||
                               in->type == AVMEDIA_TYPE_VIDEO;

            // An audio/video input with a still-unknown (negative) format
            // blocks graph configuration.
            if (needs_format && in->format < 0)
                return 0;
        }

        return 1;
    }
    
    /* Feed one decoded frame into a filtergraph input, (re)configuring the
     * graph first if the frame's parameters differ from what the input was
     * set up with. Takes ownership semantics of av_buffersrc_add_frame_flags:
     * on success the frame's contents are consumed. Returns 0 on success or a
     * negative AVERROR. */
    static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
    {
        FilterGraph *fg = ifilter->graph;
        int need_reinit, ret, i;

        /* determine if the parameters for this input changed */
        need_reinit = ifilter->format != frame->format;
        /* A change in hw-frames context (appearing, disappearing, or pointing
         * at a different device context) also forces a reinit. */
        if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
            (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
            need_reinit = 1;

        switch (ifilter->ist->st->codecpar->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                           ifilter->channels       != frame->channels ||
                           ifilter->channel_layout != frame->channel_layout;
            break;
        case AVMEDIA_TYPE_VIDEO:
            need_reinit |= ifilter->width  != frame->width ||
                           ifilter->height != frame->height;
            break;
        }

        if (need_reinit) {
            /* Record the new parameters on the InputFilter before deciding
             * whether the whole graph can be (re)configured. */
            ret = ifilter_parameters_from_frame(ifilter, frame);
            if (ret < 0)
                return ret;
        }

        /* (re)init the graph if possible, otherwise buffer the frame and return */
        if (need_reinit || !fg->graph) {
            /* NOTE(review): the loop variable i is never used in the body —
             * the condition checks the whole graph, so this executes at most
             * once before returning; kept as-is to preserve behavior. */
            for (i = 0; i < fg->nb_inputs; i++) {
                if (!ifilter_has_all_input_formats(fg)) {
                    /* Not all inputs know their format yet: park a clone of
                     * the frame in this input's FIFO until the graph can be
                     * configured. The caller's frame is emptied either way. */
                    AVFrame *tmp = av_frame_clone(frame);
                    if (!tmp)
                        return AVERROR(ENOMEM);
                    av_frame_unref(frame);

                    /* Grow the FIFO (doubling) when full before writing. */
                    if (!av_fifo_space(ifilter->frame_queue)) {
                        ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                        if (ret < 0)
                            return ret;
                    }
                    av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                    return 0;
                }
            }

            /* Drain any frames still buffered in the old graph before tearing
             * it down, so they are not lost across the reconfiguration. */
            ret = reap_filters(1);
            if (ret < 0 && ret != AVERROR_EOF) {
                char errbuf[128];
                av_strerror(ret, errbuf, sizeof(errbuf));

                av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
                return ret;
            }

            ret = configure_filtergraph(fg);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }

        /* Push the frame into the (now configured) graph. */
        ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
            return ret;
        }

        return 0;
    }
    
    
    /* Signal end-of-stream on one filtergraph input. Returns 0 on success or
     * a negative AVERROR from the buffer source. */
    static int ifilter_send_eof(InputFilter *ifilter)
    {
        int ret;

        ifilter->eof = 1;

        if (ifilter->filter) {
            // Configured graph: a NULL frame is the EOF marker for buffersrc.
            ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
            if (ret < 0)
                return ret;
        } else {
            // The filtergraph was never configured. If every one of its
            // inputs has now hit EOF, it never will be, so the outputs it
            // feeds must be finished explicitly.
            FilterGraph *fg = ifilter->graph;
            int idx, all_eof = 1;

            for (idx = 0; idx < fg->nb_inputs; idx++)
                all_eof &= !!fg->inputs[idx]->eof;

            if (all_eof) {
                int out;
                for (out = 0; out < fg->nb_outputs; out++)
                    finish_output_stream(fg->outputs[out]->ost);
            }
        }

        return 0;
    }
    
    
    wm4's avatar
    wm4 committed
    // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
    // There is the following difference: if you got a frame, you must call
    // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
    // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
    static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
    {
        int err;

        *got_frame = 0;

        if (pkt) {
            err = avcodec_send_packet(avctx, pkt);
            // In particular, we don't expect AVERROR(EAGAIN), because we read all
            // decoded frames with avcodec_receive_frame() until done.
            if (err < 0 && err != AVERROR_EOF)
                return err;
        }

        err = avcodec_receive_frame(avctx, frame);
        if (err == AVERROR(EAGAIN))
            return 0;               // no frame available yet; not an error
        if (err < 0)
            return err;

        *got_frame = 1;
        return 0;
    }
    
    
    /* Distribute one decoded frame to every filtergraph input fed by this
     * stream. All but the last filter receive a reference; the last consumes
     * the frame itself, avoiding one extra ref. Returns 0 on success or the
     * first negative AVERROR encountered (AVERROR_EOF is ignored). */
    static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
    {
        int i;
        /* Initialize ret: with assertions compiled out (NDEBUG), a stream with
         * zero filters would otherwise return an uninitialized value (UB). */
        int ret = 0;
        AVFrame *f;

        av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */

        for (i = 0; i < ist->nb_filters; i++) {
            if (i < ist->nb_filters - 1) {
                f = ist->filter_frame;
                ret = av_frame_ref(f, decoded_frame);
                if (ret < 0)
                    break;
            } else
                f = decoded_frame;

            ret = ifilter_send_frame(ist->filters[i], f);
            /* A filter input that already hit EOF is not an error here. */
            if (ret == AVERROR_EOF)
                ret = 0; /* ignore */
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to inject frame into filter network: %s\n", av_err2str(ret));
                break;
            }
        }
        return ret;
    }
    
    
    static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    
        AVFrame *decoded_frame;
    
        AVCodecContext *avctx = ist->dec_ctx;
    
        AVRational decoded_frame_tb;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
    
        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
    
        decoded_frame = ist->decoded_frame;
    
        update_benchmark(NULL);
    
    wm4's avatar
    wm4 committed
        ret = decode(avctx, decoded_frame, got_output, pkt);
    
        update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    
    
        if (ret >= 0 && avctx->sample_rate <= 0) {
    
            av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
    
    wm4's avatar
    wm4 committed
        if (ret != AVERROR_EOF)
            check_decode_result(ist, got_output, ret);
    
        ist->samples_decoded += decoded_frame->nb_samples;
        ist->frames_decoded++;
    
    
    #if 1
        /* increment next_dts to use for the case where the input stream does not
           have timestamps or there are multiple frames in the packet */
        ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
        ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
    #endif
    
        if (decoded_frame->pts != AV_NOPTS_VALUE) {
    
            decoded_frame_tb   = ist->st->time_base;
    
    wm4's avatar
    wm4 committed
        } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
    
            decoded_frame->pts = pkt->pts;
            decoded_frame_tb   = ist->st->time_base;
        }else {
            decoded_frame->pts = ist->dts;
            decoded_frame_tb   = AV_TIME_BASE_Q;
        }
    
        if (decoded_frame->pts != AV_NOPTS_VALUE)
    
            decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
    
                                                  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                                  (AVRational){1, avctx->sample_rate});
    
        ist->nb_samples = decoded_frame->nb_samples;
    
        err = send_frame_to_filters(ist, decoded_frame);
    
        av_frame_unref(ist->filter_frame);
        av_frame_unref(decoded_frame);
        return err < 0 ? err : ret;
    
    wm4's avatar
    wm4 committed
    static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
    
        AVFrame *decoded_frame;
    
        int64_t best_effort_timestamp;
    
    wm4's avatar
    wm4 committed
        int64_t dts = AV_NOPTS_VALUE;
        AVPacket avpkt;
    
        // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
        // reason. This seems like a semi-critical bug. Don't trigger EOF, and
        // skip the packet.
        if (!eof && pkt && pkt->size == 0)
            return 0;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
    
        decoded_frame = ist->decoded_frame;
    
    wm4's avatar
    wm4 committed
        if (ist->dts != AV_NOPTS_VALUE)
            dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
        if (pkt) {
            avpkt = *pkt;
            avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
        }
    
        // The old code used to set dts on the drain packet, which does not work
        // with the new API anymore.
        if (eof) {
            void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
            if (!new)
                return AVERROR(ENOMEM);
            ist->dts_buffer = new;
            ist->dts_buffer[ist->nb_dts_buffer++] = dts;
        }
    
        update_benchmark(NULL);
    
    wm4's avatar
    wm4 committed
        ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    
        update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    
    
        // The following line may be required in some cases where there is no parser
        // or the parser does not has_b_frames correctly
    
        if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
    
            if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
    
                ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
    
                av_log(ist->dec_ctx, AV_LOG_WARNING,
    
                       "video_delay is larger in decoder than demuxer %d > %d.\n"
    
                       "If you want to help, upload a sample "
                       "of this file to ftp://upload.ffmpeg.org/incoming/ "
    
                       "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
    
                       ist->st->codecpar->video_delay);
    
    wm4's avatar
    wm4 committed
        if (ret != AVERROR_EOF)
            check_decode_result(ist, got_output, ret);
    
        if (*got_output && ret >= 0) {
            if (ist->dec_ctx->width  != decoded_frame->width ||
                ist->dec_ctx->height != decoded_frame->height ||
                ist->dec_ctx->pix_fmt != decoded_frame->format) {
                av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                    decoded_frame->width,
                    decoded_frame->height,
                    decoded_frame->format,
                    ist->dec_ctx->width,
                    ist->dec_ctx->height,
                    ist->dec_ctx->pix_fmt);
            }
        }
    
    
        if(ist->top_field_first>=0)
            decoded_frame->top_field_first = ist->top_field_first;
    
        if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
    
            err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
    
        ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
    
    
        best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
    
    wm4's avatar
    wm4 committed
    
        if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
            best_effort_timestamp = ist->dts_buffer[0];
    
            for (i = 0; i < ist->nb_dts_buffer - 1; i++)
                ist->dts_buffer[i] = ist->dts_buffer[i + 1];
            ist->nb_dts_buffer--;
        }
    
    
        if(best_effort_timestamp != AV_NOPTS_VALUE) {
            int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
    
            if (ts != AV_NOPTS_VALUE)
                ist->next_pts = ist->pts = ts;
        }
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
    
                   "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
                   ist->st->index, av_ts2str(decoded_frame->pts),
                   av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
                   best_effort_timestamp,
                   av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
                   decoded_frame->key_frame, decoded_frame->pict_type,
                   ist->st->time_base.num, ist->st->time_base.den);
    
        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    
        err = send_frame_to_filters(ist, decoded_frame);
    
        av_frame_unref(ist->filter_frame);
        av_frame_unref(decoded_frame);
        return err < 0 ? err : ret;
    
    static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    
        int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
    
                                              &subtitle, got_output, pkt);
    
        check_decode_result(NULL, got_output, ret);
    
        if (ret < 0 || !*got_output) {
            if (!pkt->size)
                sub2video_flush(ist);
    
        if (ist->fix_sub_duration) {
    
            if (ist->prev_sub.got_output) {
    
                end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                                 1000, AV_TIME_BASE);
    
                if (end < ist->prev_sub.subtitle.end_display_time) {
    
                           "Subtitle duration reduced from %d to %d%s\n",
                           ist->prev_sub.subtitle.end_display_time, end,
                           end <= 0 ? ", dropping it" : "");
    
                    ist->prev_sub.subtitle.end_display_time = end;
                }
            }
            FFSWAP(int,        *got_output, ist->prev_sub.got_output);
            FFSWAP(int,        ret,         ist->prev_sub.ret);
            FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
    
        if (ist->sub2video.frame) {
            sub2video_update(ist, &subtitle);
        } else if (ist->nb_filters) {
            if (!ist->sub2video.sub_queue)
                ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
            if (!ist->sub2video.sub_queue)
                exit_program(1);
            if (!av_fifo_space(ist->sub2video.sub_queue)) {
                ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
                if (ret < 0)
                    exit_program(1);
            }
            av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
            free_sub = 0;
        }
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || !ost->encoding_needed
                || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
    
            do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    
        if (free_sub)
            avsubtitle_free(&subtitle);
    
    /* Send EOF to every filtergraph input attached to this stream.
     *
     * Returns 0 on success, or the first negative AVERROR from
     * ifilter_send_eof() (remaining inputs are then not signalled).
     *
     * NOTE(review): the scraped source was missing the declarations, loop
     * close and final return — restored here to match the visible logic. */
    static int send_filter_eof(InputStream *ist)
    {
        int i, ret;

        for (i = 0; i < ist->nb_filters; i++) {
            ret = ifilter_send_eof(ist->filters[i]);
            if (ret < 0)
                return ret;
        }
        return 0;
    }
    /* pkt = NULL means EOF (needed to flush decoder buffers) */
    
    static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
    
    wm4's avatar
    wm4 committed
        int repeating = 0;
        int eof_reached = 0;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        if (!ist->saw_first_ts) {
    
            ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
    
            if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
    
                ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
    
        if (ist->next_dts == AV_NOPTS_VALUE)
    
            ist->next_dts = ist->dts;
        if (ist->next_pts == AV_NOPTS_VALUE)
            ist->next_pts = ist->pts;
    
            /* EOF handling */
            av_init_packet(&avpkt);
            avpkt.data = NULL;
            avpkt.size = 0;
        } else {
            avpkt = *pkt;
        }
    
    wm4's avatar
    wm4 committed
        if (pkt && pkt->dts != AV_NOPTS_VALUE) {
    
            ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    
            if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
    
                ist->next_pts = ist->pts = ist->dts;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
        // while we have more to decode or while the decoder did output something on EOF
    
    wm4's avatar
    wm4 committed
        while (ist->decoding_needed) {
            int duration = 0;
            int got_output = 0;
    
            ist->pts = ist->next_pts;
            ist->dts = ist->next_dts;
    
            switch (ist->dec_ctx->codec_type) {
    
    wm4's avatar
    wm4 committed
                ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output);
    
                break;
            case AVMEDIA_TYPE_VIDEO:
    
    wm4's avatar
    wm4 committed
                ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
                if (!repeating || !pkt || got_output) {
                    if (pkt && pkt->duration) {
                        duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                    } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                        int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                        duration = ((int64_t)AV_TIME_BASE *
                                        ist->dec_ctx->framerate.den * ticks) /
                                        ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                    }
    
    wm4's avatar
    wm4 committed
                    if(ist->dts != AV_NOPTS_VALUE && duration) {
                        ist->next_dts += duration;
                    }else
                        ist->next_dts = AV_NOPTS_VALUE;
                }
    
                if (got_output)
                    ist->next_pts += duration; //FIXME the duration is not correct in some cases
    
                break;
            case AVMEDIA_TYPE_SUBTITLE:
    
    wm4's avatar
    wm4 committed
                if (repeating)
                    break;
    
                ret = transcode_subtitles(ist, &avpkt, &got_output);
    
    wm4's avatar
    wm4 committed
                if (!pkt && ret >= 0)
                    ret = AVERROR_EOF;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            default:
                return -1;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
    wm4's avatar
    wm4 committed
            if (ret == AVERROR_EOF) {
                eof_reached = 1;
                break;
            }
    
    
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
                if (exit_on_error)
                    exit_program(1);
    
    wm4's avatar
    wm4 committed
                // Decoding might not terminate if we're draining the decoder, and
                // the decoder keeps returning an error.
                // This should probably be considered a libavcodec issue.
                // Sample: fate-vsynth1-dnxhd-720p-hr-lb
                if (!pkt)
                    eof_reached = 1;
    
    wm4's avatar
    wm4 committed
            if (!got_output)
                break;
    
    wm4's avatar
    wm4 committed
            // During draining, we might get multiple output frames in this loop.
            // ffmpeg.c does not drain the filter chain on configuration changes,
            // which means if we send multiple frames at once to the filters, and
            // one of those frames changes configuration, the buffered frames will
            // be lost. This can upset certain FATE tests.
            // Decode only 1 frame per call on EOF to appease these FATE tests.
            // The ideal solution would be to rewrite decoding to use the new
            // decoding API in a better way.
            if (!pkt)
    
    wm4's avatar
    wm4 committed
    
            repeating = 1;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    
        /* after flushing, send an EOF on all the filter inputs attached to the stream */
    
        /* except when looping we need to flush but not to send an EOF */
    
    wm4's avatar
    wm4 committed
        if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
    
            int ret = send_filter_eof(ist);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
                exit_program(1);
            }
        }
    
    
        if (!ist->decoding_needed) {
    
            ist->dts = ist->next_dts;
    
            switch (ist->dec_ctx->codec_type) {
    
            case AVMEDIA_TYPE_AUDIO:
    
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                 ist->dec_ctx->sample_rate;
    
                break;
            case AVMEDIA_TYPE_VIDEO:
    
                    // TODO: Remove work-around for c99-to-c89 issue 7
                    AVRational time_base_q = AV_TIME_BASE_Q;
                    int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                    ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
    
                    ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
    
                } else if(ist->dec_ctx->framerate.num != 0) {
    
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
    
                    ist->next_dts += ((int64_t)AV_TIME_BASE *
    
                                      ist->dec_ctx->framerate.den * ticks) /
                                      ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
                }
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
            }
    
            ist->pts = ist->dts;
            ist->next_pts = ist->next_dts;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
        for (i = 0; pkt && i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || ost->encoding_needed)
                continue;
    
    wm4's avatar
    wm4 committed
        return !eof_reached;
    
    static void print_sdp(void)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    {
    
        int j;
        AVIOContext *sdp_pb;
    
        AVFormatContext **avc;
    
        for (i = 0; i < nb_output_files; i++) {
            if (!output_files[i]->header_written)
                return;
        }
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        avc = av_malloc_array(nb_output_files, sizeof(*avc));
    
            exit_program(1);
    
        for (i = 0, j = 0; i < nb_output_files; i++) {
            if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
                avc[j] = output_files[i]->ctx;
                j++;
            }
        }
    
    
        av_sdp_create(avc, j, sdp, sizeof(sdp));
    
        if (!sdp_filename) {
            printf("SDP:\n%s\n", sdp);
            fflush(stdout);
        } else {
            if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
            } else {
                avio_printf(sdp_pb, "SDP:\n%s", sdp);
    
    static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
    {
        int i;
        for (i = 0; hwaccels[i].name; i++)
            if (hwaccels[i].pix_fmt == pix_fmt)
                return &hwaccels[i];
        return NULL;
    }
    
    /* AVCodecContext.get_format callback: walk the decoder's candidate pixel
     * formats (terminated by -1) and pick either the first software format or
     * a hwaccel format whose hwaccel can be initialized for this stream.
     * Returns the chosen format, or AV_PIX_FMT_NONE on fatal hwaccel failure.
     *
     * NOTE(review): the scraped source dropped the error-path closers after
     * the fatal log (return AV_PIX_FMT_NONE; continue;), leaving the braces
     * unbalanced — restored here. */
    static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
    {
        InputStream *ist = s->opaque;
        const enum AVPixelFormat *p;
        int ret;

        for (p = pix_fmts; *p != -1; p++) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
            const HWAccel *hwaccel;

            /* First non-hwaccel format: take it (software decode path). */
            if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
                break;

            /* Skip hwaccels that don't exist, conflict with one already
             * active on this stream, or weren't the one the user asked for. */
            hwaccel = get_hwaccel(*p);
            if (!hwaccel ||
                (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
                (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
                continue;

            ret = hwaccel->init(s);
            if (ret < 0) {
                /* Fatal only when this exact hwaccel was explicitly requested;
                 * otherwise try the next candidate format. */
                if (ist->hwaccel_id == hwaccel->id) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n", hwaccel->name,
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }

            if (ist->hw_frames_ctx) {
                s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
                if (!s->hw_frames_ctx)
                    return AV_PIX_FMT_NONE;
            }

            ist->active_hwaccel_id = hwaccel->id;
            ist->hwaccel_pix_fmt   = *p;
            break;
        }

        return *p;
    }
    
    static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
    {
        InputStream *ist = s->opaque;
    
        if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
            return ist->hwaccel_get_buffer(s, frame, flags);
    
        return avcodec_default_get_buffer2(s, frame, flags);
    }
    
    
    static int init_input_stream(int ist_index, char *error, int error_len)
    
        InputStream *ist = input_streams[ist_index];
    
        if (ist->decoding_needed) {
            AVCodec *codec = ist->dec;
            if (!codec) {
    
                snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
    
                        avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
    
                return AVERROR(EINVAL);
    
            ist->dec_ctx->opaque                = ist;
            ist->dec_ctx->get_format            = get_format;
            ist->dec_ctx->get_buffer2           = get_buffer;
            ist->dec_ctx->thread_safe_callbacks = 1;
    
            av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
    
            if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
               (ist->decoding_needed & DECODING_FOR_OST)) {
    
                av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
    
                if (ist->decoding_needed & DECODING_FOR_FILTER)
                    av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
            }
    
            av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
    
    
            /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
             * audio, and video decoders such as cuvid or mediacodec */
            av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
    
    
            if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
                av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
    
            if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
    
                if (ret == AVERROR_EXPERIMENTAL)
                    abort_codec_experimental(codec, 0);
    
    
                snprintf(error, error_len,
                         "Error while opening decoder for input stream "
                         "#%d:%d : %s",
    
                         ist->file_index, ist->st->index, av_err2str(ret));
    
            assert_avoptions(ist->decoder_opts);
    
        ist->next_pts = AV_NOPTS_VALUE;
    
        ist->next_dts = AV_NOPTS_VALUE;
    
        return 0;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    }
    
    
    static InputStream *get_input_stream(OutputStream *ost)
    
        if (ost->source_index >= 0)
            return input_streams[ost->source_index];
        return NULL;
    
    static int compare_int64(const void *a, const void *b)
    {
    
        return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
    
    /* open the muxer when all the streams are initialized */
    static int check_init_output_file(OutputFile *of, int file_index)
    {
        int ret, i;
    
        for (i = 0; i < of->ctx->nb_streams; i++) {
            OutputStream *ost = output_streams[of->ost_index + i];
            if (!ost->initialized)
                return 0;
        }
    
        of->ctx->interrupt_callback = int_cb;
    
        ret = avformat_write_header(of->ctx, &of->opts);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Could not write header for output file #%d "
    
                   "(incorrect codec parameters ?): %s\n",
    
                   file_index, av_err2str(ret));
            return ret;
        }
        //assert_avoptions(of->opts);
        of->header_written = 1;
    
        av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
    
        if (sdp_filename || want_sdp)
            print_sdp();
    
    
        /* flush the muxing queues */
        for (i = 0; i < of->ctx->nb_streams; i++) {
            OutputStream *ost = output_streams[of->ost_index + i];
    
    
            /* try to improve muxing time_base (only possible if nothing has been written yet) */
            if (!av_fifo_size(ost->muxing_queue))
                ost->mux_timebase = ost->st->time_base;
    
    
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                write_packet(of, &pkt, ost);
            }
        }
    
    
    static int init_output_bsfs(OutputStream *ost)
    {
        AVBSFContext *ctx;
        int i, ret;
    
        if (!ost->nb_bitstream_filters)
            return 0;
    
        for (i = 0; i < ost->nb_bitstream_filters; i++) {
            ctx = ost->bsf_ctx[i];
    
            ret = avcodec_parameters_copy(ctx->par_in,
                                          i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
            if (ret < 0)
                return ret;
    
            ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
    
            ret = av_bsf_init(ctx);
            if (ret < 0) {
    
                av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
    
                       ost->bsf_ctx[i]->filter->name);
                return ret;
            }
        }
    
        ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
        ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
        if (ret < 0)
            return ret;
    
        ost->st->time_base = ctx->time_base_out;
    
        return 0;
    }
    
    
    static int init_output_stream_streamcopy(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
        InputStream *ist = get_input_stream(ost);
        AVCodecParameters *par_dst = ost->st->codecpar;
        AVCodecParameters *par_src = ost->ref_par;
        AVRational sar;
        int i, ret;
    
        uint32_t codec_tag = par_dst->codec_tag;
    
    
        av_assert0(ist && !ost->filter);