ffmpeg.c
            continue;
    
            if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
                return AVERROR(ENOMEM);
            } else
                avcodec_get_frame_defaults(ost->filtered_frame);
            filtered_frame = ost->filtered_frame;
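            /* Drain every frame already buffered on this output's sink
             * (without requesting more input from the graph) and hand it to
             * the audio/video encoder below. */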
    
            while (1) {
                ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                        char buf[256];
                        av_strerror(ret, buf, sizeof(buf));
                        av_log(NULL, AV_LOG_WARNING,
                               "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
    
                    }
                    break;
                }
                frame_pts = AV_NOPTS_VALUE;
                if (picref->pts != AV_NOPTS_VALUE) {
                    filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
                                                    ost->filter->filter->inputs[0]->time_base,
                                                    ost->st->codec->time_base) -
                                        av_rescale_q(of->start_time,
                                                    AV_TIME_BASE_Q,
                                                    ost->st->codec->time_base);
    
                    if (of->start_time && filtered_frame->pts < 0) {
                        avfilter_unref_buffer(picref);
                        continue;
                    }
    
                //if (ost->source_index >= 0)
                //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
    
    
                switch (ost->filter->filter->inputs[0]->type) {
                case AVMEDIA_TYPE_VIDEO:
                    avfilter_copy_buf_props(filtered_frame, picref);
                    filtered_frame->pts = frame_pts;
                    if (!ost->frame_aspect_ratio)
                        ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
    
                    do_video_out(of->ctx, ost, filtered_frame,
                                 same_quant ? ost->last_quality :
                                              ost->st->codec->global_quality);
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    avfilter_copy_buf_props(filtered_frame, picref);
                    filtered_frame->pts = frame_pts;
                    do_audio_out(of->ctx, ost, filtered_frame);
                    break;
                default:
                    // TODO support subtitle filters
                    av_assert0(0);
                }
    
                avfilter_unref_buffer(picref);
            }
        }

        return 0;
    }
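    /* Print the periodic status line (frame count, fps, q, PSNR, size, time,
     * bitrate, dup/drop counters) and, when -progress is in use, emit the
     * same data as key=value pairs through progress_avio. */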
    
    static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    {
        char buf[1024];
        AVBPrint buf_script;
    
        OutputStream *ost;
        AVFormatContext *oc;
        int64_t total_size;
        AVCodecContext *enc;
        int frame_number, vid, i;
    
        static int64_t last_time = -1;
        static int qp_histogram[52];
    
        double bitrate;
        int64_t pts = 0;
        int hours, mins, secs, us;
    
        if (!print_stats && !is_last_report && !progress_avio)
    
        if (!is_last_report) {
            if (last_time == -1) {
                last_time = cur_time;
    
                return;
            }
    
            if ((cur_time - last_time) < 500000)
                return;
            last_time = cur_time;
        }
    
        oc = output_files[0]->ctx;

        total_size = avio_size(oc->pb);
    
        if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too
    
            total_size = avio_tell(oc->pb);
    
            if (total_size < 0)
                total_size = 0;
        }
    
        buf[0] = '\0';
        vid = 0;
        av_bprint_init(&buf_script, 0, 1);
    
        for (i = 0; i < nb_output_streams; i++) {
    
            float q = -1;

            ost = output_streams[i];
            enc = ost->st->codec;
    
            if (!ost->stream_copy && enc->coded_frame)
    
                q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
    
            if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
    
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
            }
    
            if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
                float fps, t = (cur_time-timer_start) / 1000000.0;
    
                frame_number = ost->frame_number;
    
                fps = t > 1 ? frame_number / t : 0;
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                         frame_number, fps < 9.95, fps, q);
                av_bprintf(&buf_script, "frame=%d\n", frame_number);
                av_bprintf(&buf_script, "fps=%.1f\n", fps);
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
    
                if (is_last_report)
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
    
                if (qp_hist) {
    
                    int j;
                    int qp = lrintf(q);
    
                    if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                        qp_histogram[qp]++;
                    for (j = 0; j < 32; j++)
    
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
    
                }
    
                if (enc->flags&CODEC_FLAG_PSNR) {
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    int j;
                    double error, error_sum = 0;
                    double scale, scale_sum = 0;
                    double p;
    
                    char type[3] = { 'Y','U','V' };
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
    
                    for (j = 0; j < 3; j++) {
                        if (is_last_report) {
                            error = enc->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        } else {
                            error = enc->coded_frame->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0;
                        }
                        if (j)
                            scale /= 4;
    
                        error_sum += error;
                        scale_sum += scale;
    
                        p = psnr(error / scale);
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                        av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                                   ost->file_index, ost->index, type[j] | 32, p);
                    }
    
                    p = psnr(error_sum / scale_sum);
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
    
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                               ost->file_index, ost->index, p);
    
                }
                vid = 1;
            }
    
            /* compute min output value */
    
            if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)
    
                pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
                                              ost->st->time_base, AV_TIME_BASE_Q));
    
        }
    
    
        secs = pts / AV_TIME_BASE;
        us = pts % AV_TIME_BASE;
        mins = secs / 60;
        secs %= 60;
        hours = mins / 60;
        mins %= 60;
    
        bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;
    
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
    
                 "size=%8.0fkB time=", total_size / 1024.0);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "%02d:%02d:%02d.%02d ", hours, mins, secs,
                 (100 * us) / AV_TIME_BASE);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
                   hours, mins, secs, us);
    
        if (nb_frames_dup || nb_frames_drop)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                    nb_frames_dup, nb_frames_drop);
    
        av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
        av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
    
        if (print_stats || is_last_report) {
    
        av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
    
        fflush(stderr);
    
        }
    
    
        if (progress_avio) {
            av_bprintf(&buf_script, "progress=%s\n",
                       is_last_report ? "end" : "continue");
            avio_write(progress_avio, buf_script.str,
                       FFMIN(buf_script.len, buf_script.size - 1));
            avio_flush(progress_avio);
            av_bprint_finalize(&buf_script, NULL);
            if (is_last_report) {
                avio_close(progress_avio);
                progress_avio = NULL;
            }
        }
    
        if (is_last_report) {
    
            int64_t raw= audio_size + video_size + subtitle_size + extra_size;
    
            av_log(NULL, AV_LOG_INFO, "\n");
    
            av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
    
                   video_size / 1024.0,
                   audio_size / 1024.0,
    
                   subtitle_size / 1024.0,
    
               extra_size / 1024.0,
               100.0 * (total_size - raw) / raw);
    
            if (video_size + audio_size + subtitle_size + extra_size == 0) {
                av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
            }
        }
    }
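    /* Feed NULL frames to every encoder that is still open so it flushes its
     * delayed packets, and write those packets to the corresponding muxers. */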
    
    static void flush_encoders(void)
    {
        int i, ret;
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream   *ost = output_streams[i];
    
            AVCodecContext *enc = ost->st->codec;
    
            AVFormatContext *os = output_files[ost->file_index]->ctx;
    
            int stop_encoding = 0;
    
            if (!ost->encoding_needed)
                continue;

            if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
                continue;

            if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
                continue;
    
            for (;;) {
    
                int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
                const char *desc;
                int64_t *size;
    
    
                switch (ost->st->codec->codec_type) {
                case AVMEDIA_TYPE_AUDIO:
    
                    encode = avcodec_encode_audio2;
                    desc   = "Audio";
                    size   = &audio_size;
    
                    break;
                case AVMEDIA_TYPE_VIDEO:
    
                    encode = avcodec_encode_video2;
                    desc   = "Video";
                    size   = &video_size;
                    break;
                default:
                    stop_encoding = 1;
                }
    
                if (encode) {
                    AVPacket pkt;
                    int got_packet;
                    av_init_packet(&pkt);
                    pkt.data = NULL;
                    pkt.size = 0;
    
    
                    update_benchmark(NULL);
    
                    ret = encode(enc, &pkt, NULL, &got_packet);
    
                    update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
    
                        av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
    
                    if (ost->logfile && enc->stats_out) {
                        fprintf(ost->logfile, "%s", enc->stats_out);
                    }
    
                    if (!got_packet) {
    
                        stop_encoding = 1;
                        break;
                    }
    
                    if (pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                    if (pkt.dts != AV_NOPTS_VALUE)
                        pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
    
                    write_frame(os, &pkt, ost);
                }
    
                if (stop_encoding)
                    break;
            }
        }
    }
    
    /*
     * Check whether a packet from ist should be written into ost at this time
     */
    static int check_output_constraints(InputStream *ist, OutputStream *ost)
    {
    
        OutputFile *of = output_files[ost->file_index];
        int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
    
        if (ost->source_index != ist_index)
            return 0;
    
        if (of->start_time && ist->pts < of->start_time)
            return 0;

        return 1;
    }
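    /* Stream copy: forward a demuxed packet to the output without
     * re-encoding, rescaling its timestamps to the output time base and
     * honouring the output start time and recording time limits. */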
    
    static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    {
    
        OutputFile *of = output_files[ost->file_index];
    
        int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
        AVPicture pict;
        AVPacket opkt;
    
        av_init_packet(&opkt);
    
        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
            !ost->copy_initial_nonkeyframes)
            return;
    
    
        if (!ost->frame_number && ist->pts < of->start_time &&
            !ost->copy_prior_start)
            return;
    
    
        if (of->recording_time != INT64_MAX &&
    
            ist->pts >= of->recording_time + of->start_time) {
    
            close_output_stream(ost);
            return;
        }
    
        /* force the input stream PTS */
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            audio_size += pkt->size;
        else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_size += pkt->size;
            ost->sync_opts++;
    
        } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            subtitle_size += pkt->size;
    
        }
    
    
        if (pkt->pts != AV_NOPTS_VALUE)
            opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        else
            opkt.pts = AV_NOPTS_VALUE;
    
        if (pkt->dts == AV_NOPTS_VALUE)
    
            opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    
        else
            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
        opkt.dts -= ost_tb_start_time;
    
        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
        opkt.flags    = pkt->flags;
    
        // FIXME: remove the following 2 lines; they should be replaced by the bitstream filters
    
        if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
           && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
           && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
           && ost->st->codec->codec_id != AV_CODEC_ID_VC1
    
           ) {
            if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
                opkt.destruct = av_destruct_packet;
        } else {
            opkt.data = pkt->data;
            opkt.size = pkt->size;
        }
    
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
            /* store AVPicture in AVPacket, as expected by the output format */
            avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
            opkt.data = (uint8_t *)&pict;
            opkt.size = sizeof(AVPicture);
            opkt.flags |= AV_PKT_FLAG_KEY;
        }
    
        write_frame(of->ctx, &opkt, ost);
    
        ost->st->codec->frame_number++;
        av_free_packet(&opkt);
    }
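    /* With -re on the input file, sleep so packets are consumed no faster
     * than realtime, based on the stream's current DTS. */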
    
    static void rate_emu_sleep(InputStream *ist)
    {
    
        if (input_files[ist->file_index]->rate_emu) {
    
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
    
            int64_t now = av_gettime() - ist->start;
            if (pts > now)
    
                av_usleep(pts - now);
        }
    }
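    /* If the decoder reported no channel layout, fall back to the default
     * layout for the channel count and warn about the guess; returns 0 if no
     * layout could be determined. */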
    
    int guess_input_channel_layout(InputStream *ist)
    {
    
        AVCodecContext *dec = ist->st->codec;
    
        if (!dec->channel_layout) {
            char layout_name[256];
    
            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for  Input Stream "
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    
    static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    {
    
        AVFrame *decoded_frame;
        AVCodecContext *avctx = ist->st->codec;
    
        int i, ret, resample_changed;
    
        AVRational decoded_frame_tb;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
            return AVERROR(ENOMEM);
        else
            avcodec_get_frame_defaults(ist->decoded_frame);
        decoded_frame = ist->decoded_frame;
    
        update_benchmark(NULL);
    
        ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    
        update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    
    
        if (ret >= 0 && avctx->sample_rate <= 0) {
    
            av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
            ret = AVERROR_INVALIDDATA;
        }
    
        if (!*got_output || ret < 0) {
            if (!pkt->size) {
    
                for (i = 0; i < ist->nb_filters; i++)
    
                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
            }
            return ret;
        }
    
    #if 1
        /* increment next_dts to use for the case where the input stream does not
           have timestamps or there are multiple frames in the packet */
        ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
        ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
    #endif
    
        resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                           ist->resample_channels       != avctx->channels               ||
                           ist->resample_channel_layout != decoded_frame->channel_layout ||
                           ist->resample_sample_rate    != decoded_frame->sample_rate;
        if (resample_changed) {
            char layout1[64], layout2[64];
    
            if (!guess_input_channel_layout(ist)) {
                av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                       "layout for Input Stream #%d.%d\n", ist->file_index,
                       ist->st->index);
                exit(1);
    
            }
            decoded_frame->channel_layout = avctx->channel_layout;
    
            av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                         ist->resample_channel_layout);
            av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                         decoded_frame->channel_layout);
    
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
                   ist->resample_channels, layout1,
                   decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
                   avctx->channels, layout2);
    
            ist->resample_sample_fmt     = decoded_frame->format;
            ist->resample_sample_rate    = decoded_frame->sample_rate;
            ist->resample_channel_layout = decoded_frame->channel_layout;
            ist->resample_channels       = avctx->channels;
    
            for (i = 0; i < nb_filtergraphs; i++)
    
                if (ist_in_filtergraph(filtergraphs[i], ist)) {
                    FilterGraph *fg = filtergraphs[i];
                    int j;
                    if (configure_filtergraph(fg) < 0) {
                        av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    
                    }
                    for (j = 0; j < fg->nb_outputs; j++) {
                        OutputStream *ost = fg->outputs[j]->ost;
                        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
                            !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
                            av_buffersink_set_frame_size(ost->filter->filter,
                                                         ost->st->codec->frame_size);
                    }
                }
        }
    
        /* if the decoder provides a pts, use it instead of the last packet pts.
           the decoder could be delaying output by a packet or more. */
        if (decoded_frame->pts != AV_NOPTS_VALUE) {
            ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
            decoded_frame_tb   = avctx->time_base;
        } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
            decoded_frame->pts = decoded_frame->pkt_pts;
            pkt->pts           = AV_NOPTS_VALUE;
            decoded_frame_tb   = ist->st->time_base;
        } else if (pkt->pts != AV_NOPTS_VALUE) {
            decoded_frame->pts = pkt->pts;
            pkt->pts           = AV_NOPTS_VALUE;
            decoded_frame_tb   = ist->st->time_base;
        } else {
            decoded_frame->pts = ist->dts;
            decoded_frame_tb   = AV_TIME_BASE_Q;
        }
    
        if (decoded_frame->pts != AV_NOPTS_VALUE)
            decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                              decoded_frame_tb,
                                              (AVRational){1, ist->st->codec->sample_rate});
    
        for (i = 0; i < ist->nb_filters; i++)
    
            av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
                                   AV_BUFFERSRC_FLAG_PUSH);
    
    
        decoded_frame->pts = AV_NOPTS_VALUE;

        return ret;
    }
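    /* Decode one video packet, attach the best-effort timestamp to the frame
     * and inject it into the filtergraphs, reconfiguring them when the frame
     * size or pixel format changes. */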
    
    static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
    {
    
        AVFrame *decoded_frame;
    
        void *buffer_to_free = NULL;
    
        int i, ret = 0, resample_changed;
    
        int64_t best_effort_timestamp;
        AVRational *frame_sample_aspect;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
            return AVERROR(ENOMEM);
        else
            avcodec_get_frame_defaults(ist->decoded_frame);
        decoded_frame = ist->decoded_frame;
    
        pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    
        update_benchmark(NULL);
    
        ret = avcodec_decode_video2(ist->st->codec,
                                    decoded_frame, got_output, pkt);
    
        update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    
        if (!*got_output || ret < 0) {
            if (!pkt->size) {
    
                for (i = 0; i < ist->nb_filters; i++)
    
                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
            }
            return ret;
        }
    
        quality = same_quant ? decoded_frame->quality : 0;
    
        if(ist->top_field_first>=0)
            decoded_frame->top_field_first = ist->top_field_first;
    
        best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
        if(best_effort_timestamp != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
                    "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
                    ist->st->index, av_ts2str(decoded_frame->pts),
                    av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
                    best_effort_timestamp,
                    av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
                    decoded_frame->key_frame, decoded_frame->pict_type);
        }
    
        pkt->size = 0;
        pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
    
        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    
        resample_changed = ist->resample_width   != decoded_frame->width  ||
                           ist->resample_height  != decoded_frame->height ||
                           ist->resample_pix_fmt != decoded_frame->format;
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
                   decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
    
            ist->resample_width   = decoded_frame->width;
            ist->resample_height  = decoded_frame->height;
            ist->resample_pix_fmt = decoded_frame->format;
    
            for (i = 0; i < nb_filtergraphs; i++)
                if (ist_in_filtergraph(filtergraphs[i], ist) &&
                    configure_filtergraph(filtergraphs[i]) < 0) {
    
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    
        frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    
        for (i = 0; i < ist->nb_filters; i++) {
    
            int changed =      ist->st->codec->width   != ist->filters[i]->filter->outputs[0]->w
                            || ist->st->codec->height  != ist->filters[i]->filter->outputs[0]->h
                            || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
    
            // XXX what an ugly hack
            if (ist->filters[i]->graph->nb_outputs == 1)
                ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
    
            if (!frame_sample_aspect->num)
                *frame_sample_aspect = ist->st->sample_aspect_ratio;
            if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
    
                FrameBuffer      *buf = decoded_frame->opaque;
                AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                            decoded_frame->data, decoded_frame->linesize,
                                            AV_PERM_READ | AV_PERM_PRESERVE,
                                            ist->st->codec->width, ist->st->codec->height,
                                            ist->st->codec->pix_fmt);
    
                avfilter_copy_frame_props(fb, decoded_frame);
                fb->buf->priv           = buf;
                fb->buf->free           = filter_release_buffer;
    
    
                av_assert0(buf->refcount>0);
    
                av_buffersrc_add_ref(ist->filters[i]->filter, fb,
                                     AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
    
                                     AV_BUFFERSRC_FLAG_NO_COPY |
                                     AV_BUFFERSRC_FLAG_PUSH);
    
            } else
            if (av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH) < 0) {
    
                av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
    
        av_free(buffer_to_free);
        return ret;
    }
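    /* Decode a subtitle packet, optionally clip the previous subtitle's
     * duration (-fix_sub_duration), update the sub2video stream and send the
     * result to every subtitle encoder taking this input. */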
    
    static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    {
    
        AVSubtitle subtitle;
        int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                              &subtitle, got_output, pkt);
    
        if (ret < 0 || !*got_output) {
            if (!pkt->size)
                sub2video_flush(ist);
            return ret;
        }
    
        if (ist->fix_sub_duration) {
            if (ist->prev_sub.got_output) {
    
                int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                                     1000, AV_TIME_BASE);
    
                if (end < ist->prev_sub.subtitle.end_display_time) {
                    av_log(ist->st->codec, AV_LOG_DEBUG,
                           "Subtitle duration reduced from %d to %d\n",
                           ist->prev_sub.subtitle.end_display_time, end);
                    ist->prev_sub.subtitle.end_display_time = end;
                }
            }
            FFSWAP(int,        *got_output, ist->prev_sub.got_output);
            FFSWAP(int,        ret,         ist->prev_sub.ret);
            FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        }
    
    
        sub2video_update(ist, &subtitle);
    
        if (!*got_output || !subtitle.num_rects)
            return ret;
    
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
                continue;

            do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
        }
    
        avsubtitle_free(&subtitle);
        return ret;
    }
    
    /* pkt = NULL means EOF (needed to flush decoder buffers) */
    
    static int output_packet(InputStream *ist, const AVPacket *pkt)
    {
        int ret = 0, i;
        int got_output;
        AVPacket avpkt;
        if (!ist->saw_first_ts) {
            ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
            ist->pts = 0;
            if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
                ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                ist->pts = ist->dts; // unused but better to set it to a value that's not totally wrong
            }
            ist->saw_first_ts = 1;
        }
    
        if (ist->next_dts == AV_NOPTS_VALUE)
    
            ist->next_dts = ist->dts;
        if (ist->next_pts == AV_NOPTS_VALUE)
            ist->next_pts = ist->pts;
    
        if (pkt == NULL) {
            /* EOF handling */
            av_init_packet(&avpkt);
            avpkt.data = NULL;
            avpkt.size = 0;
            goto handle_eof;
        } else {
            avpkt = *pkt;
        }
    
        if (pkt->dts != AV_NOPTS_VALUE) {
            ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
            if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
                ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        }
    
        // while we have more to decode or while the decoder did output something on EOF
    
        while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
    
            int64_t duration;
        handle_eof:

            ist->pts = ist->next_pts;
            ist->dts = ist->next_dts;
    
            if (avpkt.size && avpkt.size != pkt->size) {
    
                av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                       "Multiple frames in a packet from stream %d\n", pkt->stream_index);
    
                ist->showed_multi_packet_warning = 1;
    
            }
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            switch (ist->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                ret = decode_audio    (ist, &avpkt, &got_output);
    
                break;
            case AVMEDIA_TYPE_VIDEO:
    
                ret = decode_video    (ist, &avpkt, &got_output);
    
                if (avpkt.duration) {
                    duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
                    duration = ((int64_t)AV_TIME_BASE *
                                    ist->st->codec->time_base.num * ticks) /
                                    ist->st->codec->time_base.den;
                } else
                    duration = 0;
    
                if(ist->dts != AV_NOPTS_VALUE && duration) {
                    ist->next_dts += duration;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
    
                if (got_output)
                    ist->next_pts += duration; //FIXME the duration is not correct in some cases
    
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = transcode_subtitles(ist, &avpkt, &got_output);
                break;
    
            default:
                return -1;
            }

            if (ret < 0)
                return ret;
    
    
    
            avpkt.dts=
            avpkt.pts= AV_NOPTS_VALUE;
    
            // touch data and size only if not EOF
            if (pkt) {
    
                if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = avpkt.size;
    
                avpkt.data += ret;
                avpkt.size -= ret;
    
            }
    
            }
        }
    
    
        if (!ist->decoding_needed) {
            rate_emu_sleep(ist);
    
            ist->dts = ist->next_dts;
    
            switch (ist->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
    
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
    
                                 ist->st->codec->sample_rate;
                break;
            case AVMEDIA_TYPE_VIDEO:
    
                if (pkt->duration) {
                    ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->st->codec->time_base.num != 0) {
                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
    
                    ist->next_dts += ((int64_t)AV_TIME_BASE *
    
                                      ist->st->codec->time_base.num * ticks) /
                                      ist->st->codec->time_base.den;
    
                }
    
            }
    
            ist->pts = ist->dts;
            ist->next_pts = ist->next_dts;
    
        }
    
        for (i = 0; pkt && i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || ost->encoding_needed)
                continue;

            do_streamcopy(ist, ost, pkt);
        }

        return 0;
    }
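    /* Print the SDP description for all output files to stdout. */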
    
    static void print_sdp(void)
    
    {
        char sdp[16384];
        int i;
    
        AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
    
    
    
        for (i = 0; i < nb_output_files; i++)
            avc[i] = output_files[i]->ctx;

        av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
    
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
        av_freep(&avc);
    }
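    /* Open the decoder of an input stream (when decoding is needed) and
     * initialize its timestamp state; on failure an error message is written
     * into the caller-provided buffer. */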
    
    static int init_input_stream(int ist_index, char *error, int error_len)
    {
    
        InputStream *ist = input_streams[ist_index];
    
        if (ist->decoding_needed) {
            AVCodec *codec = ist->dec;
            if (!codec) {
    
                snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                        avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
    
                return AVERROR(EINVAL);
            }
    
            ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;
            if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
    
                ist->st->codec->get_buffer     = codec_get_buffer;
                ist->st->codec->release_buffer = codec_release_buffer;
    
                ist->st->codec->opaque         = &ist->buffer_pool;
            }
    
            if (!av_dict_get(ist->opts, "threads", NULL, 0))
                av_dict_set(&ist->opts, "threads", "auto", 0);
    
            if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
    
                snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
    
                        ist->file_index, ist->st->index);
                return AVERROR(EINVAL);
            }
    
            assert_codec_experimental(ist->st->codec, 0);
            assert_avoptions(ist->opts);
        }
    
        ist->next_pts = AV_NOPTS_VALUE;
    
        ist->next_dts = AV_NOPTS_VALUE;
    
        ist->is_start = 1;
    
        return 0;
    
    }
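    /* Return the input stream this output stream is mapped to, or NULL if it
     * has no direct input source. */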
    
    
    static InputStream *get_input_stream(OutputStream *ost)
    {
    
        if (ost->source_index >= 0)
            return input_streams[ost->source_index];
        return NULL;
    }
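    /* Parse the comma-separated -force_key_frames time list into an array of
     * pts values expressed in the encoder time base. */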
    
    static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                        AVCodecContext *avctx)
    {
    
        char *p;
        int n = 1, i;
        int64_t t;
    
        for (p = kf; *p; p++)
            if (*p == ',')
                n++;
        ost->forced_kf_count = n;
        ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
        if (!ost->forced_kf_pts) {
            av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
    
        for (i = 0; i < n; i++) {
    
            char *next = strchr(p, ',');

            if (next)
                *next++ = 0;
    
            t = parse_time_or_die("force_key_frames", p, 1);
            ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            p = next;
        }
    }
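    /* Warn (once per stream) when a packet arrives for an input stream that
     * was not present when the file header was read. */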
    
    static void report_new_stream(int input_index, AVPacket *pkt)
    {
    
        InputFile *file = input_files[input_index];
        AVStream *st = file->ctx->streams[pkt->stream_index];
    
        if (pkt->stream_index < file->nb_streams_warn)
            return;
        av_log(file->ctx, AV_LOG_WARNING,
               "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
               av_get_media_type_string(st->codec->codec_type),
               input_index, pkt->stream_index,
               pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
        file->nb_streams_warn = pkt->stream_index + 1;
    }
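    /* Prepare everything before the main transcode loop: set up framerate
     * emulation, configure complex filtergraphs and derive the encoder
     * parameters for every output stream. */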
    
    static int transcode_init(void)
    {
        int ret = 0, i, j;
        OutputStream *ost;
    
        AVFormatContext *oc;
    
        AVCodecContext *codec;
    
        InputStream *ist;
        char error[1024];
        int want_sdp = 1;
    
        /* init framerate emulation */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            if (ifile->rate_emu)
                for (j = 0; j < ifile->nb_streams; j++)
    
                    input_streams[j + ifile->ist_index]->start = av_gettime();
        }
    
        /* output stream init */
    
        for (i = 0; i < nb_output_files; i++) {
    
            oc = output_files[i]->ctx;

            if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
                av_dump_format(oc, i, oc->filename, 1);
    
                av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
    
        /* init complex filtergraphs */
        for (i = 0; i < nb_filtergraphs; i++)
            if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
                return ret;
    
        /* for each output stream, we compute the right encoding parameters */
    
        for (i = 0; i < nb_output_streams; i++) {
    
            AVCodecContext *icodec = NULL;
    
            ost = output_streams[i];
            oc  = output_files[ost->file_index]->ctx;
    
            ist = get_input_stream(ost);
    
            if (ost->attachment_filename)