Skip to content
Snippets Groups Projects
ffmpeg.c 218 KiB
Newer Older
  • Learn to ignore specific revisions
  •             big_picture.pict_type = AV_PICTURE_TYPE_I;
                ost->forced_kf_index++;
            }
            update_benchmark(NULL);
            ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                exit_program(1);
            }
    
            if (got_packet) {
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;
    
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                        "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                        av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                        av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
    
                write_frame(s, &pkt, ost);
                frame_size = pkt.size;
                video_size += pkt.size;
                av_free_packet(&pkt);
    
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
            }
        }
    
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
    
        if(--nb_frames)
            goto duplicate_frame;
    
    
        if (vstats_filename && frame_size)
    
            do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    }
    
    
    static double psnr(double d)
    {
        return -10.0 * log(d) / log(10.0);
    }
    
    /*
     * Append one line of per-frame statistics for a video stream to the
     * global vstats file (-vstats / -vstats_file).  The file is opened
     * lazily on the first call; non-video streams are ignored.
     */
    static void do_video_stats(AVFormatContext *os, OutputStream *ost,
                               int frame_size)
    {
        AVCodecContext *enc;
        int frame_num;
        double pts_sec, inst_bitrate, mean_bitrate;

        /* Lazily open the stats file on the first invocation. */
        if (!vstats_file) {
            vstats_file = fopen(vstats_filename, "w");
            if (!vstats_file) {
                perror("fopen");
                exit_program(1);
            }
        }

        enc = ost->st->codec;
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO)
            return;

        frame_num = ost->frame_number;
        /* Per-frame quantizer, converted from lambda units. */
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_num, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file, "f_size= %6d ", frame_size);

        /* Stream time in seconds, clamped away from zero so the average
         * bitrate division below stays well-defined. */
        pts_sec = ost->sync_opts * av_q2d(enc->time_base);
        if (pts_sec < 0.01)
            pts_sec = 0.01;

        inst_bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        mean_bitrate = (double)(video_size * 8) / pts_sec / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)video_size / 1024, pts_sec, inst_bitrate, mean_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
    }
    
    
    /*
     * Drain every filtergraph: reap all frames currently available on the
     * buffer sinks, hand them to the appropriate encoder (video/audio), then
     * request more frames from each graph until nothing succeeds.
     * Returns AVERROR_EOF once every graph has reached EOF, otherwise the
     * last non-EAGAIN error (0 if none).
     *
     * NOTE(review): this region contains repository blame artifacts
     * ("... avatar" / "... committed" lines) and is missing several source
     * lines (declarations such as frame_pts, and braces around the
     * picref->pts handling and the switch) — reconcile against upstream
     * ffmpeg.c before use.
     */
    static int poll_filters(void)
    {
        AVFilterBufferRef *picref;
        AVFrame *filtered_frame = NULL;

        int i, ret, ret_all;
        unsigned nb_success, nb_eof;

        while (1) {
            /* Reap all buffers present in the buffer sinks */

    Clément Bœsch's avatar
    Clément Bœsch committed
            for (i = 0; i < nb_output_streams; i++) {
                OutputStream *ost = output_streams[i];
                OutputFile    *of = output_files[ost->file_index];

    Clément Bœsch's avatar
    Clément Bœsch committed

                /* Skip outputs with no filter attached or already past -t. */
                if (!ost->filter || ost->is_past_recording_time)
                    continue;

                /* Lazily allocate (or reset) the per-stream scratch frame. */
                if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
                    return AVERROR(ENOMEM);
                } else
                    avcodec_get_frame_defaults(ost->filtered_frame);
                filtered_frame = ost->filtered_frame;

                while (1) {
                    AVRational ist_pts_tb = ost->filter->filter->inputs[0]->time_base;

                    /* Fixed-frame-size audio encoders need exact sample counts. */
                    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
                        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
                        ret = av_buffersink_read_samples(ost->filter->filter, &picref,
                                                        ost->st->codec->frame_size);
                    else
    #ifdef SINKA
                        ret = av_buffersink_read(ost->filter->filter, &picref);
    #else
                        ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
                                                           AV_BUFFERSINK_FLAG_NO_REQUEST);
    #endif

    Clément Bœsch's avatar
    Clément Bœsch committed
                    if (ret < 0) {

                        /* EAGAIN/EOF are expected; only warn on real errors. */
                        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {

    Clément Bœsch's avatar
    Clément Bœsch committed
                            char buf[256];
                            av_strerror(ret, buf, sizeof(buf));
                            av_log(NULL, AV_LOG_WARNING,
                                   "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
                        }
                        break;
                    }

                    /* NOTE(review): frame_pts is used but its declaration is
                     * not visible here; likely lost in extraction. */
                    frame_pts = AV_NOPTS_VALUE;

                        /* Rescale the sink pts into the encoder time base and
                         * shift by the output file start time (-ss). */
                        filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
                                                        ost->filter->filter->inputs[0]->time_base,
                                                        ost->st->codec->time_base) -
                                            av_rescale_q(of->start_time,
                                                        AV_TIME_BASE_Q,
                                                        ost->st->codec->time_base);


                        /* Drop frames that land before the requested start. */
                        if (of->start_time && filtered_frame->pts < 0) {
                            avfilter_unref_buffer(picref);
                            continue;
                        }
                    }

    Clément Bœsch's avatar
    Clément Bœsch committed
                    //if (ost->source_index >= 0)
                    //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold


                    /* Dispatch on the media type of the sink's input pad. */
                    switch (ost->filter->filter->inputs[0]->type) {
                    case AVMEDIA_TYPE_VIDEO:
                        avfilter_fill_frame_from_video_buffer_ref(filtered_frame, picref);

    Clément Bœsch's avatar
    Clément Bœsch committed
                        if (!ost->frame_aspect_ratio)
                            ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;

                        do_video_out(of->ctx, ost, filtered_frame,
                                     same_quant ? ost->last_quality :
                                                  ost->st->codec->global_quality);
                        break;

                    case AVMEDIA_TYPE_AUDIO:
                        avfilter_copy_buf_props(filtered_frame, picref);
                        filtered_frame->pts = frame_pts;
                        do_audio_out(of->ctx, ost, filtered_frame);
                        break;

    Clément Bœsch's avatar
    Clément Bœsch committed
                    default:

                        // TODO support subtitle filters

    Clément Bœsch's avatar
    Clément Bœsch committed
                        av_assert0(0);

    Clément Bœsch's avatar
    Clément Bœsch committed
                    avfilter_unref_buffer(picref);
                }

            /* Request frames through all the graphs */
            ret_all = nb_success = nb_eof = 0;
            for (i = 0; i < nb_filtergraphs; i++) {
                ret = avfilter_graph_request_oldest(filtergraphs[i]->graph);
                if (!ret) {
                    nb_success++;
                } else if (ret == AVERROR_EOF) {
                    nb_eof++;
                } else if (ret != AVERROR(EAGAIN)) {
                    char buf[256];
                    av_strerror(ret, buf, sizeof(buf));
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in request_frame(): %s\n", buf);
                    ret_all = ret;
                }
            }
            if (!nb_success)
                break;
            /* Try again if anything succeeded */
        }
        return nb_eof == nb_filtergraphs ? AVERROR_EOF : ret_all;
    
    /*
     * Emit the periodic (every 500ms) one-line progress report — frame count,
     * fps, quantizer, size, time, bitrate, dup/drop — and, on the final call,
     * the end-of-run totals and muxing-overhead summary.
     *
     * NOTE(review): this function is missing its opening brace and several
     * declarations (oc, i, buf, q, vid, pts, frame_number, bitrate, j) plus
     * a number of closing braces — lines were lost in extraction; reconcile
     * against upstream ffmpeg.c before use.
     */
    static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)

        OutputStream *ost;

        int64_t total_size;

        /* static: persists across calls to rate-limit reporting */
        static int64_t last_time = -1;

        static int qp_histogram[52];

        int hours, mins, secs, us;

        if (!print_stats && !is_last_report)
            return;

        /* Throttle intermediate reports to one per 500ms of wall time. */
        if (!is_last_report) {
            if (last_time == -1) {
                last_time = cur_time;
                return;

            if ((cur_time - last_time) < 500000)
                return;
            last_time = cur_time;
        }


        total_size = avio_size(oc->pb);

        if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too

            total_size = avio_tell(oc->pb);

        for (i = 0; i < nb_output_streams; i++) {

            if (!ost->stream_copy && enc->coded_frame)

                q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;

            if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {

    Michael Niedermayer's avatar
    Michael Niedermayer committed
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);

            /* First video stream encountered: print frame/fps/q. */
            if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {

                float fps, t = (cur_time-timer_start) / 1000000.0;

                fps = t > 1 ? frame_number / t : 0;
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                         frame_number, fps < 9.95, fps, q);

                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");

                    /* -qphist: accumulate and print a log2 QP histogram. */
                    int qp = lrintf(q);

                    if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))

                        qp_histogram[qp]++;

                    for (j = 0; j < 32; j++)
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));

                if (enc->flags&CODEC_FLAG_PSNR) {

                    double error, error_sum = 0;
                    double scale, scale_sum = 0;
                    char type[3] = { 'Y','U','V' };

                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");

                    /* Per-plane PSNR; final report uses cumulative error. */
                    for (j = 0; j < 3; j++) {
                        if (is_last_report) {
                            error = enc->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        } else {
                            error = enc->coded_frame->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0;

                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));

                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));

            pts = FFMIN(pts, av_rescale_q(ost->st->pts.val,
                                          ost->st->time_base, AV_TIME_BASE_Q));

        /* Split the AV_TIME_BASE pts into hh:mm:ss.centiseconds. */
        secs = pts / AV_TIME_BASE;
        us = pts % AV_TIME_BASE;
        mins = secs / 60;
        secs %= 60;
        hours = mins / 60;
        mins %= 60;

        bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;

        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "size=%8.0fkB time=", total_size / 1024.0);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "%02d:%02d:%02d.%02d ", hours, mins, secs,
                 (100 * us) / AV_TIME_BASE);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "bitrate=%6.1fkbits/s", bitrate);

        if (nb_frames_dup || nb_frames_drop)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                    nb_frames_dup, nb_frames_drop);

        /* \r keeps the progress line in place on the terminal. */
        av_log(NULL, AV_LOG_INFO, "%s    \r", buf);

        if (is_last_report) {

            int64_t raw= audio_size + video_size + extra_size;

            av_log(NULL, AV_LOG_INFO, "\n");
            av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",

                   video_size / 1024.0,
                   audio_size / 1024.0,
                   extra_size / 1024.0,
                   100.0 * (total_size - raw) / raw

            if(video_size + audio_size + extra_size == 0){
                av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
            }
    
    /*
     * Drain the delayed packets still buffered inside each audio/video
     * encoder at end of stream: call the encoder with a NULL frame until it
     * stops producing packets, writing each packet to its muxer.
     *
     * NOTE(review): this function is missing its opening brace, the
     * declarations of i/ret, several `continue` statements after the early
     * guard conditions, and the enclosing flush loop braces — lines were
     * lost in extraction; reconcile against upstream ffmpeg.c before use.
     */
    static void flush_encoders(void)

        for (i = 0; i < nb_output_streams; i++) {

            OutputStream   *ost = output_streams[i];

            AVCodecContext *enc = ost->st->codec;

            AVFormatContext *os = output_files[ost->file_index]->ctx;

            int stop_encoding = 0;

            /* Streamcopy outputs have no encoder to flush. */
            if (!ost->encoding_needed)

            /* PCM-style audio (frame_size <= 1) buffers nothing. */
            if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)

            /* Rawvideo passthrough muxing buffers nothing either. */
            if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)

                int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
                const char *desc;
                int64_t *size;


                /* Pick the flush entry point and the byte counter to update. */
                switch (ost->st->codec->codec_type) {
                case AVMEDIA_TYPE_AUDIO:

                    encode = avcodec_encode_audio2;
                    desc   = "Audio";
                    size   = &audio_size;

                    break;
                case AVMEDIA_TYPE_VIDEO:

                    encode = avcodec_encode_video2;
                    desc   = "Video";
                    size   = &video_size;
                    break;
                default:
                    stop_encoding = 1;
                }

                if (encode) {
                    AVPacket pkt;
                    int got_packet;
                    av_init_packet(&pkt);
                    pkt.data = NULL;
                    pkt.size = 0;


                    update_benchmark(NULL);

                    /* NULL frame signals the encoder to emit delayed packets. */
                    ret = encode(enc, &pkt, NULL, &got_packet);
                    update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);

                        av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);

                    *size += pkt.size;

                    /* Two-pass: append encoder stats to the pass log. */
                    if (ost->logfile && enc->stats_out) {
                        fprintf(ost->logfile, "%s", enc->stats_out);
                    }

                    /* No packet produced: the encoder is fully drained. */
                    if (!got_packet) {

                        stop_encoding = 1;
                        break;

                    /* Rescale timestamps from encoder to muxer time base. */
                    if (pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                    if (pkt.dts != AV_NOPTS_VALUE)
                        pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

                    write_frame(os, &pkt, ost);
    
    /*
     * Decide whether a packet originating from input stream ist should be
     * written into output stream ost at this time.  Returns 1 to write,
     * 0 to skip (wrong source stream, before the -ss start time, or past
     * the -t recording window — in which case the stream is also flagged
     * as past its recording time).
     */
    static int check_output_constraints(InputStream *ist, OutputStream *ost)
    {
        OutputFile *out_file = output_files[ost->file_index];
        int src_index = input_files[ist->file_index]->ist_index + ist->st->index;

        /* The packet must come from the stream this output is mapped to. */
        if (ost->source_index != src_index)
            return 0;

        /* Drop everything before the requested output start time. */
        if (out_file->start_time && ist->pts < out_file->start_time)
            return 0;

        /* Once the recording window is exhausted, flag the stream and drop. */
        if (out_file->recording_time != INT64_MAX &&
            av_compare_ts(ist->pts, AV_TIME_BASE_Q,
                          out_file->recording_time + out_file->start_time,
                          (AVRational){ 1, 1000000 }) >= 0) {
            ost->is_past_recording_time = 1;
            return 0;
        }

        return 1;
    }
    
    /*
     * Copy one packet from input stream ist to output stream ost without
     * re-encoding: rescale its timestamps into the output time base, run the
     * parser/bitstream fixups where needed, and hand it to the muxer.
     */
    static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    {
        OutputFile *out_file = output_files[ost->file_index];
        int64_t start_ost_tb = av_rescale_q(out_file->start_time, AV_TIME_BASE_Q, ost->st->time_base);
        AVPicture pic;
        AVPacket out_pkt;

        av_init_packet(&out_pkt);

        /* Until the first keyframe arrives, drop packets unless the user
         * explicitly asked for initial non-keyframes to be copied. */
        if (!ost->copy_initial_nonkeyframes &&
            (!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)))
            return;

        /* force the input stream PTS */
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_size += pkt->size;
        } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_size += pkt->size;
            ost->sync_opts++;
        }

        /* Rescale pts into the output time base, shifted by -ss. */
        out_pkt.pts = pkt->pts == AV_NOPTS_VALUE
                    ? AV_NOPTS_VALUE
                    : av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - start_ost_tb;

        /* dts: prefer the packet's own dts, fall back to the input stream's
         * running dts estimate when the packet carries none. */
        if (pkt->dts != AV_NOPTS_VALUE)
            out_pkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
        else
            out_pkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
        out_pkt.dts -= start_ost_tb;

        out_pkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
        out_pkt.flags    = pkt->flags;

        // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
        if (ost->st->codec->codec_id != CODEC_ID_H264       &&
            ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO &&
            ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO &&
            ost->st->codec->codec_id != CODEC_ID_VC1) {
            /* av_parser_change may allocate a new payload; if so the packet
             * owns it and must free it via av_destruct_packet. */
            if (av_parser_change(ist->st->parser, ost->st->codec, &out_pkt.data, &out_pkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
                out_pkt.destruct = av_destruct_packet;
        } else {
            out_pkt.data = pkt->data;
            out_pkt.size = pkt->size;
        }

        if (out_file->ctx->oformat->flags & AVFMT_RAWPICTURE) {
            /* store AVPicture in AVPacket, as expected by the output format */
            avpicture_fill(&pic, out_pkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
            out_pkt.data = (uint8_t *)&pic;
            out_pkt.size = sizeof(AVPicture);
            out_pkt.flags |= AV_PKT_FLAG_KEY;
        }

        write_frame(out_file->ctx, &out_pkt, ost);

        ost->st->codec->frame_number++;
        av_free_packet(&out_pkt);
    }
    
    /*
     * When rate emulation (-re) is enabled for this input file, sleep until
     * wall-clock time has caught up with the stream's current DTS, so input
     * is consumed at native speed.
     */
    static void rate_emu_sleep(InputStream *ist)
    {
        int64_t target_us, elapsed_us;

        if (!input_files[ist->file_index]->rate_emu)
            return;

        /* Convert the stream DTS to microseconds and compare with the
         * wall-clock time elapsed since the input was opened. */
        target_us  = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
        elapsed_us = av_gettime() - ist->start;
        if (target_us > elapsed_us)
            usleep(target_us - elapsed_us);
    }
    
    
    /*
     * If the decoder did not report a channel layout, derive the default
     * layout for its channel count and store it on the decoder context,
     * logging the guess at warning level.
     * Returns 1 when a layout is present or was successfully guessed,
     * 0 when no default layout exists for the channel count.
     */
    static int guess_input_channel_layout(InputStream *ist)
    {
        AVCodecContext *dec = ist->st->codec;

        if (!dec->channel_layout) {
            char layout_name[256];

            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
            /* fix: doubled space ("for  Input") in the original message */
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
        }
        return 1;
    }
    
    
    /*
     * Decode one audio packet into ist->decoded_frame, maintain the input
     * stream's pts/dts estimates, reconfigure filtergraphs when the sample
     * format/rate/channel layout changes, and push the decoded frame into
     * every filter this input feeds.
     *
     * NOTE(review): this function is missing its opening brace, the braces
     * and return around the `!*got_output` path, and its closing
     * `return ret; }` — lines were lost in extraction; reconcile against
     * upstream ffmpeg.c before use.
     */
    static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)

        AVFrame *decoded_frame;
        AVCodecContext *avctx = ist->st->codec;

        int i, ret, resample_changed;

        /* Lazily allocate (or reset) the reusable decode frame. */
        if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
            return AVERROR(ENOMEM);
        else
            avcodec_get_frame_defaults(ist->decoded_frame);
        decoded_frame = ist->decoded_frame;

        update_benchmark(NULL);

        ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);

        update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

        if (avctx->sample_rate <= 0) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
            return AVERROR_INVALIDDATA;
        }


        if (!*got_output) {
            /* no audio frame */

            /* Empty packet at EOF: propagate flush to the buffer sources. */
            if (!pkt->size)
                for (i = 0; i < ist->nb_filters; i++)

                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL,
                                         AV_BUFFERSRC_FLAG_NO_COPY);

        /* if the decoder provides a pts, use it instead of the last packet pts.
           the decoder could be delaying output by a packet or more. */
        if (decoded_frame->pts != AV_NOPTS_VALUE)

            ist->dts = ist->next_dts = ist->pts = ist->next_pts = decoded_frame->pts;

        else if (pkt->pts != AV_NOPTS_VALUE) {
            decoded_frame->pts = pkt->pts;
            pkt->pts           = AV_NOPTS_VALUE;
        }else
            decoded_frame->pts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);


        /* increment next_dts to use for the case where the input stream does not

           have timestamps or there are multiple frames in the packet */
        ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;

        ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;

        /* Detect any change in audio parameters that requires the
         * filtergraphs feeding from this stream to be rebuilt. */
        resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                           ist->resample_channels       != avctx->channels               ||
                           ist->resample_channel_layout != decoded_frame->channel_layout ||
                           ist->resample_sample_rate    != decoded_frame->sample_rate;
        if (resample_changed) {
            char layout1[64], layout2[64];

            if (!guess_input_channel_layout(ist)) {
                av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                       "layout for Input Stream #%d.%d\n", ist->file_index,
                       ist->st->index);
                exit_program(1);
            }
            decoded_frame->channel_layout = avctx->channel_layout;

            av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                         ist->resample_channel_layout);
            av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                         decoded_frame->channel_layout);

            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
                   ist->resample_channels, layout1,
                   decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
                   avctx->channels, layout2);

            /* Remember the new parameters for the next change detection. */
            ist->resample_sample_fmt     = decoded_frame->format;
            ist->resample_sample_rate    = decoded_frame->sample_rate;
            ist->resample_channel_layout = decoded_frame->channel_layout;
            ist->resample_channels       = avctx->channels;

            for (i = 0; i < nb_filtergraphs; i++)
                if (ist_in_filtergraph(filtergraphs[i], ist) &&
                    configure_filtergraph(filtergraphs[i]) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
        }


        /* Feed the decoded frame to every filter fed by this input. */
        for (i = 0; i < ist->nb_filters; i++)

            av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0);
    
    /*
     * Decode one video packet into ist->decoded_frame, derive a best-effort
     * timestamp, rebuild filtergraphs on frame geometry/format changes, and
     * inject the frame into every filter this input feeds (using the dr1
     * zero-copy path when possible).
     *
     * NOTE(review): this function is missing its opening brace and the
     * declarations of decoded_frame, quality and frame_sample_aspect, plus
     * several closing braces and the `!*got_output` return path — lines were
     * lost in extraction; reconcile against upstream ffmpeg.c before use.
     */
    static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)

        void *buffer_to_free = NULL;

        int i, ret = 0, resample_changed;

        int64_t best_effort_timestamp;

        /* Lazily allocate (or reset) the reusable decode frame. */
        if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))

            return AVERROR(ENOMEM);

        else
            avcodec_get_frame_defaults(ist->decoded_frame);
        decoded_frame = ist->decoded_frame;

        /* Feed the stream's dts estimate to the decoder via the packet. */
        pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);

        update_benchmark(NULL);

        ret = avcodec_decode_video2(ist->st->codec,
                                    decoded_frame, got_output, pkt);

        update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);

        quality = same_quant ? decoded_frame->quality : 0;

        if (!*got_output) {
            /* no picture yet */

            /* Empty packet at EOF: propagate flush to the buffer sources. */
            if (!pkt->size)
                for (i = 0; i < ist->nb_filters; i++)

                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL, AV_BUFFERSRC_FLAG_NO_COPY);

        best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
        if(best_effort_timestamp != AV_NOPTS_VALUE)

            ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        pkt->size = 0;
        pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);

        rate_emu_sleep(ist);

        /* Prefer the container's aspect ratio over the codec's. */
        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;


        /* Detect geometry/format changes that require filtergraph rebuild. */
        resample_changed = ist->resample_width   != decoded_frame->width  ||
                           ist->resample_height  != decoded_frame->height ||
                           ist->resample_pix_fmt != decoded_frame->format;
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
                   decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

            ist->resample_width   = decoded_frame->width;
            ist->resample_height  = decoded_frame->height;
            ist->resample_pix_fmt = decoded_frame->format;


            for (i = 0; i < nb_filtergraphs; i++)
                if (ist_in_filtergraph(filtergraphs[i], ist) &&
                    configure_filtergraph(filtergraphs[i]) < 0) {

                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);

        frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");

        for (i = 0; i < ist->nb_filters; i++) {

            /* True if this filter's configured geometry no longer matches. */
            int changed =      ist->st->codec->width   != ist->filters[i]->filter->outputs[0]->w
                            || ist->st->codec->height  != ist->filters[i]->filter->outputs[0]->h
                            || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;

            // XXX what an ugly hack
            if (ist->filters[i]->graph->nb_outputs == 1)
                ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;


            if (!frame_sample_aspect->num)
                *frame_sample_aspect = ist->st->sample_aspect_ratio;
            /* dr1 zero-copy path: wrap the decoder's own buffer in a filter
             * buffer ref instead of copying the pixel data. */
            if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {

                FrameBuffer      *buf = decoded_frame->opaque;
                AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                            decoded_frame->data, decoded_frame->linesize,
                                            AV_PERM_READ | AV_PERM_PRESERVE,
                                            ist->st->codec->width, ist->st->codec->height,
                                            ist->st->codec->pix_fmt);

                avfilter_copy_frame_props(fb, decoded_frame);
                fb->buf->priv           = buf;
                fb->buf->free           = filter_release_buffer;


                av_assert0(buf->refcount>0);

                av_buffersrc_add_ref(ist->filters[i]->filter, fb,
                                     AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
                                     AV_BUFFERSRC_FLAG_NO_COPY);

            /* Fallback: copy the frame into the filtergraph. */
            if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0)<0) {

                av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
                exit_program(1);

        av_free(buffer_to_free);
        return ret;
    
    /*
     * Decode one subtitle packet and pass the decoded AVSubtitle to every
     * output stream mapped to this input that needs encoding.
     *
     * NOTE(review): this function is missing its opening brace, the return
     * after the `!*got_output` check, and its closing braces (including
     * freeing the subtitle) — lines were lost in extraction; reconcile
     * against upstream ffmpeg.c before use.
     */
    static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)

        AVSubtitle subtitle;
        int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                              &subtitle, got_output, pkt);
        if (ret < 0)
            return ret;
        if (!*got_output)


        rate_emu_sleep(ist);

        /* Fan the subtitle out to every matching encoding output. */
        for (i = 0; i < nb_output_streams; i++) {

            OutputStream *ost = output_streams[i];


            if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
                continue;


            do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
    
    
    /* pkt = NULL means EOF (needed to flush decoder buffers) */

    /* Process one demuxed packet from input stream @ist: decode it (audio,
     * video or subtitle) and feed the result to filters/encoders, or — for
     * stream copy — forward the packet and advance the synthetic timestamps.
     * All internal timestamps (ist->dts/pts/next_dts/next_pts) are kept in
     * AV_TIME_BASE_Q units, rescaled from the stream time base.
     *
     * NOTE(review): extraction lost several lines of this function — the
     * opening brace, the declarations of `ret`, `got_output`, `duration` and
     * `i`, the branch copying `pkt` into `avpkt` when pkt != NULL, the
     * default/break arms of both switch statements, and multiple closing
     * braces. Treat the upstream ffmpeg.c as authoritative. */
    static int output_packet(InputStream *ist, const AVPacket *pkt)

        AVPacket avpkt;

        /* First packet seen on this stream: establish a starting DTS.
         * With B-frames the first DTS is pushed back by has_b_frames frame
         * durations (derived from the average frame rate). */
        if (!ist->saw_first_ts) {
            ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
            ist->pts = 0;
            if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
                ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
            }
            ist->saw_first_ts = 1;
        }

        if (ist->next_dts == AV_NOPTS_VALUE)
            ist->next_dts = ist->dts;

        if (ist->next_pts == AV_NOPTS_VALUE)
            ist->next_pts = ist->pts;

            /* EOF flush path: an empty packet tells the decoder to drain. */
            av_init_packet(&avpkt);

            avpkt.data = NULL;
            avpkt.size = 0;

        /* Resync our AV_TIME_BASE_Q timestamps from the packet DTS. */
        if (pkt->dts != AV_NOPTS_VALUE) {

            ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

            /* For decoded video the decoder output order defines pts;
             * otherwise pts can track dts directly. */
            if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)

                ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        }

        // while we have more to decode or while the decoder did output something on EOF

        while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {

            ist->pts = ist->next_pts;

            ist->dts = ist->next_dts;

            /* A decoder consuming less than the whole packet means the
             * packet carries multiple frames; warn once loudly. */
            if (avpkt.size && avpkt.size != pkt->size) {

                av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                       "Multiple frames in a packet from stream %d\n", pkt->stream_index);

                ist->showed_multi_packet_warning = 1;
            }

            switch (ist->st->codec->codec_type) {

            case AVMEDIA_TYPE_AUDIO:

                ret = decode_audio    (ist, &avpkt, &got_output);

                break;
            case AVMEDIA_TYPE_VIDEO:

                ret = decode_video    (ist, &avpkt, &got_output);

                /* Derive a per-frame duration: prefer the container's packet
                 * duration, else fall back to the codec time base (times
                 * repeat_pict/ticks_per_frame for field-repeat content). */
                if (avpkt.duration) {
                    duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);

                } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {

                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
                    duration = ((int64_t)AV_TIME_BASE *
                                    ist->st->codec->time_base.num * ticks) /
                                    ist->st->codec->time_base.den;
                } else
                    duration = 0;

                if(ist->dts != AV_NOPTS_VALUE && duration) {
                    ist->next_dts += duration;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;

                if (got_output)
                    ist->next_pts += duration; //FIXME the duration is not correct in some cases

                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = transcode_subtitles(ist, &avpkt, &got_output);

            // touch data and size only if not EOF
            /* Advance past the bytes the decoder consumed; non-audio
             * decoders are treated as consuming the whole remainder. */
            if (pkt) {
                if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = avpkt.size;
                avpkt.data += ret;
                avpkt.size -= ret;
            }

        /* handle stream copy */
        /* No decoding: synthesize next_dts from frame size / duration so the
         * copied stream still carries monotonically advancing timestamps. */
        if (!ist->decoding_needed) {
            rate_emu_sleep(ist);

            switch (ist->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:

                /* One packet == frame_size samples at sample_rate Hz. */
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /

                                 ist->st->codec->sample_rate;
                break;
            case AVMEDIA_TYPE_VIDEO:

                    ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);

                } else if(ist->st->codec->time_base.num != 0) {

                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;

                    ist->next_dts += ((int64_t)AV_TIME_BASE *

                                      ist->st->codec->time_base.num * ticks) /
                                      ist->st->codec->time_base.den;

            ist->pts = ist->dts;
            ist->next_pts = ist->next_dts;

        /* Forward the original packet to every stream-copy output. */
        for (i = 0; pkt && i < nb_output_streams; i++) {

            OutputStream *ost = output_streams[i];

            if (!check_output_constraints(ist, ost) || ost->encoding_needed)
                continue;

            do_streamcopy(ist, ost, pkt);
    /* Build and print an SDP description covering all output files
     * (used when streaming over RTP).
     *
     * NOTE(review): lines were lost in extraction — the braces, the loop
     * variable declaration, the `sdp` buffer declaration, the loop body
     * assigning `avc[i]`, the printf of the SDP text, and the av_freep of
     * `avc` are missing here. See upstream ffmpeg.c. */
    static void print_sdp(void)

        AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);

        for (i = 0; i < nb_output_files; i++)

        av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
    
    /* (git-blame annotation "Luca Barbato committed" removed — extraction
       artifact, not part of the source) */
        fflush(stdout);
    
    /* Open the decoder for input stream @ist_index when decoding is needed,
     * wiring up DR1 buffer callbacks for video and defaulting the decoder
     * thread count to "auto".  On failure writes a human-readable message
     * into @error (at most @error_len bytes) and returns AVERROR(EINVAL).
     *
     * NOTE(review): the function's opening brace and the trailing
     * `return 0;` / closing brace were lost in extraction. */
    static int init_input_stream(int ist_index, char *error, int error_len)

        InputStream *ist = input_streams[ist_index];

        if (ist->decoding_needed) {
            AVCodec *codec = ist->dec;
            if (!codec) {

                snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",

                        avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
                return AVERROR(EINVAL);
            }

            /* Direct rendering only when the codec supports it and we are
             * not deinterlacing (which needs our own buffers). */
            ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;

            if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {

                ist->st->codec->get_buffer     = codec_get_buffer;
                ist->st->codec->release_buffer = codec_release_buffer;
                ist->st->codec->opaque         = ist;
            }

            /* Respect an explicit -threads option; otherwise let the codec
             * pick its own thread count. */
            if (!av_dict_get(ist->opts, "threads", NULL, 0))
                av_dict_set(&ist->opts, "threads", "auto", 0);

            if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {

                snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",

                        ist->file_index, ist->st->index);
                return AVERROR(EINVAL);
            }
            assert_codec_experimental(ist->st->codec, 0);
            /* Abort on any codec option that was not consumed. */
            assert_avoptions(ist->opts);
        }

        /* Timestamps are unknown until the first packet is seen. */
        ist->next_pts = AV_NOPTS_VALUE;

        ist->next_dts = AV_NOPTS_VALUE;
    
    /* Resolve which input stream feeds @ost.  A directly mapped output has an
     * explicit source index into the global input_streams table; a
     * filtergraph-fed output is matched to the first graph input whose media
     * type equals the output's.  Returns NULL when nothing matches. */
    static InputStream *get_input_stream(OutputStream *ost)
    {
        FilterGraph *fg;
        int j;

        if (ost->source_index >= 0)
            return input_streams[ost->source_index];

        if (!ost->filter)
            return NULL;

        fg = ost->filter->graph;
        for (j = 0; j < fg->nb_inputs; j++) {
            InputStream *candidate = fg->inputs[j]->ist;

            if (candidate->st->codec->codec_type == ost->st->codec->codec_type)
                return candidate;
        }

        return NULL;
    }
    
    
    static int transcode_init(void)
    
    /* (git-blame annotation "Fabrice Bellard committed" removed — extraction
       artifact, not part of the source) */
    {
    
        int ret = 0, i, j, k;
    
        AVFormatContext *oc;
    
    /* (git-blame annotation "Fabrice Bellard committed" removed — extraction
       artifact, not part of the source) */
        AVCodecContext *codec, *icodec;
    
        InputStream *ist;
    
        /* init framerate emulation */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            if (ifile->rate_emu)
                for (j = 0; j < ifile->nb_streams; j++)
    
                    input_streams[j + ifile->ist_index]->start = av_gettime();
    
    /* (git-blame annotation "Fabrice Bellard committed" removed — extraction
       artifact, not part of the source) */
    
        /* output stream init */
    
        for (i = 0; i < nb_output_files; i++) {
    
            if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
                av_dump_format(oc, i, oc->filename, 1);
    
                av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
    
    /* (git-blame annotation "Fabrice Bellard committed" removed — extraction
       artifact, not part of the source) */
        }
    
    
        /* init complex filtergraphs */
        for (i = 0; i < nb_filtergraphs; i++)
            if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
                return ret;
    
    
    /* (git-blame annotation "Fabrice Bellard committed" removed — extraction
       artifact, not part of the source) */
        /* for each output stream, we compute the right encoding parameters */
    
        for (i = 0; i < nb_output_streams; i++) {