/*
 * ffmpeg.c (excerpt) — FFmpeg command-line transcoder.
 * NOTE: this text was recovered from a repository blame-view scrape;
 * line gaps and annotation artifacts may remain in unedited regions.
 */
  •         update_benchmark(NULL);
    
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            }
    
    
            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
    
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    
                exit_program(1);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            }
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            if (got_packet) {
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
                }
    
    
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                        "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                        av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                        av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
                }
    
                frame_size = pkt.size;
    
                write_frame(s, &pkt, ost);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
    
        if (vstats_filename && frame_size)
    
            do_video_stats(ost, frame_size);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    }
    
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
    static double psnr(double d)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    {
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
        return -10.0 * log(d) / log(10.0);
    
    static void do_video_stats(OutputStream *ost, int frame_size)
    
        AVCodecContext *enc;
        int frame_number;
        double ti1, bitrate, avg_bitrate;
    
        /* this is executed just the first time do_video_stats is called */
        if (!vstats_file) {
            vstats_file = fopen(vstats_filename, "w");
            if (!vstats_file) {
                perror("fopen");
    
                exit_program(1);
    
        enc = ost->st->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
            frame_number = ost->st->nb_frames;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
    
            if (enc->flags&CODEC_FLAG_PSNR)
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
    
            fprintf(vstats_file,"f_size= %6d ", frame_size);
            /* compute pts value */
    
            ti1 = ost->st->pts.val * av_q2d(enc->time_base);
    
            if (ti1 < 0.01)
                ti1 = 0.01;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
    
            avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
    
            fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
    
                   (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
    
            fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
    
     * Get and encode new output from any of the filtergraphs, without causing
     * activity.
     *
     * @return  0 for success, <0 for severe errors
     */
    static int reap_filters(void)
    
    {
        AVFrame *filtered_frame = NULL;
    
        /* Reap all buffers present in the buffer sinks */
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            OutputFile    *of = output_files[ost->file_index];
    
            AVFilterContext *filter;
            AVCodecContext *enc = ost->st->codec;
    
            int ret = 0;
    
            if (!ost->filter)
                continue;
    
            if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
    
                return AVERROR(ENOMEM);
    
            filtered_frame = ost->filtered_frame;
    
            while (1) {
    
                ret = av_buffersink_get_frame_flags(filter, filtered_frame,
    
                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                        av_log(NULL, AV_LOG_WARNING,
    
                               "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
    
                if (ost->finished) {
                    av_frame_unref(filtered_frame);
                    continue;
                }
    
                frame_pts = AV_NOPTS_VALUE;
    
                if (filtered_frame->pts != AV_NOPTS_VALUE) {
    
                    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    
                    filtered_frame->pts = frame_pts =
                        av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                        av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    
                //if (ost->source_index >= 0)
                //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
    
    
                case AVMEDIA_TYPE_VIDEO:
                    filtered_frame->pts = frame_pts;
    
                    if (!ost->frame_aspect_ratio.num)
    
                        enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
    
                    if (debug_ts) {
                        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s time_base:%d/%d\n",
                                av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                                enc->time_base.num, enc->time_base.den);
                    }
    
    
                    do_video_out(of->ctx, ost, filtered_frame);
    
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    filtered_frame->pts = frame_pts;
    
                    if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
                        enc->channels != av_frame_get_channels(filtered_frame)) {
    
                        av_log(NULL, AV_LOG_ERROR,
                               "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                        break;
                    }
    
                    do_audio_out(of->ctx, ost, filtered_frame);
                    break;
                default:
                    // TODO support subtitle filters
                    av_assert0(0);
                }
    
    
                av_frame_unref(filtered_frame);
    
    static void print_final_stats(int64_t total_size)
    {
        uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
        uint64_t subtitle_size = 0;
        uint64_t data_size = 0;
        float percent = -1.0;
    
    
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            switch (ost->st->codec->codec_type) {
                case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
                case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
                case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
                default:                 other_size += ost->data_size; break;
            }
            extra_size += ost->st->codec->extradata_size;
            data_size  += ost->data_size;
        }
    
        if (data_size && total_size >= data_size)
            percent = 100.0 * (total_size - data_size) / data_size;
    
        av_log(NULL, AV_LOG_INFO, "\n");
        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
               video_size / 1024.0,
               audio_size / 1024.0,
               subtitle_size / 1024.0,
               other_size / 1024.0,
               extra_size / 1024.0);
        if (percent >= 0.0)
            av_log(NULL, AV_LOG_INFO, "%f%%", percent);
        else
            av_log(NULL, AV_LOG_INFO, "unknown");
        av_log(NULL, AV_LOG_INFO, "\n");
    
    
        /* print verbose per-stream stats */
        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
            uint64_t total_packets = 0, total_size = 0;
    
            av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
                   i, f->ctx->filename);
    
            for (j = 0; j < f->nb_streams; j++) {
                InputStream *ist = input_streams[f->ist_index + j];
                enum AVMediaType type = ist->st->codec->codec_type;
    
                total_size    += ist->data_size;
                total_packets += ist->nb_packets;
    
                av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                       i, j, media_type_string(type));
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                       ist->nb_packets, ist->data_size);
    
                if (ist->decoding_needed) {
                    av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                           ist->frames_decoded);
                    if (type == AVMEDIA_TYPE_AUDIO)
                        av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                    av_log(NULL, AV_LOG_VERBOSE, "; ");
                }
    
                av_log(NULL, AV_LOG_VERBOSE, "\n");
            }
    
            av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
                   total_packets, total_size);
        }
    
        for (i = 0; i < nb_output_files; i++) {
            OutputFile *of = output_files[i];
            uint64_t total_packets = 0, total_size = 0;
    
            av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
                   i, of->ctx->filename);
    
            for (j = 0; j < of->ctx->nb_streams; j++) {
                OutputStream *ost = output_streams[of->ost_index + j];
                enum AVMediaType type = ost->st->codec->codec_type;
    
                total_size    += ost->data_size;
                total_packets += ost->packets_written;
    
                av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                       i, j, media_type_string(type));
                if (ost->encoding_needed) {
                    av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                           ost->frames_encoded);
                    if (type == AVMEDIA_TYPE_AUDIO)
                        av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                    av_log(NULL, AV_LOG_VERBOSE, "; ");
                }
    
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                       ost->packets_written, ost->data_size);
    
                av_log(NULL, AV_LOG_VERBOSE, "\n");
            }
    
            av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
                   total_packets, total_size);
        }
    
        if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
            av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
        }
    }
    
    
    static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    
        AVBPrint buf_script;
    
        OutputStream *ost;
        AVFormatContext *oc;
        int64_t total_size;
        AVCodecContext *enc;
        int frame_number, vid, i;
    
        static int64_t last_time = -1;
        static int qp_histogram[52];
    
        int hours, mins, secs, us;
    
        if (!print_stats && !is_last_report && !progress_avio)
    
        if (!is_last_report) {
            if (last_time == -1) {
                last_time = cur_time;
    
    Ramiro Polla's avatar
    Ramiro Polla committed
            }
    
            if ((cur_time - last_time) < 500000)
                return;
            last_time = cur_time;
    
        total_size = avio_size(oc->pb);
    
        if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            total_size = avio_tell(oc->pb);
    
        av_bprint_init(&buf_script, 0, 1);
    
        for (i = 0; i < nb_output_streams; i++) {
    
            enc = ost->st->codec;
    
            if (!ost->stream_copy && enc->coded_frame)
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
    
            if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
    
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
    
            if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
                float fps, t = (cur_time-timer_start) / 1000000.0;
    
                frame_number = ost->frame_number;
    
                fps = t > 1 ? frame_number / t : 0;
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                         frame_number, fps < 9.95, fps, q);
                av_bprintf(&buf_script, "frame=%d\n", frame_number);
                av_bprintf(&buf_script, "fps=%.1f\n", fps);
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                if (is_last_report)
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                if (qp_hist) {
    
                    int j;
                    int qp = lrintf(q);
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    for (j = 0; j < 32; j++)
    
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
                }
    
                if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    double error, error_sum = 0;
                    double scale, scale_sum = 0;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    char type[3] = { 'Y','U','V' };
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    for (j = 0; j < 3; j++) {
                        if (is_last_report) {
                            error = enc->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        } else {
                            error = enc->coded_frame->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                        if (j)
                            scale /= 4;
    
                        error_sum += error;
                        scale_sum += scale;
    
                        p = psnr(error / scale);
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                        av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
    
                                   ost->file_index, ost->index, type[j] | 32, p);
    
                    p = psnr(error_sum / scale_sum);
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
    
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                               ost->file_index, ost->index, p);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
            }
    
            /* compute min output value */
    
            if (ost->st->pts.val != AV_NOPTS_VALUE)
    
                pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
                                              ost->st->time_base, AV_TIME_BASE_Q));
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    
        secs = pts / AV_TIME_BASE;
        us = pts % AV_TIME_BASE;
        mins = secs / 60;
        secs %= 60;
        hours = mins / 60;
        mins %= 60;
    
        bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    
        if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                     "size=N/A time=");
        else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                     "size=%8.0fkB time=", total_size / 1024.0);
    
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "%02d:%02d:%02d.%02d ", hours, mins, secs,
                 (100 * us) / AV_TIME_BASE);
    
        if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                  "bitrate=N/A");
        else             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                  "bitrate=%6.1fkbits/s", bitrate);
        if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
        else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
                   hours, mins, secs, us);
    
        if (nb_frames_dup || nb_frames_drop)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                    nb_frames_dup, nb_frames_drop);
    
        av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
        av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
    
        if (print_stats || is_last_report) {
    
            if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
                fprintf(stderr, "%s    \r", buf);
            } else
                av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
    
        fflush(stderr);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    
        if (progress_avio) {
            av_bprintf(&buf_script, "progress=%s\n",
                       is_last_report ? "end" : "continue");
            avio_write(progress_avio, buf_script.str,
                       FFMIN(buf_script.len, buf_script.size - 1));
            avio_flush(progress_avio);
            av_bprint_finalize(&buf_script, NULL);
            if (is_last_report) {
                avio_close(progress_avio);
                progress_avio = NULL;
    
        if (is_last_report)
            print_final_stats(total_size);
    
    static void flush_encoders(void)
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream   *ost = output_streams[i];
    
            AVCodecContext *enc = ost->st->codec;
    
            AVFormatContext *os = output_files[ost->file_index]->ctx;
    
            int stop_encoding = 0;
    
            if (!ost->encoding_needed)
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
    
            if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            for (;;) {
    
                int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
                const char *desc;
    
    
                switch (ost->st->codec->codec_type) {
                case AVMEDIA_TYPE_AUDIO:
    
                    encode = avcodec_encode_audio2;
                    desc   = "Audio";
    
                    break;
                case AVMEDIA_TYPE_VIDEO:
    
                    encode = avcodec_encode_video2;
                    desc   = "Video";
                    break;
                default:
                    stop_encoding = 1;
                }
    
                if (encode) {
                    AVPacket pkt;
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
                    int pkt_size;
    
                    int got_packet;
                    av_init_packet(&pkt);
                    pkt.data = NULL;
                    pkt.size = 0;
    
    
                    update_benchmark(NULL);
    
                    ret = encode(enc, &pkt, NULL, &got_packet);
    
                    update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
    
                        av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
    
                        exit_program(1);
    
                    }
                    if (ost->logfile && enc->stats_out) {
                        fprintf(ost->logfile, "%s", enc->stats_out);
                    }
    
                    if (!got_packet) {
    
                        stop_encoding = 1;
                        break;
                    }
    
                    if (ost->finished & MUXER_FINISHED) {
    
                    if (pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                    if (pkt.dts != AV_NOPTS_VALUE)
                        pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
    
                    if (pkt.duration > 0)
                        pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
                    pkt_size = pkt.size;
    
                    write_frame(os, &pkt, ost);
    
                    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
                        do_video_stats(ost, pkt_size);
    
                if (stop_encoding)
    
    /*
     * Check whether a packet from ist should be written into ost at this time
     */
    static int check_output_constraints(InputStream *ist, OutputStream *ost)
    {
    
        OutputFile *of = output_files[ost->file_index];
        int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
    
        if (ost->source_index != ist_index)
            return 0;
    
        if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
    
    static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    {
    
        OutputFile *of = output_files[ost->file_index];
    
        InputFile   *f = input_files [ist->file_index];
    
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    
        int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
    
        av_init_packet(&opkt);
    
        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
            !ost->copy_initial_nonkeyframes)
            return;
    
    
            if (!ost->frame_number && ist->pts < start_time &&
    
                !ost->copy_prior_start)
                return;
        } else {
            if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
                !ost->copy_prior_start)
                return;
        }
    
        if (of->recording_time != INT64_MAX &&
    
            ist->pts >= of->recording_time + start_time) {
    
            close_output_stream(ost);
    
        if (f->recording_time != INT64_MAX) {
            start_time = f->ctx->start_time;
            if (f->start_time != AV_NOPTS_VALUE)
                start_time += f->start_time;
    
            if (ist->pts >= f->recording_time + start_time) {
                close_output_stream(ost);
    
        /* force the input stream PTS */
    
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        if (pkt->pts != AV_NOPTS_VALUE)
            opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        else
            opkt.pts = AV_NOPTS_VALUE;
    
        if (pkt->dts == AV_NOPTS_VALUE)
    
            opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    
        else
            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
        opkt.dts -= ost_tb_start_time;
    
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
            int duration = av_get_audio_frame_duration(ist->st->codec, pkt->size);
            if(!duration)
                duration = ist->st->codec->frame_size;
            opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                                   (AVRational){1, ist->st->codec->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
    
                                                   ost->st->time_base) - ost_tb_start_time;
    
        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
        opkt.flags    = pkt->flags;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
        // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    
        if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
           && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
           && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
           && ost->st->codec->codec_id != AV_CODEC_ID_VC1
    
            if (av_parser_change(ost->parser, ost->st->codec,
                                 &opkt.data, &opkt.size,
                                 pkt->data, pkt->size,
                                 pkt->flags & AV_PKT_FLAG_KEY)) {
    
                opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
                if (!opkt.buf)
    
                    exit_program(1);
    
        } else {
            opkt.data = pkt->data;
            opkt.size = pkt->size;
    
        av_copy_packet_side_data(&opkt, pkt);
    
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
            /* store AVPicture in AVPacket, as expected by the output format */
            avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
            opkt.data = (uint8_t *)&pict;
            opkt.size = sizeof(AVPicture);
            opkt.flags |= AV_PKT_FLAG_KEY;
        }
    
        write_frame(of->ctx, &opkt, ost);
    
        ost->st->codec->frame_number++;
    
    int guess_input_channel_layout(InputStream *ist)
    
        AVCodecContext *dec = ist->st->codec;
    
        if (!dec->channel_layout) {
            char layout_name[256];
    
            if (dec->channels > ist->guess_layout_max)
                return 0;
    
            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for  Input Stream "
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    
    /*
     * Decode one audio packet for input stream @ist into ist->decoded_frame
     * and feed the decoded frame into every filtergraph input attached to
     * this stream.
     *
     * @param ist        input stream being decoded
     * @param pkt        compressed packet (pkt->size == 0 means flush/EOF)
     * @param got_output set by the decoder: nonzero if a frame was produced
     * @return decoder return value, or a negative error from filter injection
     *
     * NOTE(review): this excerpt is missing several lines present in the
     * original file (opening/closing braces, early returns after allocation
     * failure and error paths) -- the comments below describe only what the
     * visible code does.
     */
    static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)

        AVFrame *decoded_frame, *f;

        AVCodecContext *avctx = ist->st->codec;

        int i, ret, err = 0, resample_changed;

        AVRational decoded_frame_tb;

        /* lazily allocate the two reusable frames kept on the InputStream:
           one receives the decoder output, the other holds per-filter refs */
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))

        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);

        decoded_frame = ist->decoded_frame;

        update_benchmark(NULL);

        ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);

        update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);


        /* sanity check: a successful decode must leave a valid sample rate */
        if (ret >= 0 && avctx->sample_rate <= 0) {

            av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);

        /* decode error statistics: index 0 counts successes, 1 counts failures
           (only for real packets or actual output, not no-op flush calls) */
        if (*got_output || ret<0 || pkt->size)
            decode_error_stat[ret<0] ++;


        if (!*got_output || ret < 0) {
            if (!pkt->size) {

                /* empty packet == EOF: signal end-of-stream to every
                   attached filtergraph input so it can flush */
                for (i = 0; i < ist->nb_filters; i++)

                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);

                    av_buffersrc_add_frame(ist->filters[i]->filter, NULL);

        ist->samples_decoded += decoded_frame->nb_samples;
        ist->frames_decoded++;


    #if 1
        /* increment next_dts to use for the case where the input stream does not
           have timestamps or there are multiple frames in the packet */
        ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
        ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
    #endif

        /* detect any change in the audio parameters since the filtergraph
           was last configured; a change forces a reconfiguration below */
        resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                           ist->resample_channels       != avctx->channels               ||
                           ist->resample_channel_layout != decoded_frame->channel_layout ||
                           ist->resample_sample_rate    != decoded_frame->sample_rate;
        if (resample_changed) {
            char layout1[64], layout2[64];

            /* a channel layout is required to configure the filters --
               try to guess one from the channel count, fatal if impossible */
            if (!guess_input_channel_layout(ist)) {
                av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                       "layout for Input Stream #%d.%d\n", ist->file_index,
                       ist->st->index);

                exit_program(1);

            }
            decoded_frame->channel_layout = avctx->channel_layout;

            av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                         ist->resample_channel_layout);
            av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                         decoded_frame->channel_layout);

            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
                   ist->resample_channels, layout1,
                   decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
                   avctx->channels, layout2);

            /* remember the new parameters so the next change is detected */
            ist->resample_sample_fmt     = decoded_frame->format;
            ist->resample_sample_rate    = decoded_frame->sample_rate;
            ist->resample_channel_layout = decoded_frame->channel_layout;
            ist->resample_channels       = avctx->channels;

            /* reconfigure every filtergraph this stream feeds */
            for (i = 0; i < nb_filtergraphs; i++)

                if (ist_in_filtergraph(filtergraphs[i], ist)) {
                    FilterGraph *fg = filtergraphs[i];
                    int j;
                    if (configure_filtergraph(fg) < 0) {
                        av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

                    }
                    /* encoders with a fixed frame size need the buffersink to
                       emit frames of exactly that size after reconfiguration */
                    for (j = 0; j < fg->nb_outputs; j++) {
                        OutputStream *ost = fg->outputs[j]->ost;
                        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
                            !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
                            av_buffersink_set_frame_size(ost->filter->filter,
                                                         ost->st->codec->frame_size);
                    }

        /* if the decoder provides a pts, use it instead of the last packet pts.
           the decoder could be delaying output by a packet or more. */
        if (decoded_frame->pts != AV_NOPTS_VALUE) {
            ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
            decoded_frame_tb   = avctx->time_base;
        } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
            /* fall back to the pts of the packet this frame came from */
            decoded_frame->pts = decoded_frame->pkt_pts;
            decoded_frame_tb   = ist->st->time_base;
        } else if (pkt->pts != AV_NOPTS_VALUE) {
            /* then the pts of the packet currently being decoded */
            decoded_frame->pts = pkt->pts;
            decoded_frame_tb   = ist->st->time_base;
        }else {
            /* last resort: the stream's current dts estimate */
            decoded_frame->pts = ist->dts;
            decoded_frame_tb   = AV_TIME_BASE_Q;
        }

        /* rescale the pts to a 1/sample_rate time base, tracking the rounding
           remainder across calls to avoid cumulative drift */
        if (decoded_frame->pts != AV_NOPTS_VALUE)

            decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                                  (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                                  (AVRational){1, ist->st->codec->sample_rate});

        /* feed the frame to every attached filter; all but the last get a
           new reference, the last consumes decoded_frame itself */
        for (i = 0; i < ist->nb_filters; i++) {
            if (i < ist->nb_filters - 1) {
                f = ist->filter_frame;
                err = av_frame_ref(f, decoded_frame);
                if (err < 0)
                    break;
            } else
                f = decoded_frame;

            err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,

                                         AV_BUFFERSRC_FLAG_PUSH);

            if (err == AVERROR_EOF)
                err = 0; /* ignore */

        decoded_frame->pts = AV_NOPTS_VALUE;

        av_frame_unref(ist->filter_frame);
        av_frame_unref(decoded_frame);
        return err < 0 ? err : ret;
    
    /*
     * Decode one video packet for input stream @ist into ist->decoded_frame
     * and inject the frame into every filtergraph input attached to this
     * stream.
     *
     * @param ist        input stream being decoded
     * @param pkt        compressed packet (pkt->size == 0 means flush/EOF)
     * @param got_output set by the decoder: nonzero if a frame was produced
     * @return the first error from hwaccel retrieval / frame ref / filter
     *         injection, otherwise the decoder return value
     *
     * NOTE(review): this excerpt is missing several lines present in the
     * original file (opening/closing braces, returns on error paths) --
     * the comments below describe only what the visible code does.
     */
    static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)

        AVFrame *decoded_frame, *f;
        int i, ret = 0, err = 0, resample_changed;

        int64_t best_effort_timestamp;
        AVRational *frame_sample_aspect;

        /* lazily allocate the reusable decode/filter frames */
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))

        decoded_frame = ist->decoded_frame;

        /* overwrite the packet dts with the stream's current dts estimate,
           rescaled from AV_TIME_BASE_Q back to the stream time base */
        pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);

        update_benchmark(NULL);

        ret = avcodec_decode_video2(ist->st->codec,
                                    decoded_frame, got_output, pkt);

        update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);


        /* decode error statistics: index 0 counts successes, 1 counts failures
           (only for real packets or actual output, not no-op flush calls) */
        if (*got_output || ret<0 || pkt->size)
            decode_error_stat[ret<0] ++;


        if (!*got_output || ret < 0) {
            if (!pkt->size) {

                /* empty packet == EOF: signal end-of-stream to every
                   attached filtergraph input so it can flush */
                for (i = 0; i < ist->nb_filters; i++)

                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);

                    av_buffersrc_add_frame(ist->filters[i]->filter, NULL);

        /* honor a user-forced field order (-top option) */
        if(ist->top_field_first>=0)
            decoded_frame->top_field_first = ist->top_field_first;

        /* copy hardware-decoded surfaces back to system memory when the
           frame is still in the hwaccel pixel format */
        if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
            err = ist->hwaccel_retrieve_data(ist->st->codec, decoded_frame);
            if (err < 0)
                goto fail;
        }

        ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;


        /* prefer the decoder's best-effort timestamp over raw pts/dts and
           use it to advance the stream clock */
        best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
        if(best_effort_timestamp != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "

                   "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
                   ist->st->index, av_ts2str(decoded_frame->pts),
                   av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
                   best_effort_timestamp,
                   av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
                   decoded_frame->key_frame, decoded_frame->pict_type,
                   ist->st->time_base.num, ist->st->time_base.den);

        /* container-level aspect ratio overrides the codec-level one */
        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

        /* detect any change in frame geometry / pixel format since the
           filtergraph was last configured */
        resample_changed = ist->resample_width   != decoded_frame->width  ||
                           ist->resample_height  != decoded_frame->height ||
                           ist->resample_pix_fmt != decoded_frame->format;
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
                   decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

            /* remember the new parameters so the next change is detected */
            ist->resample_width   = decoded_frame->width;
            ist->resample_height  = decoded_frame->height;
            ist->resample_pix_fmt = decoded_frame->format;

            /* reconfigure affected filtergraphs (only if reinit is enabled) */
            for (i = 0; i < nb_filtergraphs; i++) {
                if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&

                    configure_filtergraph(filtergraphs[i]) < 0) {

                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

                    exit_program(1);

        frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");

        /* feed the frame to every attached filter; all but the last get a
           new reference, the last consumes decoded_frame itself */
        for (i = 0; i < ist->nb_filters; i++) {

            /* fall back to the stream aspect ratio if the frame has none */
            if (!frame_sample_aspect->num)
                *frame_sample_aspect = ist->st->sample_aspect_ratio;

            if (i < ist->nb_filters - 1) {
                f = ist->filter_frame;
                err = av_frame_ref(f, decoded_frame);
                if (err < 0)
                    break;

                f = decoded_frame;

            ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);

            if (ret == AVERROR_EOF) {
                ret = 0; /* ignore */
            } else if (ret < 0) {

                av_log(NULL, AV_LOG_FATAL,
                       "Failed to inject frame into filter network: %s\n", av_err2str(ret));

        av_frame_unref(ist->filter_frame);
        av_frame_unref(decoded_frame);
        return err < 0 ? err : ret;
    
    static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    
        AVSubtitle subtitle;
        int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                              &subtitle, got_output, pkt);
    
    
        if (*got_output || ret<0 || pkt->size)
            decode_error_stat[ret<0] ++;
    
    
        if (ret < 0 || !*got_output) {
            if (!pkt->size)
                sub2video_flush(ist);
    
        if (ist->fix_sub_duration) {
    
            if (ist->prev_sub.got_output) {
    
                end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                                 1000, AV_TIME_BASE);
    
                if (end < ist->prev_sub.subtitle.end_display_time) {
                    av_log(ist->st->codec, AV_LOG_DEBUG,
    
                           "Subtitle duration reduced from %d to %d%s\n",
                           ist->prev_sub.subtitle.end_display_time, end,
                           end <= 0 ? ", dropping it" : "");
    
                    ist->prev_sub.subtitle.end_display_time = end;
                }
            }
            FFSWAP(int,        *got_output, ist->prev_sub.got_output);
            FFSWAP(int,        ret,         ist->prev_sub.ret);
            FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
    
        sub2video_update(ist, &subtitle);
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || !ost->encoding_needed
                || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
    
            do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);