/*
 * ffmpeg.c (excerpt) — transcoding main loop: video output, stats reporting,
 * and encoder flushing. Recovered from a web view; surrounding UI text removed.
 */
  •                 && input_files[ist->file_index]->ctx->nb_streams == 1
                    && input_files[ist->file_index]->input_ts_offset == 0) {
                    format_video_sync = VSYNC_VSCFR;
                }
                if (format_video_sync == VSYNC_CFR && copy_ts) {
                    format_video_sync = VSYNC_VSCFR;
                }
    
            if (delta0 < 0 &&
                delta > 0 &&
                format_video_sync != VSYNC_PASSTHROUGH &&
                format_video_sync != VSYNC_DROP) {
                double cor = FFMIN(-delta0, duration);
                if (delta0 < -0.6) {
                    av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
                } else
                    av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
                sync_ipts += cor;
                duration -= cor;
                delta0 += cor;
            }
    
            switch (format_video_sync) {
            case VSYNC_VSCFR:
                if (ost->frame_number == 0 && delta - duration >= 0.5) {
                    av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
                    delta = duration;
                    delta0 = 0;
                    ost->sync_opts = lrint(sync_ipts);
                }
            case VSYNC_CFR:
                // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
                if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                    nb_frames = 0;
                } else if (delta < -1.1)
                    nb_frames = 0;
                else if (delta > 1.1) {
                    nb_frames = lrintf(delta);
                    if (delta0 > 1.1)
                        nb0_frames = lrintf(delta0 - 0.6);
                }
                break;
            case VSYNC_VFR:
                if (delta <= -0.6)
                    nb_frames = 0;
                else if (delta > 0.6)
                    ost->sync_opts = lrint(sync_ipts);
                break;
            case VSYNC_DROP:
            case VSYNC_PASSTHROUGH:
    
                break;
            default:
                av_assert0(0);
    
    
        nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    
        nb0_frames = FFMIN(nb0_frames, nb_frames);
    
    
        memmove(ost->last_nb0_frames + 1,
                ost->last_nb0_frames,
                sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
        ost->last_nb0_frames[0] = nb0_frames;
    
    
        if (nb0_frames == 0 && ost->last_droped) {
    
            nb_frames_drop++;
    
                   "*** dropping frame %d from stream %d at ts %"PRId64"\n",
    
                   ost->frame_number, ost->st->index, ost->last_frame->pts);
    
        }
        if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
    
            if (nb_frames > dts_error_threshold * 30) {
    
    Clément Bœsch's avatar
    Clément Bœsch committed
                av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
    
                nb_frames_drop++;
                return;
    
            nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
    
            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    
        ost->last_droped = nb_frames == nb0_frames && next_picture;
    
      /* duplicates frame if needed */
      for (i = 0; i < nb_frames; i++) {
    
    Anton Khirnov's avatar
    Anton Khirnov committed
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
    
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;
    
    
        in_picture->pts = ost->sync_opts;
    
        if (!check_recording_time(ost))
    
        if (ost->frame_number >= ost->max_frames)
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            return;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
    
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
    
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
    
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            pkt.data   = (uint8_t *)in_picture;
            pkt.size   =  sizeof(AVPicture);
            pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;
    
            write_frame(s, &pkt, ost);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
        } else {
    
            int got_packet, forced_keyframe = 0;
            double pts_time;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
    
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;
    
            if (in_picture->interlaced_frame) {
    
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
    
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
    
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
    
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
    
            in_picture->quality = enc->global_quality;
    
            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            if (ost->forced_kf_index < ost->forced_kf_count &&
    
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
    
    Anton Khirnov's avatar
    Anton Khirnov committed
                ost->forced_kf_index++;
    
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                double res;
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
    
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
    
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                        res);
                if (res) {
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
                }
    
                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
    
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            }
    
                in_picture->pict_type = AV_PICTURE_TYPE_I;
    
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
            }
    
    
            update_benchmark(NULL);
    
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            }
    
    
            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
    
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    
                exit_program(1);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            }
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            if (got_packet) {
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
                }
    
    
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
    
                    pkt.pts = ost->sync_opts;
    
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                        "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                        av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                        av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
                }
    
                frame_size = pkt.size;
    
                write_frame(s, &pkt, ost);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
    
    Anton Khirnov's avatar
    Anton Khirnov committed
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
    
        if (vstats_filename && frame_size)
    
            do_video_stats(ost, frame_size);
    
    
        if (!ost->last_frame)
            ost->last_frame = av_frame_alloc();
        av_frame_unref(ost->last_frame);
    
        if (next_picture && ost->last_frame)
    
            av_frame_ref(ost->last_frame, next_picture);
    
        else
            av_frame_free(&ost->last_frame);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    }
    
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
    static double psnr(double d)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    {
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
        return -10.0 * log(d) / log(10.0);
    
    static void do_video_stats(OutputStream *ost, int frame_size)
    
        AVCodecContext *enc;
        int frame_number;
        double ti1, bitrate, avg_bitrate;
    
        /* this is executed just the first time do_video_stats is called */
        if (!vstats_file) {
            vstats_file = fopen(vstats_filename, "w");
            if (!vstats_file) {
                perror("fopen");
    
                exit_program(1);
    
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
            frame_number = ost->st->nb_frames;
    
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
    
            if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
                fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
    
            fprintf(vstats_file,"f_size= %6d ", frame_size);
            /* compute pts value */
    
            ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
    
            if (ti1 < 0.01)
                ti1 = 0.01;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
    
            avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
    
            fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
    
                   (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
    
            fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    
    static void finish_output_stream(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
        int i;
    
        ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
    
        if (of->shortest) {
            for (i = 0; i < of->ctx->nb_streams; i++)
                output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
        }
    }
    
    
     * Get and encode new output from any of the filtergraphs, without causing
     * activity.
     *
     * @return  0 for success, <0 for severe errors
     */
    
    static int reap_filters(int flush)
    
    {
        AVFrame *filtered_frame = NULL;
    
        /* Reap all buffers present in the buffer sinks */
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            OutputFile    *of = output_files[ost->file_index];
    
            AVCodecContext *enc = ost->enc_ctx;
    
            int ret = 0;
    
            if (!ost->filter)
                continue;
    
            if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
    
                return AVERROR(ENOMEM);
    
            filtered_frame = ost->filtered_frame;
    
            while (1) {
    
                double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
    
                ret = av_buffersink_get_frame_flags(filter, filtered_frame,
    
                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                        av_log(NULL, AV_LOG_WARNING,
    
                               "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
    
                    } else if (flush && ret == AVERROR_EOF) {
    
                        if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                            do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
    
                if (ost->finished) {
                    av_frame_unref(filtered_frame);
                    continue;
                }
    
                if (filtered_frame->pts != AV_NOPTS_VALUE) {
    
                    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    
                    AVRational tb = enc->time_base;
                    int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
    
                    tb.den <<= extra_bits;
                    float_pts =
                        av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
                        av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                    float_pts /= 1 << extra_bits;
                    // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                    float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
    
    
                    filtered_frame->pts =
    
                        av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                        av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    
                //if (ost->source_index >= 0)
                //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
    
    
                case AVMEDIA_TYPE_VIDEO:
    
                    if (!ost->frame_aspect_ratio.num)
    
                        enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
    
                        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
    
                                av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
    
                                enc->time_base.num, enc->time_base.den);
                    }
    
    
                    do_video_out(of->ctx, ost, filtered_frame, float_pts);
    
                    break;
                case AVMEDIA_TYPE_AUDIO:
    
                    if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
    
                        enc->channels != av_frame_get_channels(filtered_frame)) {
    
                        av_log(NULL, AV_LOG_ERROR,
                               "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                        break;
                    }
    
                    do_audio_out(of->ctx, ost, filtered_frame);
                    break;
                default:
                    // TODO support subtitle filters
                    av_assert0(0);
                }
    
    
                av_frame_unref(filtered_frame);
    
    static void print_final_stats(int64_t total_size)
    {
        uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
        uint64_t subtitle_size = 0;
        uint64_t data_size = 0;
        float percent = -1.0;
    
    
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
    
            switch (ost->enc_ctx->codec_type) {
    
                case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
                case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
                case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
                default:                 other_size += ost->data_size; break;
            }
    
            extra_size += ost->enc_ctx->extradata_size;
    
            data_size  += ost->data_size;
    
            if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
                != AV_CODEC_FLAG_PASS1)
    
        if (data_size && total_size>0 && total_size >= data_size)
    
            percent = 100.0 * (total_size - data_size) / data_size;
    
        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
               video_size / 1024.0,
               audio_size / 1024.0,
               subtitle_size / 1024.0,
               other_size / 1024.0,
               extra_size / 1024.0);
        if (percent >= 0.0)
            av_log(NULL, AV_LOG_INFO, "%f%%", percent);
        else
            av_log(NULL, AV_LOG_INFO, "unknown");
        av_log(NULL, AV_LOG_INFO, "\n");
    
    
        /* print verbose per-stream stats */
        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
            uint64_t total_packets = 0, total_size = 0;
    
            av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
                   i, f->ctx->filename);
    
            for (j = 0; j < f->nb_streams; j++) {
                InputStream *ist = input_streams[f->ist_index + j];
    
                enum AVMediaType type = ist->dec_ctx->codec_type;
    
    
                total_size    += ist->data_size;
                total_packets += ist->nb_packets;
    
                av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                       i, j, media_type_string(type));
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                       ist->nb_packets, ist->data_size);
    
                if (ist->decoding_needed) {
                    av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                           ist->frames_decoded);
                    if (type == AVMEDIA_TYPE_AUDIO)
                        av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                    av_log(NULL, AV_LOG_VERBOSE, "; ");
                }
    
                av_log(NULL, AV_LOG_VERBOSE, "\n");
            }
    
            av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
                   total_packets, total_size);
        }
    
        for (i = 0; i < nb_output_files; i++) {
            OutputFile *of = output_files[i];
            uint64_t total_packets = 0, total_size = 0;
    
            av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
                   i, of->ctx->filename);
    
            for (j = 0; j < of->ctx->nb_streams; j++) {
                OutputStream *ost = output_streams[of->ost_index + j];
    
                enum AVMediaType type = ost->enc_ctx->codec_type;
    
    
                total_size    += ost->data_size;
                total_packets += ost->packets_written;
    
                av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                       i, j, media_type_string(type));
                if (ost->encoding_needed) {
                    av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                           ost->frames_encoded);
                    if (type == AVMEDIA_TYPE_AUDIO)
                        av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                    av_log(NULL, AV_LOG_VERBOSE, "; ");
                }
    
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                       ost->packets_written, ost->data_size);
    
                av_log(NULL, AV_LOG_VERBOSE, "\n");
            }
    
            av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
                   total_packets, total_size);
        }
    
        if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
    
            av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
            if (pass1_used) {
                av_log(NULL, AV_LOG_WARNING, "\n");
            } else {
                av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
            }
    
    static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    
        AVBPrint buf_script;
    
        OutputStream *ost;
        AVFormatContext *oc;
        int64_t total_size;
        AVCodecContext *enc;
        int frame_number, vid, i;
    
        static int64_t last_time = -1;
        static int qp_histogram[52];
    
        int hours, mins, secs, us;
    
        if (!print_stats && !is_last_report && !progress_avio)
    
        if (!is_last_report) {
            if (last_time == -1) {
                last_time = cur_time;
    
    Ramiro Polla's avatar
    Ramiro Polla committed
            }
    
            if ((cur_time - last_time) < 500000)
                return;
            last_time = cur_time;
    
        total_size = avio_size(oc->pb);
    
        if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
            total_size = avio_tell(oc->pb);
    
        av_bprint_init(&buf_script, 0, 1);
    
        for (i = 0; i < nb_output_streams; i++) {
    
            if (!ost->stream_copy)
                q = ost->quality / (float) FF_QP2LAMBDA;
    
    
            if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
    
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
    
            if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
                float fps, t = (cur_time-timer_start) / 1000000.0;
    
                frame_number = ost->frame_number;
    
                fps = t > 1 ? frame_number / t : 0;
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                         frame_number, fps < 9.95, fps, q);
                av_bprintf(&buf_script, "frame=%d\n", frame_number);
                av_bprintf(&buf_script, "fps=%.1f\n", fps);
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                if (is_last_report)
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                if (qp_hist) {
    
                    int j;
                    int qp = lrintf(q);
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    for (j = 0; j < 32; j++)
    
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
                }
    
                if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    double error, error_sum = 0;
                    double scale, scale_sum = 0;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    char type[3] = { 'Y','U','V' };
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    for (j = 0; j < 3; j++) {
                        if (is_last_report) {
                            error = enc->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        } else {
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                            scale = enc->width * enc->height * 255.0 * 255.0;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                        if (j)
                            scale /= 4;
    
                        error_sum += error;
                        scale_sum += scale;
    
                        p = psnr(error / scale);
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                        av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
    
                                   ost->file_index, ost->index, type[j] | 32, p);
    
                    p = psnr(error_sum / scale_sum);
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
    
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                               ost->file_index, ost->index, p);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
            }
    
            /* compute min output value */
    
            if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
                pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
    
                                              ost->st->time_base, AV_TIME_BASE_Q));
    
            if (is_last_report)
                nb_frames_drop += ost->last_droped;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    
        secs = FFABS(pts) / AV_TIME_BASE;
        us = FFABS(pts) % AV_TIME_BASE;
    
        mins = secs / 60;
        secs %= 60;
        hours = mins / 60;
        mins %= 60;
    
        bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    
        if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                     "size=N/A time=");
        else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                     "size=%8.0fkB time=", total_size / 1024.0);
    
        if (pts < 0)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "%02d:%02d:%02d.%02d ", hours, mins, secs,
                 (100 * us) / AV_TIME_BASE);
    
    
        if (bitrate < 0) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
            av_bprintf(&buf_script, "bitrate=N/A\n");
        }else{
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
            av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
        }
    
    
        if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
        else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
                   hours, mins, secs, us);
    
        if (nb_frames_dup || nb_frames_drop)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                    nb_frames_dup, nb_frames_drop);
    
        av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
        av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
    
        if (print_stats || is_last_report) {
    
            const char end = is_last_report ? '\n' : '\r';
    
            if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
    
                fprintf(stderr, "%s    %c", buf, end);
    
                av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
    
        fflush(stderr);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    
        if (progress_avio) {
            av_bprintf(&buf_script, "progress=%s\n",
                       is_last_report ? "end" : "continue");
            avio_write(progress_avio, buf_script.str,
                       FFMIN(buf_script.len, buf_script.size - 1));
            avio_flush(progress_avio);
            av_bprint_finalize(&buf_script, NULL);
            if (is_last_report) {
    
        if (is_last_report)
            print_final_stats(total_size);
    
    /**
     * Drain every encoder once end of input is reached: repeatedly call the
     * encoder with a NULL frame until it reports no more packets, writing each
     * flushed packet to its output file.
     *
     * NOTE(review): this copy of the function has lines missing (blame-view
     * author lines are interleaved, and several statements — the `continue`s
     * after the early `if`s, the `switch (enc->codec_type)` header, the
     * `int i, ret;` declarations, and various closing braces — were dropped
     * by the scrape). Do not build from this text; restore the block from the
     * upstream ffmpeg.c before compiling.
     */
    static void flush_encoders(void)

        for (i = 0; i < nb_output_streams; i++) {

            OutputStream   *ost = output_streams[i];

            AVCodecContext *enc = ost->enc_ctx;

            AVFormatContext *os = output_files[ost->file_index]->ctx;

            int stop_encoding = 0;

            /* Stream-copied outputs have no encoder to flush. */
            if (!ost->encoding_needed)

            /* Audio with frame_size <= 1 (raw/PCM-like) buffers no frames. */
            if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)

            /* Raw video on an AVFMT_RAWPICTURE muxer bypasses the encoder. */
            if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)

    Aneesh Dogra's avatar
    Aneesh Dogra committed
            for (;;) {

                /* Select the flush entry point for this media type. */
                int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
                const char *desc;

                    encode = avcodec_encode_audio2;
                    desc   = "Audio";

                    break;
                case AVMEDIA_TYPE_VIDEO:

                    encode = avcodec_encode_video2;
                    desc   = "Video";
                    break;
                default:
                    /* Other media types (e.g. subtitles): nothing to drain. */
                    stop_encoding = 1;
                }

                if (encode) {
                    AVPacket pkt;

    Michael Niedermayer's avatar
    Michael Niedermayer committed
                    int pkt_size;

                    int got_packet;
                    av_init_packet(&pkt);
                    pkt.data = NULL;
                    pkt.size = 0;


                    update_benchmark(NULL);

                    /* NULL frame tells the encoder to emit buffered packets. */
                    ret = encode(enc, &pkt, NULL, &got_packet);

                    update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);

                        av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                               desc,
                               av_err2str(ret));

                        exit_program(1);

                    }
                    /* Append pass-1 rate-control stats if requested. */
                    if (ost->logfile && enc->stats_out) {
                        fprintf(ost->logfile, "%s", enc->stats_out);
                    }

                    /* No packet produced: this encoder is fully drained. */
                    if (!got_packet) {

                        stop_encoding = 1;
                        break;
                    }

                    if (ost->finished & MUXER_FINISHED) {

                    /* Rescale timestamps from codec to stream time base. */
                    av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

    Michael Niedermayer's avatar
    Michael Niedermayer committed
                    pkt_size = pkt.size;

                    write_frame(os, &pkt, ost);

                    /* pkt_size was saved above because write_frame consumes pkt. */
                    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {

    Michael Niedermayer's avatar
    Michael Niedermayer committed
                        do_video_stats(ost, pkt_size);

                if (stop_encoding)
    
    /*
     * Check whether a packet from ist should be written into ost at this time.
     *
     * Returns 1 when the packet belongs to this output stream and is not
     * before the requested output start time, 0 otherwise.
     *
     * NOTE(review): the scraped copy of this function was truncated after the
     * start-time check (missing `return 0;`, final `return 1;` and closing
     * brace), leaving a non-void function that could fall off the end — UB.
     * The tail is restored here.
     */
    static int check_output_constraints(InputStream *ist, OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
        int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

        /* Only packets from the input stream this output is mapped to qualify. */
        if (ost->source_index != ist_index)
            return 0;

        /* Drop packets preceding the requested output start time (-ss on output). */
        if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
            return 0;

        return 1;
    }
    
    /**
     * Copy one packet from an input stream to an output stream without
     * re-encoding: build an output packet (opkt) from pkt, rescaling
     * timestamps into the output stream time base and applying start-time /
     * recording-time constraints.
     *
     * NOTE(review): this copy of the function has lines dropped by the scrape
     * (the `AVPacket opkt;` and `AVPicture pict;` declarations, several `if`
     * headers and closing braces are missing, and blame-view author lines are
     * interleaved). Do not build from this text; restore the block from the
     * upstream ffmpeg.c before compiling.
     */
    static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    {

        OutputFile *of = output_files[ost->file_index];

        InputFile   *f = input_files [ist->file_index];

        /* Output start time, once in AV_TIME_BASE units, then rescaled into
         * the output and input stream time bases respectively. */
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);

        int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);

        av_init_packet(&opkt);

        /* First packet must be a keyframe unless the user allowed otherwise. */
        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
            !ost->copy_initial_nonkeyframes)
            return;


            if (!ost->frame_number && ist->pts < start_time &&

                !ost->copy_prior_start)
                return;
        } else {
            /* No decoded pts available: compare the raw packet pts instead. */
            if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
                !ost->copy_prior_start)
                return;
        }

        /* Stop the output stream once -t on the output has elapsed. */
        if (of->recording_time != INT64_MAX &&

            ist->pts >= of->recording_time + start_time) {

            close_output_stream(ost);

        /* Same check against -t limits set on the input file. */
        if (f->recording_time != INT64_MAX) {
            start_time = f->ctx->start_time;
            if (f->start_time != AV_NOPTS_VALUE)
                start_time += f->start_time;

            if (ist->pts >= f->recording_time + start_time) {
                close_output_stream(ost);

        /* force the input stream PTS */

        if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)

    Fabrice Bellard's avatar
    Fabrice Bellard committed


        /* Rescale pts/dts into the output stream time base, shifted so the
         * output starts at zero. */
        if (pkt->pts != AV_NOPTS_VALUE)
            opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        else
            opkt.pts = AV_NOPTS_VALUE;

        if (pkt->dts == AV_NOPTS_VALUE)

            /* No packet dts: fall back to the input stream's running dts. */
            opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);

        else
            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
        opkt.dts -= ost_tb_start_time;

        /* For audio, derive sample-accurate timestamps from the frame
         * duration to avoid rounding drift across time-base conversions. */
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {

            int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);

            opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,

                                                   (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,

                                                   ost->st->time_base) - ost_tb_start_time;

        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
        opkt.flags    = pkt->flags;

    Aneesh Dogra's avatar
    Aneesh Dogra committed
        // FIXME remove the following 2 lines they shall be replaced by the bitstream filters

        if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
           && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
           && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
           && ost->st->codec->codec_id != AV_CODEC_ID_VC1

            /* Let the parser adjust the bitstream for the output container. */
            int ret = av_parser_change(ost->parser, ost->st->codec,

                                 &opkt.data, &opkt.size,
                                 pkt->data, pkt->size,

                                 pkt->flags & AV_PKT_FLAG_KEY);
            if (ret < 0) {

                av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                       av_err2str(ret));

                /* av_parser_change allocated new data: give opkt ownership. */
                opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
                if (!opkt.buf)

                    exit_program(1);

        } else {
            /* No parser rewrite needed: reference the input packet's data. */
            opkt.data = pkt->data;
            opkt.size = pkt->size;

        av_copy_packet_side_data(&opkt, pkt);

        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
            ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
            (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {

            /* store AVPicture in AVPacket, as expected by the output format */

            int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
            if (ret < 0) {

                av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                       av_err2str(ret));

            opkt.data = (uint8_t *)&pict;
            opkt.size = sizeof(AVPicture);
            opkt.flags |= AV_PKT_FLAG_KEY;
        }

        write_frame(of->ctx, &opkt, ost);
    
    /**
     * Pick a default channel layout for an input stream whose decoder did not
     * report one, based on the channel count.
     *
     * Returns 1 if a layout is already set or was successfully guessed,
     * 0 if no layout could be determined.
     *
     * NOTE(review): the scraped copy of this function was missing the opening
     * brace after the signature and the trailing `return 1;` / closing brace;
     * they are restored here. Every statement in between is unchanged.
     */
    int guess_input_channel_layout(InputStream *ist)
    {
        AVCodecContext *dec = ist->dec_ctx;

        if (!dec->channel_layout) {
            char layout_name[256];

            /* Refuse to guess above the user-configurable channel-count cap. */
            if (dec->channels > ist->guess_layout_max)
                return 0;
            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for  Input Stream "
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
        }
        return 1;
    }
    static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    
        AVFrame *decoded_frame, *f;
    
        AVCodecContext *avctx = ist->dec_ctx;
    
        int i, ret, err = 0, resample_changed;
    
        AVRational decoded_frame_tb;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
    
        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
    
        decoded_frame = ist->decoded_frame;
    
        update_benchmark(NULL);
    
        ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    
        update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    
    
        if (ret >= 0 && avctx->sample_rate <= 0) {
    
            av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
    
        if (ret < 0 && exit_on_error)
            exit_program(1);
    
    
        ist->samples_decoded += decoded_frame->nb_samples;
        ist->frames_decoded++;
    
    
    #if 1
        /* increment next_dts to use for the case where the input stream does not
           have timestamps or there are multiple frames in the packet */
        ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
        ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
    #endif
    
        resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                           ist->resample_channels       != avctx->channels               ||
                           ist->resample_channel_layout != decoded_frame->channel_layout ||
                           ist->resample_sample_rate    != decoded_frame->sample_rate;
        if (resample_changed) {
            char layout1[64], layout2[64];
    
            if (!guess_input_channel_layout(ist)) {
                av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                       "layout for Input Stream #%d.%d\n", ist->file_index,
                       ist->st->index);
    
                exit_program(1);
    
            }
            decoded_frame->channel_layout = avctx->channel_layout;
    
            av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                         ist->resample_channel_layout);
            av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                         decoded_frame->channel_layout);
    
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
                   ist->resample_channels, layout1,
                   decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
                   avctx->channels, layout2);
    
            ist->resample_sample_fmt     = decoded_frame->format;
            ist->resample_sample_rate    = decoded_frame->sample_rate;
            ist->resample_channel_layout = decoded_frame->channel_layout;