ffmpeg.c

        sub->start_display_time = 0;
    
            if (i == 1)
                sub->num_rects = 0;
    
            subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                        subtitle_out_max_size, sub);
    
            if (i == 1)
                sub->num_rects = save_num_rects;
    
            if (subtitle_out_size < 0) {
    
                av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
    
                exit_program(1);
    
            }
    
            av_init_packet(&pkt);
            pkt.data = subtitle_out;
            pkt.size = subtitle_out_size;
    
            pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
            pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
    
            if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
    
                /* XXX: the pts correction is handled here. Maybe handling
                   it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }

            pkt.dts = pkt.pts;
    
        output_packet(of, &pkt, ost);
    }
}

static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture,
                         double sync_ipts)
{
    int ret, format_video_sync;
        AVPacket pkt;
    
        AVCodecContext *enc = ost->enc_ctx;
    
        AVCodecParameters *mux_par = ost->st->codecpar;
    
        AVRational frame_rate;
    
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
        int frame_size = 0;
        InputStream *ist = NULL;
    
        AVFilterContext *filter = ost->filter->filter;
    
        if (ost->source_index >= 0)
            ist = input_streams[ost->source_index];
    
        frame_rate = av_buffersink_get_frame_rate(filter);
        if (frame_rate.num > 0 && frame_rate.den > 0)
            duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
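    /* duration is the nominal spacing between output frames, in encoder
       time_base ticks; the checks below may clamp it to the forced output
       frame rate or replace it with the input frame's own pkt_duration */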
    
    
        if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
            duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
    
        if (!ost->filters_script &&
            !ost->filters &&
            next_picture &&
            ist &&
    
            lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

        if (!next_picture) {
            //end, flushing
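        // mid_pred() takes the median of the last three nb0_frames values,
        // a robust estimate of how many trailing duplicates to emit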
            nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                              ost->last_nb0_frames[1],
                                              ost->last_nb0_frames[2]);
        } else {
    
            delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
    
            delta  = delta0 + duration;
    
            /* by default, we output a single frame */
    
            nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
    
            nb_frames = 1;
    
            format_video_sync = video_sync_method;
            if (format_video_sync == VSYNC_AUTO) {
    
                if(!strcmp(of->ctx->oformat->name, "avi")) {
    
                    format_video_sync = VSYNC_VFR;
                } else
    
                    format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
    
                if (   ist
                    && format_video_sync == VSYNC_CFR
                    && input_files[ist->file_index]->ctx->nb_streams == 1
                    && input_files[ist->file_index]->input_ts_offset == 0) {
                    format_video_sync = VSYNC_VSCFR;
                }
                if (format_video_sync == VSYNC_CFR && copy_ts) {
                    format_video_sync = VSYNC_VSCFR;
            }
        }

            ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
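        /* a frame that starts before the current output position (delta0 < 0)
           but still overlaps it (delta > 0) is snapped onto the output grid:
           its duration is shortened rather than dropping or duplicating it */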
    
            if (delta0 < 0 &&
                delta > 0 &&
                format_video_sync != VSYNC_PASSTHROUGH &&
                format_video_sync != VSYNC_DROP) {
                if (delta0 < -0.6) {
                    av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
                } else
    
                    av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
    
                sync_ipts = ost->sync_opts;
                duration += delta0;
                delta0 = 0;
            }

            switch (format_video_sync) {
            case VSYNC_VSCFR:
    
                if (ost->frame_number == 0 && delta0 >= 0.5) {
                    av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
    
                    delta = duration;
                    delta0 = 0;
                    ost->sync_opts = lrint(sync_ipts);
                }
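            /* fall through: after the initial-frame adjustment, VSYNC_VSCFR
               behaves exactly like VSYNC_CFR */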
            case VSYNC_CFR:
                // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
                if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                    nb_frames = 0;
                } else if (delta < -1.1)
                    nb_frames = 0;
                else if (delta > 1.1) {
                    nb_frames = lrintf(delta);
                    if (delta0 > 1.1)
                        nb0_frames = lrintf(delta0 - 0.6);
                }
                break;
            case VSYNC_VFR:
                if (delta <= -0.6)
                    nb_frames = 0;
                else if (delta > 0.6)
                    ost->sync_opts = lrint(sync_ipts);
                break;
            case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);
            break;
            default:
            av_assert0(0);
        }
    }

        nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    
        nb0_frames = FFMIN(nb0_frames, nb_frames);
    
    
        memmove(ost->last_nb0_frames + 1,
                ost->last_nb0_frames,
                sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
        ost->last_nb0_frames[0] = nb0_frames;
    
    
        if (nb0_frames == 0 && ost->last_dropped) {
    
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    
        if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
    
            if (nb_frames > dts_error_threshold * 30) {
    
                av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
    
                nb_frames_drop++;
            return;
        }
            nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
    
            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    
            if (nb_frames_dup > dup_warning) {
                av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
                dup_warning *= 10;
        }
    }

        ost->last_dropped = nb_frames == nb0_frames && next_picture;
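    // last_dropped records that no new frame was emitted for a real input
    // frame; the check above converts it into nb_frames_drop on a later call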
    
      /* duplicates frame if needed */
      for (i = 0; i < nb_frames; i++) {
    
    AVFrame *in_picture;

        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
    
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;
    
    
        in_picture->pts = ost->sync_opts;
    
    if (!check_recording_time(ost))
        return;

    if (ost->frame_number >= ost->max_frames)
        return;
    
        if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
    
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
    
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
    
            if (in_picture->interlaced_frame)
    
            mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
        else
            mux_par->field_order = AV_FIELD_PROGRESSIVE;
    
            pkt.data   = (uint8_t *)in_picture;
            pkt.size   =  sizeof(AVPicture);
    
            pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
    
            pkt.flags |= AV_PKT_FLAG_KEY;
    
        output_packet(of, &pkt, ost);
    } else {
        int forced_keyframe = 0;
        double pts_time;
    
    
    
            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
    
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;
    
            if (in_picture->interlaced_frame) {
    
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
    
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
            else
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
        } else
            mux_par->field_order = AV_FIELD_PROGRESSIVE;
    
            in_picture->quality = enc->global_quality;
    
            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
    
            if (ost->forced_kf_index < ost->forced_kf_count &&
    
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
    
                ost->forced_kf_index++;
    
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                double res;
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
    
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
    
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                        res);
                if (res) {
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
                }
    
                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
    
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;
    
            }
    
            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
    
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
            }
    
    
            update_benchmark(NULL);
    
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            }
    
    
            ret = avcodec_send_frame(enc, in_picture);
            if (ret < 0)
                goto error;
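            /* one frame sent in may produce zero or more packets; keep
               receiving until the encoder asks for more input (EAGAIN) */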
    
            while (1) {
                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                if (ret == AVERROR(EAGAIN))
                    break;
                if (ret < 0)
                    goto error;
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
                }
    
    
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
    
                    pkt.pts = ost->sync_opts;
    
                av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
    
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                        "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
    
                        av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
                        av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
                }
                output_packet(of, &pkt, ost);
    
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
    
        if (vstats_filename && frame_size)
    
        do_video_stats(ost, frame_size);
  }

        if (!ost->last_frame)
            ost->last_frame = av_frame_alloc();
        av_frame_unref(ost->last_frame);
    
        if (next_picture && ost->last_frame)
    
            av_frame_ref(ost->last_frame, next_picture);
    
        else
            av_frame_free(&ost->last_frame);
    
    
        return;
    error:
        av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
        exit_program(1);
    
    }
    
    
    static double psnr(double d)
    
    {
    
    return -10.0 * log10(d);
}

static void do_video_stats(OutputStream *ost, int frame_size)
{
    
        AVCodecContext *enc;
        int frame_number;
        double ti1, bitrate, avg_bitrate;
    
        /* this is executed just the first time do_video_stats is called */
        if (!vstats_file) {
            vstats_file = fopen(vstats_filename, "w");
            if (!vstats_file) {
                perror("fopen");
    
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
            frame_number = ost->st->nb_frames;
    
            if (vstats_version <= 1) {
                fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                        ost->quality / (float)FF_QP2LAMBDA);
            } else  {
                fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                        ost->quality / (float)FF_QP2LAMBDA);
            }
    
            if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
                fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
    
            fprintf(vstats_file,"f_size= %6d ", frame_size);
            /* compute pts value */
    
            ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
    
            if (ti1 < 0.01)
                ti1 = 0.01;
    
            bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
    
            avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
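        // bitrate above is the rate implied by this frame alone (one frame per
        // encoder time_base tick); avg_bitrate is total bytes written so far
        // divided by the current stream end time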
    
            fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
    
                   (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
    
            fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    
    static int init_output_stream(OutputStream *ost, char *error, int error_len);
    
    
    static void finish_output_stream(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
        int i;
    
        ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
    
        if (of->shortest) {
            for (i = 0; i < of->ctx->nb_streams; i++)
                output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
        }
    }
    
    
/**
 * Get and encode new output from any of the filtergraphs, without causing
     * activity.
     *
     * @return  0 for success, <0 for severe errors
     */
    
    static int reap_filters(int flush)
    
    {
    AVFrame *filtered_frame = NULL;
    int i;

        /* Reap all buffers present in the buffer sinks */
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            OutputFile    *of = output_files[ost->file_index];
    
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        if (!ost->initialized) {
            char error[1024] = "";
    
                ret = init_output_stream(ost, error, sizeof(error));
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                           ost->file_index, ost->index, error);
                    exit_program(1);
                }
            }
    
    
            if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
    
            return AVERROR(ENOMEM);
        }
            filtered_frame = ost->filtered_frame;
    
            while (1) {
    
                double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
    
                ret = av_buffersink_get_frame_flags(filter, filtered_frame,
    
                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                        av_log(NULL, AV_LOG_WARNING,
    
                               "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
    
                    } else if (flush && ret == AVERROR_EOF) {
    
                        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
    
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
                if (ost->finished) {
                    av_frame_unref(filtered_frame);
                    continue;
                }
    
                if (filtered_frame->pts != AV_NOPTS_VALUE) {
    
                    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    
                    AVRational filter_tb = av_buffersink_get_time_base(filter);
    
                    AVRational tb = enc->time_base;
                    int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
    
                    tb.den <<= extra_bits;
                    float_pts =
    
                        av_rescale_q(filtered_frame->pts, filter_tb, tb) -
    
                        av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                    float_pts /= 1 << extra_bits;
                    // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                    float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
    
    
                    filtered_frame->pts =
    
                        av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
    
                        av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
                }
                //if (ost->source_index >= 0)
                //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
    
    
                switch (av_buffersink_get_type(filter)) {
    
                case AVMEDIA_TYPE_VIDEO:
    
                    if (!ost->frame_aspect_ratio.num)
    
                        enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
    
                        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
    
                                av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
    
                                enc->time_base.num, enc->time_base.den);
                    }
    
    
                    do_video_out(of, ost, filtered_frame, float_pts);
    
                    break;
                case AVMEDIA_TYPE_AUDIO:
    
                    if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
    
                        enc->channels != filtered_frame->channels) {
    
                        av_log(NULL, AV_LOG_ERROR,
                               "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                        break;
                    }
    
                    do_audio_out(of, ost, filtered_frame);
    
                    break;
                default:
                    // TODO support subtitle filters
                    av_assert0(0);
                }
    
    
            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}

    static void print_final_stats(int64_t total_size)
    {
        uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
        uint64_t subtitle_size = 0;
        uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
    
            switch (ost->enc_ctx->codec_type) {
    
                case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
                case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
                case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
                default:                 other_size += ost->data_size; break;
            }
    
            extra_size += ost->enc_ctx->extradata_size;
    
            data_size  += ost->data_size;
    
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

        if (data_size && total_size>0 && total_size >= data_size)
    
            percent = 100.0 * (total_size - data_size) / data_size;
    
        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
               video_size / 1024.0,
               audio_size / 1024.0,
               subtitle_size / 1024.0,
               other_size / 1024.0,
               extra_size / 1024.0);
        if (percent >= 0.0)
            av_log(NULL, AV_LOG_INFO, "%f%%", percent);
        else
            av_log(NULL, AV_LOG_INFO, "unknown");
        av_log(NULL, AV_LOG_INFO, "\n");
    
    
        /* print verbose per-stream stats */
        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
            uint64_t total_packets = 0, total_size = 0;
    
            av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
                   i, f->ctx->filename);
    
            for (j = 0; j < f->nb_streams; j++) {
                InputStream *ist = input_streams[f->ist_index + j];
    
                enum AVMediaType type = ist->dec_ctx->codec_type;
    
    
                total_size    += ist->data_size;
                total_packets += ist->nb_packets;
    
                av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                       i, j, media_type_string(type));
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                       ist->nb_packets, ist->data_size);
    
                if (ist->decoding_needed) {
                    av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                           ist->frames_decoded);
                    if (type == AVMEDIA_TYPE_AUDIO)
                        av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                    av_log(NULL, AV_LOG_VERBOSE, "; ");
                }
    
                av_log(NULL, AV_LOG_VERBOSE, "\n");
            }
    
            av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
                   total_packets, total_size);
        }
    
        for (i = 0; i < nb_output_files; i++) {
            OutputFile *of = output_files[i];
            uint64_t total_packets = 0, total_size = 0;
    
            av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
                   i, of->ctx->filename);
    
            for (j = 0; j < of->ctx->nb_streams; j++) {
                OutputStream *ost = output_streams[of->ost_index + j];
    
                enum AVMediaType type = ost->enc_ctx->codec_type;
    
    
                total_size    += ost->data_size;
                total_packets += ost->packets_written;
    
                av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                       i, j, media_type_string(type));
                if (ost->encoding_needed) {
                    av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                           ost->frames_encoded);
                    if (type == AVMEDIA_TYPE_AUDIO)
                        av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                    av_log(NULL, AV_LOG_VERBOSE, "; ");
                }
    
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                       ost->packets_written, ost->data_size);
    
                av_log(NULL, AV_LOG_VERBOSE, "\n");
            }
    
            av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
                   total_packets, total_size);
        }
    
        if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
    
            av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
            if (pass1_used) {
                av_log(NULL, AV_LOG_WARNING, "\n");
            } else {
                av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}

static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf_script;
    char buf[1024];
    
        OutputStream *ost;
        AVFormatContext *oc;
        int64_t total_size;
        AVCodecContext *enc;
        int frame_number, vid, i;
    
        int64_t pts = INT64_MIN + 1;
    
        static int64_t last_time = -1;
        static int qp_histogram[52];
    
        int hours, mins, secs, us;
    
    if (!print_stats && !is_last_report && !progress_avio)
        return;

        if (!is_last_report) {
            if (last_time == -1) {
                last_time = cur_time;
    
            return;
        }
    
            if ((cur_time - last_time) < 500000)
                return;
        last_time = cur_time;
    }

        t = (cur_time-timer_start) / 1000000.0;
    
    
    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    
        if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
    
            total_size = avio_tell(oc->pb);
    
        av_bprint_init(&buf_script, 0, 1);
    
    buf[0] = '\0';
    vid = 0;
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
                q = ost->quality / (float) FF_QP2LAMBDA;
    
    
            if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
    
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
            if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
    
                frame_number = ost->frame_number;
    
                fps = t > 1 ? frame_number / t : 0;
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                         frame_number, fps < 9.95, fps, q);
                av_bprintf(&buf_script, "frame=%d\n", frame_number);
                av_bprintf(&buf_script, "fps=%.1f\n", fps);
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
    
                if (is_last_report)
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
    
                if (qp_hist) {
    
                    int j;
                    int qp = lrintf(q);
    
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                    for (j = 0; j < 32; j++)
    
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
    
                }
    
                if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
    
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
    
                    char type[3] = { 'Y','U','V' };
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
    
                    for (j = 0; j < 3; j++) {
                        if (is_last_report) {
                            error = enc->error[j];
                            scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                        if (j)
                            scale /= 4;
    
                        error_sum += error;
                        scale_sum += scale;
    
                        p = psnr(error / scale);
                        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                        av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
    
                               ost->file_index, ost->index, type[j] | 32, p);
                    }
                    p = psnr(error_sum / scale_sum);
    
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
    
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                               ost->file_index, ost->index, p);
    
                }
                vid = 1;
            }
    
            /* compute min output value */
    
            if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
                pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
    
                                              ost->st->time_base, AV_TIME_BASE_Q));
    
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
        }
    
    
        secs = FFABS(pts) / AV_TIME_BASE;
        us = FFABS(pts) % AV_TIME_BASE;
    
        mins = secs / 60;
        secs %= 60;
        hours = mins / 60;
        mins %= 60;
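    // pts is in AV_TIME_BASE units (microseconds); the lines above split it
    // into hours:minutes:seconds plus a microsecond remainder for display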
    
        bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    
        speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
    
        if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                     "size=N/A time=");
        else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                     "size=%8.0fkB time=", total_size / 1024.0);
    
        if (pts < 0)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                 "%02d:%02d:%02d.%02d ", hours, mins, secs,
                 (100 * us) / AV_TIME_BASE);
    
    
        if (bitrate < 0) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
            av_bprintf(&buf_script, "bitrate=N/A\n");
        }else{
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
            av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
        }
    
    
        if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
        else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
                   hours, mins, secs, us);
    
        if (nb_frames_dup || nb_frames_drop)
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                    nb_frames_dup, nb_frames_drop);
    
        av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
        av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
    
        if (speed < 0) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
            av_bprintf(&buf_script, "speed=N/A\n");
        } else {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
            av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
        }
    
    
        if (print_stats || is_last_report) {
    
            const char end = is_last_report ? '\n' : '\r';
    
            if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
    
                fprintf(stderr, "%s    %c", buf, end);
    
                av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
    
        fflush(stderr);
    
        }
    
    
        if (progress_avio) {
            av_bprintf(&buf_script, "progress=%s\n",
                       is_last_report ? "end" : "continue");
            avio_write(progress_avio, buf_script.str,
                       FFMIN(buf_script.len, buf_script.size - 1));
            avio_flush(progress_avio);
            av_bprint_finalize(&buf_script, NULL);
            if (is_last_report) {
    
                if ((ret = avio_closep(&progress_avio)) < 0)
                    av_log(NULL, AV_LOG_ERROR,
                           "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
    
        if (is_last_report)
        print_final_stats(total_size);
}

static void flush_encoders(void)
{
    int i, ret;

        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream   *ost = output_streams[i];
    
            AVCodecContext *enc = ost->enc_ctx;
    
            OutputFile      *of = output_files[ost->file_index];
    
        if (!ost->encoding_needed)
            continue;

            // Try to enable encoding with no input frames.
            // Maybe we should just let encoding fail instead.
            if (!ost->initialized) {
                FilterGraph *fg = ost->filter->graph;
    
                char error[1024] = "";
    
    
                av_log(NULL, AV_LOG_WARNING,
                       "Finishing stream %d:%d without any data written to it.\n",
                       ost->file_index, ost->st->index);
    
                if (ost->filter && !fg->graph) {
                    int x;
                    for (x = 0; x < fg->nb_inputs; x++) {
                        InputFilter *ifilter = fg->inputs[x];
                        if (ifilter->format < 0) {
                            AVCodecParameters *par = ifilter->ist->st->codecpar;
                            // We never got any input. Set a fake format, which will
                            // come from libavformat.
                            ifilter->format                 = par->format;
                            ifilter->sample_rate            = par->sample_rate;
                            ifilter->channels               = par->channels;
                            ifilter->channel_layout         = par->channel_layout;
                            ifilter->width                  = par->width;
                            ifilter->height                 = par->height;
                            ifilter->sample_aspect_ratio    = par->sample_aspect_ratio;
                        }
                    }
    
                    if (!ifilter_has_all_input_formats(fg))
                        continue;
    
                    ret = configure_filtergraph(fg);
                    if (ret < 0) {
                        av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                        exit_program(1);
                    }
    
                    finish_output_stream(ost);
                }
    
                ret = init_output_stream(ost, error, sizeof(error));
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                           ost->file_index, ost->index, error);
                    exit_program(1);
                }
            }
    
    
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;

            if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
    
    
            for (;;) {
    
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }
    
                    av_init_packet(&pkt);
                    pkt.data = NULL;
                    pkt.size = 0;
    
    
                    update_benchmark(NULL);
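            // drain the encoder: avcodec_send_frame(enc, NULL) signals end of
            // stream; keep sending it while receive_packet still returns
            // EAGAIN, then handle the remaining packets below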
    
    
                    while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                        ret = avcodec_send_frame(enc, NULL);
                        if (ret < 0) {
                            av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                                   desc,
                                   av_err2str(ret));
                            exit_program(1);
                        }
                    }
    
    
                    update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
    
                    if (ret < 0 && ret != AVERROR_EOF) {
    
                        av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                               desc,
                               av_err2str(ret));
    
                        exit_program(1);
    
                    }
                    if (ost->logfile && enc->stats_out) {
                        fprintf(ost->logfile, "%s", enc->stats_out);
                    }
    
                if (ret == AVERROR_EOF) {
                    break;
                }
                if (ost->finished & MUXER_FINISHED) {
                    av_packet_unref(&pkt);
                    continue;
                }
                    av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
    
                    pkt_size = pkt.size;
    
                    output_packet(of, &pkt, ost);
    
                    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
    
                do_video_stats(ost, pkt_size);
            }
        }
    }
}

    /*
     * Check whether a packet from ist should be written into ost at this time
     */
    static int check_output_constraints(InputStream *ist, OutputStream *ost)
    {
    
        OutputFile *of = output_files[ost->file_index];
        int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
    
        if (ost->source_index != ist_index)
            return 0;
    
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}

    static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    {
    
        OutputFile *of = output_files[ost->file_index];
    
        InputFile   *f = input_files [ist->file_index];
    
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
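    // start_time is in AV_TIME_BASE (microsecond) units; convert it once into
    // the muxer time base so copied packet timestamps can be offset against it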
    
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt;

        av_init_packet(&opkt);
    
        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
            !ost->copy_initial_nonkeyframes)
            return;