Skip to content
Snippets Groups Projects
ffmpeg.c 159 KiB
Newer Older
  • Learn to ignore specific revisions
  •     if (!dec->channel_layout) {
            char layout_name[256];
    
            if (dec->channels > ist->guess_layout_max)
                return 0;
    
            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
    
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
    
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    
    static void check_decode_result(InputStream *ist, int *got_output, int ret)
    
    {
        if (*got_output || ret<0)
            decode_error_stat[ret<0] ++;
    
        if (ret < 0 && exit_on_error)
            exit_program(1);
    
    
        if (exit_on_error && *got_output && ist) {
            if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
                av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
                exit_program(1);
            }
        }
    
    wm4's avatar
    wm4 committed
    // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
    // There is the following difference: if you got a frame, you must call
    // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
    // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
    static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
    {
        int err;

        *got_frame = 0;

        if (pkt) {
            err = avcodec_send_packet(avctx, pkt);
            /* AVERROR(EAGAIN) is not expected from the send side here: every
             * pending frame is drained via avcodec_receive_frame() before a
             * new packet is submitted. EOF from the decoder is not an error
             * for the caller, so it is swallowed. */
            if (err < 0 && err != AVERROR_EOF)
                return err;
        }

        err = avcodec_receive_frame(avctx, frame);
        if (err >= 0) {
            *got_frame = 1;
            return 0;
        }
        /* EAGAIN just means "no frame available yet" — report success with
         * *got_frame left at 0; any other error is propagated. */
        return err == AVERROR(EAGAIN) ? 0 : err;
    }
    
    
    /* Push one decoded frame into every filtergraph input fed by this stream.
     * All inputs except the last receive a new reference (ist->filter_frame);
     * the last input consumes decoded_frame itself, so no extra ref is made.
     * Returns 0 on success or a negative AVERROR on failure; AVERROR_EOF from
     * a buffersrc is deliberately ignored (the graph is already finished).
     * The caller retains ownership of decoded_frame and must unref it. */
    static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
    {
        /* Initialize ret so the function is well-defined even if nb_filters
         * is 0 and av_assert1() is compiled out in release builds; the old
         * code returned an indeterminate value in that case. */
        int i, ret = 0;
        AVFrame *f;

        av_assert1(ist->nb_filters > 0);

        for (i = 0; i < ist->nb_filters; i++) {
            if (i < ist->nb_filters - 1) {
                f = ist->filter_frame;
                ret = av_frame_ref(f, decoded_frame);
                if (ret < 0)
                    break;
            } else
                f = decoded_frame;
            ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                               AV_BUFFERSRC_FLAG_PUSH);
            if (ret == AVERROR_EOF)
                ret = 0; /* ignore: this input already reached EOF */
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to inject frame into filter network: %s\n", av_err2str(ret));
                break;
            }
        }
        return ret;
    }
    
    
    static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    
        AVFrame *decoded_frame;
    
        AVCodecContext *avctx = ist->dec_ctx;
    
        int i, ret, err = 0, resample_changed;
    
        AVRational decoded_frame_tb;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
    
        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
    
        decoded_frame = ist->decoded_frame;
    
        update_benchmark(NULL);
    
    wm4's avatar
    wm4 committed
        ret = decode(avctx, decoded_frame, got_output, pkt);
    
        update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    
    
        if (ret >= 0 && avctx->sample_rate <= 0) {
    
            av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
    
    wm4's avatar
    wm4 committed
        if (ret != AVERROR_EOF)
            check_decode_result(ist, got_output, ret);
    
        ist->samples_decoded += decoded_frame->nb_samples;
        ist->frames_decoded++;
    
    
    #if 1
        /* increment next_dts to use for the case where the input stream does not
           have timestamps or there are multiple frames in the packet */
        ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
        ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                         avctx->sample_rate;
    #endif
    
        resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                           ist->resample_channels       != avctx->channels               ||
                           ist->resample_channel_layout != decoded_frame->channel_layout ||
                           ist->resample_sample_rate    != decoded_frame->sample_rate;
        if (resample_changed) {
            char layout1[64], layout2[64];
    
            if (!guess_input_channel_layout(ist)) {
                av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                       "layout for Input Stream #%d.%d\n", ist->file_index,
                       ist->st->index);
    
                exit_program(1);
    
            }
            decoded_frame->channel_layout = avctx->channel_layout;
    
            av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                         ist->resample_channel_layout);
            av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                         decoded_frame->channel_layout);
    
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
                   ist->resample_channels, layout1,
                   decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
                   avctx->channels, layout2);
    
            ist->resample_sample_fmt     = decoded_frame->format;
            ist->resample_sample_rate    = decoded_frame->sample_rate;
            ist->resample_channel_layout = decoded_frame->channel_layout;
            ist->resample_channels       = avctx->channels;
    
    
            for (i = 0; i < ist->nb_filters; i++) {
                err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
                if (err < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Error reconfiguring input stream %d:%d filter %d\n",
                           ist->file_index, ist->st->index, i);
                    goto fail;
                }
            }
    
    
            for (i = 0; i < nb_filtergraphs; i++)
    
                if (ist_in_filtergraph(filtergraphs[i], ist)) {
                    FilterGraph *fg = filtergraphs[i];
                    if (configure_filtergraph(fg) < 0) {
                        av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    
        if (decoded_frame->pts != AV_NOPTS_VALUE) {
    
            decoded_frame_tb   = ist->st->time_base;
    
    wm4's avatar
    wm4 committed
        } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
    
            decoded_frame->pts = pkt->pts;
            decoded_frame_tb   = ist->st->time_base;
        }else {
            decoded_frame->pts = ist->dts;
            decoded_frame_tb   = AV_TIME_BASE_Q;
        }
    
        if (decoded_frame->pts != AV_NOPTS_VALUE)
    
            decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
    
                                                  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                                  (AVRational){1, avctx->sample_rate});
    
        ist->nb_samples = decoded_frame->nb_samples;
    
        err = send_frame_to_filters(ist, decoded_frame);
    
        decoded_frame->pts = AV_NOPTS_VALUE;
    
        av_frame_unref(ist->filter_frame);
        av_frame_unref(decoded_frame);
        return err < 0 ? err : ret;
    
    wm4's avatar
    wm4 committed
    static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
    
        AVFrame *decoded_frame;
    
        int i, ret = 0, err = 0, resample_changed;
    
        int64_t best_effort_timestamp;
    
    wm4's avatar
    wm4 committed
        int64_t dts = AV_NOPTS_VALUE;
        AVPacket avpkt;
    
        // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
        // reason. This seems like a semi-critical bug. Don't trigger EOF, and
        // skip the packet.
        if (!eof && pkt && pkt->size == 0)
            return 0;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
        if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
    
        decoded_frame = ist->decoded_frame;
    
    wm4's avatar
    wm4 committed
        if (ist->dts != AV_NOPTS_VALUE)
            dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
        if (pkt) {
            avpkt = *pkt;
            avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
        }
    
        // The old code used to set dts on the drain packet, which does not work
        // with the new API anymore.
        if (eof) {
            void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
            if (!new)
                return AVERROR(ENOMEM);
            ist->dts_buffer = new;
            ist->dts_buffer[ist->nb_dts_buffer++] = dts;
        }
    
        update_benchmark(NULL);
    
    wm4's avatar
    wm4 committed
        ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    
        update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    
    
        // The following line may be required in some cases where there is no parser
        // or the parser does not has_b_frames correctly
    
        if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
    
            if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
    
                ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
    
                av_log(ist->dec_ctx, AV_LOG_WARNING,
    
                       "video_delay is larger in decoder than demuxer %d > %d.\n"
    
                       "If you want to help, upload a sample "
                       "of this file to ftp://upload.ffmpeg.org/incoming/ "
                       "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
                       ist->dec_ctx->has_b_frames,
    
                       ist->st->codecpar->video_delay);
    
    wm4's avatar
    wm4 committed
        if (ret != AVERROR_EOF)
            check_decode_result(ist, got_output, ret);
    
        if (*got_output && ret >= 0) {
            if (ist->dec_ctx->width  != decoded_frame->width ||
                ist->dec_ctx->height != decoded_frame->height ||
                ist->dec_ctx->pix_fmt != decoded_frame->format) {
                av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                    decoded_frame->width,
                    decoded_frame->height,
                    decoded_frame->format,
                    ist->dec_ctx->width,
                    ist->dec_ctx->height,
                    ist->dec_ctx->pix_fmt);
            }
        }
    
    
        if(ist->top_field_first>=0)
            decoded_frame->top_field_first = ist->top_field_first;
    
        if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
    
            err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
    
        ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
    
    
        best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
    
    wm4's avatar
    wm4 committed
    
        if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
            best_effort_timestamp = ist->dts_buffer[0];
    
            for (i = 0; i < ist->nb_dts_buffer - 1; i++)
                ist->dts_buffer[i] = ist->dts_buffer[i + 1];
            ist->nb_dts_buffer--;
        }
    
    
        if(best_effort_timestamp != AV_NOPTS_VALUE) {
            int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
    
            if (ts != AV_NOPTS_VALUE)
                ist->next_pts = ist->pts = ts;
        }
    
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
    
                   "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
                   ist->st->index, av_ts2str(decoded_frame->pts),
                   av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
                   best_effort_timestamp,
                   av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
                   decoded_frame->key_frame, decoded_frame->pict_type,
                   ist->st->time_base.num, ist->st->time_base.den);
    
        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    
        resample_changed = ist->resample_width   != decoded_frame->width  ||
                           ist->resample_height  != decoded_frame->height ||
                           ist->resample_pix_fmt != decoded_frame->format;
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
                   decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
    
            ist->resample_width   = decoded_frame->width;
            ist->resample_height  = decoded_frame->height;
            ist->resample_pix_fmt = decoded_frame->format;
    
            for (i = 0; i < ist->nb_filters; i++) {
                err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
                if (err < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Error reconfiguring input stream %d:%d filter %d\n",
                           ist->file_index, ist->st->index, i);
                    goto fail;
                }
            }
    
    
            for (i = 0; i < nb_filtergraphs; i++) {
                if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
    
                    configure_filtergraph(filtergraphs[i]) < 0) {
    
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    
                    exit_program(1);
    
        err = send_frame_to_filters(ist, decoded_frame);
    
        av_frame_unref(ist->filter_frame);
        av_frame_unref(decoded_frame);
        return err < 0 ? err : ret;
    
    static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    
        int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
    
                                              &subtitle, got_output, pkt);
    
        check_decode_result(NULL, got_output, ret);
    
        if (ret < 0 || !*got_output) {
            if (!pkt->size)
                sub2video_flush(ist);
    
        if (ist->fix_sub_duration) {
    
            if (ist->prev_sub.got_output) {
    
                end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                                 1000, AV_TIME_BASE);
    
                if (end < ist->prev_sub.subtitle.end_display_time) {
    
                           "Subtitle duration reduced from %d to %d%s\n",
                           ist->prev_sub.subtitle.end_display_time, end,
                           end <= 0 ? ", dropping it" : "");
    
                    ist->prev_sub.subtitle.end_display_time = end;
                }
            }
            FFSWAP(int,        *got_output, ist->prev_sub.got_output);
            FFSWAP(int,        ret,         ist->prev_sub.ret);
            FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
    
        sub2video_update(ist, &subtitle);
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || !ost->encoding_needed
                || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
    
            do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    
        avsubtitle_free(&subtitle);
    
    /* Signal end-of-stream on every filtergraph input attached to ist.
     * Returns 0 on success, or the first negative error from a buffersrc. */
    static int send_filter_eof(InputStream *ist)
    {
        int idx;

        for (idx = 0; idx < ist->nb_filters; idx++) {
            /* A NULL frame marks EOF on a buffersrc input. */
            int err = av_buffersrc_add_frame(ist->filters[idx]->filter, NULL);
            if (err < 0)
                return err;
        }
        return 0;
    }
    
    
    /* pkt = NULL means EOF (needed to flush decoder buffers) */
    
    static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
    
    wm4's avatar
    wm4 committed
        int repeating = 0;
        int eof_reached = 0;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        if (!ist->saw_first_ts) {
    
            ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
    
            if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
    
                ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
    
        if (ist->next_dts == AV_NOPTS_VALUE)
    
            ist->next_dts = ist->dts;
        if (ist->next_pts == AV_NOPTS_VALUE)
            ist->next_pts = ist->pts;
    
            /* EOF handling */
            av_init_packet(&avpkt);
            avpkt.data = NULL;
            avpkt.size = 0;
        } else {
            avpkt = *pkt;
        }
    
    wm4's avatar
    wm4 committed
        if (pkt && pkt->dts != AV_NOPTS_VALUE) {
    
            ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    
            if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
    
                ist->next_pts = ist->pts = ist->dts;
    
    Aneesh Dogra's avatar
    Aneesh Dogra committed
        // while we have more to decode or while the decoder did output something on EOF
    
    wm4's avatar
    wm4 committed
        while (ist->decoding_needed) {
            int duration = 0;
            int got_output = 0;
    
            ist->pts = ist->next_pts;
            ist->dts = ist->next_dts;
    
            switch (ist->dec_ctx->codec_type) {
    
    wm4's avatar
    wm4 committed
                ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output);
    
                break;
            case AVMEDIA_TYPE_VIDEO:
    
    wm4's avatar
    wm4 committed
                ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
                if (!repeating || !pkt || got_output) {
                    if (pkt && pkt->duration) {
                        duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                    } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                        int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                        duration = ((int64_t)AV_TIME_BASE *
                                        ist->dec_ctx->framerate.den * ticks) /
                                        ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                    }
    
    wm4's avatar
    wm4 committed
                    if(ist->dts != AV_NOPTS_VALUE && duration) {
                        ist->next_dts += duration;
                    }else
                        ist->next_dts = AV_NOPTS_VALUE;
                }
    
                if (got_output)
                    ist->next_pts += duration; //FIXME the duration is not correct in some cases
    
                break;
            case AVMEDIA_TYPE_SUBTITLE:
    
    wm4's avatar
    wm4 committed
                if (repeating)
                    break;
    
                ret = transcode_subtitles(ist, &avpkt, &got_output);
    
    wm4's avatar
    wm4 committed
                if (!pkt && ret >= 0)
                    ret = AVERROR_EOF;
    
    Anton Khirnov's avatar
    Anton Khirnov committed
            default:
                return -1;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
    wm4's avatar
    wm4 committed
            if (ret == AVERROR_EOF) {
                eof_reached = 1;
                break;
            }
    
    
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
                if (exit_on_error)
                    exit_program(1);
    
    wm4's avatar
    wm4 committed
                // Decoding might not terminate if we're draining the decoder, and
                // the decoder keeps returning an error.
                // This should probably be considered a libavcodec issue.
                // Sample: fate-vsynth1-dnxhd-720p-hr-lb
                if (!pkt)
                    eof_reached = 1;
    
    wm4's avatar
    wm4 committed
            if (!got_output)
                break;
    
    wm4's avatar
    wm4 committed
            // During draining, we might get multiple output frames in this loop.
            // ffmpeg.c does not drain the filter chain on configuration changes,
            // which means if we send multiple frames at once to the filters, and
            // one of those frames changes configuration, the buffered frames will
            // be lost. This can upset certain FATE tests.
            // Decode only 1 frame per call on EOF to appease these FATE tests.
            // The ideal solution would be to rewrite decoding to use the new
            // decoding API in a better way.
            if (!pkt)
    
    wm4's avatar
    wm4 committed
    
            repeating = 1;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    
        /* after flushing, send an EOF on all the filter inputs attached to the stream */
    
        /* except when looping we need to flush but not to send an EOF */
    
    wm4's avatar
    wm4 committed
        if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
    
            int ret = send_filter_eof(ist);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
                exit_program(1);
            }
        }
    
    
        if (!ist->decoding_needed) {
    
            ist->dts = ist->next_dts;
    
            switch (ist->dec_ctx->codec_type) {
    
            case AVMEDIA_TYPE_AUDIO:
    
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                 ist->dec_ctx->sample_rate;
    
                break;
            case AVMEDIA_TYPE_VIDEO:
    
                    // TODO: Remove work-around for c99-to-c89 issue 7
                    AVRational time_base_q = AV_TIME_BASE_Q;
                    int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                    ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
    
                    ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
    
                } else if(ist->dec_ctx->framerate.num != 0) {
    
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
    
                    ist->next_dts += ((int64_t)AV_TIME_BASE *
    
                                      ist->dec_ctx->framerate.den * ticks) /
                                      ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
                }
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
            }
    
            ist->pts = ist->dts;
            ist->next_pts = ist->next_dts;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
        for (i = 0; pkt && i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
            if (!check_output_constraints(ist, ost) || ost->encoding_needed)
                continue;
    
    wm4's avatar
    wm4 committed
        return !eof_reached;
    
    static void print_sdp(void)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    {
    
        int j;
        AVIOContext *sdp_pb;
    
        AVFormatContext **avc;
    
        for (i = 0; i < nb_output_files; i++) {
            if (!output_files[i]->header_written)
                return;
        }
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        avc = av_malloc_array(nb_output_files, sizeof(*avc));
    
            exit_program(1);
    
        for (i = 0, j = 0; i < nb_output_files; i++) {
            if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
                avc[j] = output_files[i]->ctx;
                j++;
            }
        }
    
    
        av_sdp_create(avc, j, sdp, sizeof(sdp));
    
        if (!sdp_filename) {
            printf("SDP:\n%s\n", sdp);
            fflush(stdout);
        } else {
            if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
            } else {
                avio_printf(sdp_pb, "SDP:\n%s", sdp);
    
    static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
    {
        int i;
        for (i = 0; hwaccels[i].name; i++)
            if (hwaccels[i].pix_fmt == pix_fmt)
                return &hwaccels[i];
        return NULL;
    }
    
    static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
    {
        InputStream *ist = s->opaque;
        const enum AVPixelFormat *p;
        int ret;
    
        for (p = pix_fmts; *p != -1; p++) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
            const HWAccel *hwaccel;
    
            if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
                break;
    
            hwaccel = get_hwaccel(*p);
            if (!hwaccel ||
                (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
                (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
                continue;
    
            ret = hwaccel->init(s);
            if (ret < 0) {
                if (ist->hwaccel_id == hwaccel->id) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n", hwaccel->name,
                           ist->file_index, ist->st->index);
    
    
            if (ist->hw_frames_ctx) {
                s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
                if (!s->hw_frames_ctx)
                    return AV_PIX_FMT_NONE;
            }
    
    
            ist->active_hwaccel_id = hwaccel->id;
            ist->hwaccel_pix_fmt   = *p;
            break;
        }
    
        return *p;
    }
    
    /* AVCodecContext.get_buffer2 callback: route frame allocation to the
     * active hwaccel for frames in its pixel format, otherwise fall back to
     * libavcodec's default allocator. */
    static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
    {
        InputStream *ist = s->opaque;

        if (!ist->hwaccel_get_buffer || frame->format != ist->hwaccel_pix_fmt)
            return avcodec_default_get_buffer2(s, frame, flags);

        return ist->hwaccel_get_buffer(s, frame, flags);
    }
    
    
    static int init_input_stream(int ist_index, char *error, int error_len)
    
        InputStream *ist = input_streams[ist_index];
    
        for (i = 0; i < ist->nb_filters; i++) {
            ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
                return ret;
            }
        }
    
    
        if (ist->decoding_needed) {
            AVCodec *codec = ist->dec;
            if (!codec) {
    
                snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
    
                        avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
    
                return AVERROR(EINVAL);
    
            ist->dec_ctx->opaque                = ist;
            ist->dec_ctx->get_format            = get_format;
            ist->dec_ctx->get_buffer2           = get_buffer;
            ist->dec_ctx->thread_safe_callbacks = 1;
    
            av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
    
            if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
               (ist->decoding_needed & DECODING_FOR_OST)) {
    
                av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
    
                if (ist->decoding_needed & DECODING_FOR_FILTER)
                    av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
            }
    
            av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
    
    
            /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
             * audio, and video decoders such as cuvid or mediacodec */
            av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
    
    
            if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
                av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
    
            if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
    
                if (ret == AVERROR_EXPERIMENTAL)
                    abort_codec_experimental(codec, 0);
    
    
                snprintf(error, error_len,
                         "Error while opening decoder for input stream "
                         "#%d:%d : %s",
    
                         ist->file_index, ist->st->index, av_err2str(ret));
    
            assert_avoptions(ist->decoder_opts);
    
        ist->next_pts = AV_NOPTS_VALUE;
    
        ist->next_dts = AV_NOPTS_VALUE;
    
        return 0;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    }
    
    
    static InputStream *get_input_stream(OutputStream *ost)
    
        if (ost->source_index >= 0)
            return input_streams[ost->source_index];
        return NULL;
    
    static int compare_int64(const void *a, const void *b)
    {
    
        return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
    
    /* open the muxer when all the streams are initialized */
    static int check_init_output_file(OutputFile *of, int file_index)
    {
        int ret, i;
    
        for (i = 0; i < of->ctx->nb_streams; i++) {
            OutputStream *ost = output_streams[of->ost_index + i];
            if (!ost->initialized)
                return 0;
        }
    
        of->ctx->interrupt_callback = int_cb;
    
        ret = avformat_write_header(of->ctx, &of->opts);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Could not write header for output file #%d "
    
                   "(incorrect codec parameters ?): %s\n",
    
                   file_index, av_err2str(ret));
            return ret;
        }
        //assert_avoptions(of->opts);
        of->header_written = 1;
    
        av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
    
        if (sdp_filename || want_sdp)
            print_sdp();
    
    
        /* flush the muxing queues */
        for (i = 0; i < of->ctx->nb_streams; i++) {
            OutputStream *ost = output_streams[of->ost_index + i];
    
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                write_packet(of, &pkt, ost);
            }
        }
    
    
    static int init_output_bsfs(OutputStream *ost)
    {
        AVBSFContext *ctx;
        int i, ret;
    
        if (!ost->nb_bitstream_filters)
            return 0;
    
        for (i = 0; i < ost->nb_bitstream_filters; i++) {
            ctx = ost->bsf_ctx[i];
    
            ret = avcodec_parameters_copy(ctx->par_in,
                                          i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
            if (ret < 0)
                return ret;
    
            ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
    
            ret = av_bsf_init(ctx);
            if (ret < 0) {
    
                av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
    
                       ost->bsf_ctx[i]->filter->name);
                return ret;
            }
        }
    
        ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
        ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
        if (ret < 0)
            return ret;
    
        ost->st->time_base = ctx->time_base_out;
    
        return 0;
    }
    
    
    static int init_output_stream_streamcopy(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
        InputStream *ist = get_input_stream(ost);
        AVCodecParameters *par_dst = ost->st->codecpar;
        AVCodecParameters *par_src = ost->ref_par;
        AVRational sar;
        int i, ret;
    
        uint32_t codec_tag = par_dst->codec_tag;
    
        ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
        if (ret >= 0)
            ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error setting up codec context options.\n");
            return ret;
        }
        avcodec_parameters_from_context(par_src, ost->enc_ctx);
    
    
        if (!codec_tag) {
            unsigned int codec_tag_tmp;
    
            if (!of->ctx->oformat->codec_tag ||
    
                av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
                !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
                codec_tag = par_src->codec_tag;
    
        ret = avcodec_parameters_copy(par_dst, par_src);
        if (ret < 0)
            return ret;
    
        par_dst->codec_tag = codec_tag;
    
    
        if (!ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        ost->st->avg_frame_rate = ost->frame_rate;
    
        ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
        if (ret < 0)
            return ret;
    
        // copy timebase while removing common factors
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
    
    
        // copy disposition
        ost->st->disposition = ist->st->disposition;
    
    
        if (ist->st->nb_side_data) {
            ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
                                                  sizeof(*ist->st->side_data));
            if (!ost->st->side_data)
                return AVERROR(ENOMEM);
    
            ost->st->nb_side_data = 0;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                const AVPacketSideData *sd_src = &ist->st->side_data[i];
                AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
    
                if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
                    continue;
    
                sd_dst->data = av_malloc(sd_src->size);
                if (!sd_dst->data)
                    return AVERROR(ENOMEM);
                memcpy(sd_dst->data, sd_src->data, sd_src->size);
                sd_dst->size = sd_src->size;
                sd_dst->type = sd_src->type;
                ost->st->nb_side_data++;
            }
        }
    
        ost->parser = av_parser_init(par_dst->codec_id);
        ost->parser_avctx = avcodec_alloc_context3(NULL);
        if (!ost->parser_avctx)
            return AVERROR(ENOMEM);
    
        switch (par_dst->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (audio_volume != 256) {
                av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                exit_program(1);
            }
            if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
                par_dst->block_align= 0;
            if(par_dst->codec_id == AV_CODEC_ID_AC3)
                par_dst->block_align= 0;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
                sar =
                    av_mul_q(ost->frame_aspect_ratio,
                             (AVRational){ par_dst->height, par_dst->width });
                av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                       "with stream copy may produce invalid files\n");
                }
            else if (ist->st->sample_aspect_ratio.num)
                sar = ist->st->sample_aspect_ratio;
            else
                sar = par_src->sample_aspect_ratio;
            ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
            ost->st->avg_frame_rate = ist->st->avg_frame_rate;
            ost->st->r_frame_rate = ist->st->r_frame_rate;
            break;
        }
    
        return 0;
    }
    
    
    static void set_encoder_id(OutputFile *of, OutputStream *ost)
    {
        AVDictionaryEntry *e;
    
        uint8_t *encoder_string;
        int encoder_string_len;
        int format_flags = 0;
        int codec_flags = 0;
    
        if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
            return;
    
        e = av_dict_get(of->opts, "fflags", NULL, 0);
        if (e) {
            const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
            if (!o)
                return;
            av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
        }
        e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
        if (e) {
            const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
            if (!o)
                return;
            av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
        }
    
        encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
        encoder_string     = av_mallocz(encoder_string_len);