  •         if (ret < 0) {
    
                av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
    
                       ost->bsf_ctx[i]->filter->name);
                return ret;
            }
        }
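    /* the output stream takes its codec parameters and time base from the
     * last bitstream filter in the chain */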
    
        ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
        ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
        if (ret < 0)
            return ret;
    
        ost->st->time_base = ctx->time_base_out;
    
        return 0;
    }
    
    
    static int init_output_stream_streamcopy(OutputStream *ost)
    {
        OutputFile *of = output_files[ost->file_index];
        InputStream *ist = get_input_stream(ost);
        AVCodecParameters *par_dst = ost->st->codecpar;
        AVCodecParameters *par_src = ost->ref_par;
        AVRational sar;
        int i, ret;
    
        uint32_t codec_tag = par_dst->codec_tag;
    
        ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
        if (ret >= 0)
            ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error setting up codec context options.\n");
            return ret;
        }
    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }
    
    
    if (!codec_tag) {
        unsigned int codec_tag_tmp;

        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

        ret = avcodec_parameters_copy(par_dst, par_src);
        if (ret < 0)
            return ret;
    
        par_dst->codec_tag = codec_tag;
    
    
        if (!ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        ost->st->avg_frame_rate = ost->frame_rate;
    
        ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
        if (ret < 0)
            return ret;
    
        // copy timebase while removing common factors
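    // (adding 0/1 makes av_add_q() reduce the fraction)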
    
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
    
        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    
    
        // copy disposition
        ost->st->disposition = ist->st->disposition;
    
    
    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

        if (ost->rotate_overridden) {
            uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                                  sizeof(int32_t) * 9);
            if (sd)
                av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
        }
    
    
        ost->parser = av_parser_init(par_dst->codec_id);
        ost->parser_avctx = avcodec_alloc_context3(NULL);
        if (!ost->parser_avctx)
            return AVERROR(ENOMEM);
    
        switch (par_dst->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (audio_volume != 256) {
                av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                exit_program(1);
            }
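        /* drop block_align for MP3/AC-3 stream copy so a stale value from the
         * source container is not carried into the output */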
            if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
                par_dst->block_align= 0;
            if(par_dst->codec_id == AV_CODEC_ID_AC3)
                par_dst->block_align= 0;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
                sar =
                    av_mul_q(ost->frame_aspect_ratio,
                             (AVRational){ par_dst->height, par_dst->width });
                av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                       "with stream copy may produce invalid files\n");
                }
            else if (ist->st->sample_aspect_ratio.num)
                sar = ist->st->sample_aspect_ratio;
            else
                sar = par_src->sample_aspect_ratio;
            ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
            ost->st->avg_frame_rate = ist->st->avg_frame_rate;
            ost->st->r_frame_rate = ist->st->r_frame_rate;
            break;
        }
    
    
    ost->mux_timebase = ist->st->time_base;

    return 0;
}

    static void set_encoder_id(OutputFile *of, OutputStream *ost)
    {
        AVDictionaryEntry *e;
    
        uint8_t *encoder_string;
        int encoder_string_len;
        int format_flags = 0;
        int codec_flags = 0;
    
        if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
            return;
    
        e = av_dict_get(of->opts, "fflags", NULL, 0);
        if (e) {
            const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
            if (!o)
                return;
            av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
        }
        e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
        if (e) {
            const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
            if (!o)
                return;
            av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
        }
    
        encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
        encoder_string     = av_mallocz(encoder_string_len);
        if (!encoder_string)
            exit_program(1);
    
        if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
            av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
        else
            av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
        av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
        av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                    AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
    }
    
    static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                        AVCodecContext *avctx)
    {
        char *p;
        int n = 1, i, size, index = 0;
        int64_t t, *pts;
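    /* kf is a comma-separated list of timestamps, e.g. "0,10,20"; the special
     * entry "chapters[<delta>]" expands to the file's chapter start times */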
    
        for (p = kf; *p; p++)
            if (*p == ',')
                n++;
        size = n;
        pts = av_malloc_array(size, sizeof(*pts));
        if (!pts) {
            av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
            exit_program(1);
        }
    
        p = kf;
        for (i = 0; i < n; i++) {
            char *next = strchr(p, ',');
    
            if (next)
                *next++ = 0;
    
            if (!memcmp(p, "chapters", 8)) {
    
                AVFormatContext *avf = output_files[ost->file_index]->ctx;
                int j;
    
                if (avf->nb_chapters > INT_MAX - size ||
                    !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                         sizeof(*pts)))) {
                    av_log(NULL, AV_LOG_FATAL,
                           "Could not allocate forced key frames array.\n");
                    exit_program(1);
                }
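            /* anything following "chapters" (e.g. "-0.1") is parsed as a time
             * delta added to every chapter start */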
                t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
                t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
    
                for (j = 0; j < avf->nb_chapters; j++) {
                    AVChapter *c = avf->chapters[j];
                    av_assert1(index < size);
                    pts[index++] = av_rescale_q(c->start, c->time_base,
                                                avctx->time_base) + t;
                }
    
            } else {
    
                t = parse_time_or_die("force_key_frames", p, 1);
                av_assert1(index < size);
                pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
    
            }
    
            p = next;
        }
    
        av_assert0(index == size);
        qsort(pts, size, sizeof(*pts), compare_int64);
        ost->forced_kf_count = size;
        ost->forced_kf_pts   = pts;
    }
    
    static int init_output_stream_encode(OutputStream *ost)
    {
        InputStream *ist = get_input_stream(ost);
        AVCodecContext *enc_ctx = ost->enc_ctx;
        AVCodecContext *dec_ctx = NULL;
        AVFormatContext *oc = output_files[ost->file_index]->ctx;
        int j, ret;
    
        set_encoder_id(output_files[ost->file_index], ost);
    
    
        // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
        // hand, the legacy API makes demuxers set "rotate" metadata entries,
        // which have to be filtered out to prevent leaking them to output files.
        av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
    
    
        if (ist) {
            ost->st->disposition          = ist->st->disposition;
    
            dec_ctx = ist->dec_ctx;
    
            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
        } else {
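        /* no corresponding input stream: mark this stream as default if it is
         * the only audio/video stream of its type in the output file */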
            for (j = 0; j < oc->nb_streams; j++) {
                AVStream *st = oc->streams[j];
                if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                    break;
            }
            if (j == oc->nb_streams)
                if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                    ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                    ost->st->disposition = AV_DISPOSITION_DEFAULT;
        }
    
        if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
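        /* choose the output frame rate: an already-set value or the filter
         * graph's rate wins, then the input's configured rate, then its
         * guessed base rate, and finally a 25 fps default */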
            if (!ost->frame_rate.num)
                ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
            if (ist && !ost->frame_rate.num)
                ost->frame_rate = ist->framerate;
            if (ist && !ost->frame_rate.num)
                ost->frame_rate = ist->st->r_frame_rate;
            if (ist && !ost->frame_rate.num) {
                ost->frame_rate = (AVRational){25, 1};
                av_log(NULL, AV_LOG_WARNING,
                       "No information "
                       "about the input framerate is available. Falling "
                       "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                       "if you want a different framerate.\n",
                       ost->file_index, ost->index);
            }
    //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
    
            if (ost->enc->supported_framerates && !ost->force_fps) {
    
                int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                ost->frame_rate = ost->enc->supported_framerates[idx];
            }
            // reduce frame rate for mpeg4 to be within the spec limits
            if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
                av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                          ost->frame_rate.num, ost->frame_rate.den, 65535);
            }
        }
    
        switch (enc_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
    
            enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
    
            if (dec_ctx)
                enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                     av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
    
            enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
            enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
            enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
    
            enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
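        // (audio is coded in whole samples, hence the 1/sample_rate time base)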
            break;
        case AVMEDIA_TYPE_VIDEO:
            enc_ctx->time_base = av_inv_q(ost->frame_rate);
            if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
    
                enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
    
            if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
               && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
                av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                           "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
            }
            for (j = 0; j < ost->forced_kf_count; j++)
                ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                     AV_TIME_BASE_Q,
                                                     enc_ctx->time_base);
    
    
            enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
            enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
    
            enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
                ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
                av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
    
                av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
    
            if (!strncmp(ost->enc->name, "libx264", 7) &&
                enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
    
                av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
    
                av_log(NULL, AV_LOG_WARNING,
                       "No pixel format specified, %s for H.264 encoding chosen.\n"
                       "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
    
                       av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
    
            if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
                enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
    
                av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
    
                av_log(NULL, AV_LOG_WARNING,
                       "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                       "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
    
                       av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
            enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
    
            if (dec_ctx)
                enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                     av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
    
    
            enc_ctx->framerate = ost->frame_rate;
    
    
            ost->st->avg_frame_rate = ost->frame_rate;
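        /* if the encoder does not see the decoded frames unchanged (different
         * size or pixel format, or no decoder at all), the decoder's bit depth
         * no longer applies; use the value from -bits_per_raw_sample instead */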
    
            if (!dec_ctx ||
                enc_ctx->width   != dec_ctx->width  ||
                enc_ctx->height  != dec_ctx->height ||
                enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
                enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
            }
    
            if (ost->forced_keyframes) {
                if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                    ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                        forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                    if (ret < 0) {
                        av_log(NULL, AV_LOG_ERROR,
                               "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                        return ret;
                    }
                    ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
    
                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                // parse it only for static kf timings
                } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                    parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
    
            enc_ctx->time_base = AV_TIME_BASE_Q;
    
            if (!enc_ctx->width) {
                enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
                enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
            }
            break;
        case AVMEDIA_TYPE_DATA:
            break;
        default:
            abort();
            break;
        }
    
    
    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}

    static int init_output_stream(OutputStream *ost, char *error, int error_len)
    {
        int ret = 0;
    
        if (ost->encoding_needed) {
            AVCodec      *codec = ost->enc;
            AVCodecContext *dec = NULL;
            InputStream *ist;
    
    
            ret = init_output_stream_encode(ost);
            if (ret < 0)
                return ret;
    
    
            if ((ist = get_input_stream(ost)))
                dec = ist->dec_ctx;
            if (dec && dec->subtitle_header) {
                /* ASS code assumes this buffer is null terminated so add extra byte. */
                ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
                if (!ost->enc_ctx->subtitle_header)
                    return AVERROR(ENOMEM);
                memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
            }
            if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
                av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
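
        /* no encoder defaults and no user-supplied audio bitrate: fall back
         * to 128 kb/s */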
    
            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
                !codec->defaults &&
                !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
                !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
                av_dict_set(&ost->encoder_opts, "b", "128000", 0);
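
        /* if the filter output is backed by a hardware frames context whose
         * format matches the negotiated pixel format, hand that context to
         * the encoder so it can consume the hardware frames directly */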
    
            if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
                ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
                av_buffersink_get_format(ost->filter->filter)) {
    
                ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
    
                if (!ost->enc_ctx->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
    
    
            if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
                if (ret == AVERROR_EXPERIMENTAL)
                    abort_codec_experimental(codec, 1);
                snprintf(error, error_len,
                         "Error while opening encoder for output stream #%d:%d - "
                         "maybe incorrect parameters such as bit_rate, rate, width or height",
                        ost->file_index, ost->index);
                return ret;
            }
            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
    
                !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    
                av_buffersink_set_frame_size(ost->filter->filter,
                                                ost->enc_ctx->frame_size);
            assert_avoptions(ost->encoder_opts);
            if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
                av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                             " It takes bits/s as argument, not kbits/s\n");
    
    
            ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
    
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "Error initializing the output stream codec context.\n");
                exit_program(1);
            }
    
        /*
         * FIXME: ost->st->codec shouldn't be needed here anymore.
         */
            ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
            if (ret < 0)
                return ret;
    
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

            /*
             * Add global input side data. For now this is naive, and copies it
             * from the input stream's global side data. All side data should
             * really be funneled over AVFrame and libavfilter, then added back to
             * packet side data, and then potentially using the first packet for
             * global side data.
             */
            if (ist) {
                int i;
                for (i = 0; i < ist->st->nb_side_data; i++) {
                    AVPacketSideData *sd = &ist->st->side_data[i];
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
    
    
            // copy timebase while removing common factors
    
            if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
                ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
    
    
            // copy estimated duration as a hint to the muxer
            if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
                ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    
    
            ost->st->codec->codec= ost->enc_ctx->codec;
    
        } else if (ost->stream_copy) {
    
            ret = init_output_stream_streamcopy(ost);
            if (ret < 0)
                return ret;
    
    
        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         */
        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
        if (ret < 0)
            return ret;
    }

        // parse user provided disposition, and update stream values
        if (ost->disposition) {
            static const AVOption opts[] = {
                { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
                { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
                { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
                { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
                { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
                { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
                { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
                { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
                { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
                { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
                { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
                { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
                { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
                { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
                { NULL },
            };
            static const AVClass class = {
                .class_name = "",
                .item_name  = av_default_item_name,
                .option     = opts,
                .version    = LIBAVUTIL_VERSION_INT,
            };
            const AVClass *pclass = &class;
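        /* ost->disposition holds a flag string such as "default+forced",
         * evaluated against the table above */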
    
            ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
            if (ret < 0)
                return ret;
        }
    
    
        /* initialize bitstream filters for the output stream
         * needs to be done here, because the codec id for streamcopy is not
         * known until now */
        ret = init_output_bsfs(ost);
        if (ret < 0)
            return ret;
    
    
        ost->initialized = 1;
    
        ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
        if (ret < 0)
        return ret;

    return 0;
}

static void report_new_stream(int input_index, AVPacket *pkt)
{
        InputFile *file = input_files[input_index];
        AVStream *st = file->ctx->streams[pkt->stream_index];
    
        if (pkt->stream_index < file->nb_streams_warn)
            return;
        av_log(file->ctx, AV_LOG_WARNING,
               "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
    
               av_get_media_type_string(st->codecpar->codec_type),
    
               input_index, pkt->stream_index,
               pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}

static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

        for (i = 0; i < nb_filtergraphs; i++) {
            FilterGraph *fg = filtergraphs[i];
            for (j = 0; j < fg->nb_outputs; j++) {
                OutputFilter *ofilter = fg->outputs[j];
    
                if (!ofilter->ost || ofilter->ost->source_index >= 0)
    
                    continue;
                if (fg->nb_inputs != 1)
                    continue;
                for (k = nb_input_streams-1; k >= 0 ; k--)
                    if (fg->inputs[0]->ist == input_streams[k])
                        break;
                ofilter->ost->source_index = k;
            }
        }
    
    
        /* init framerate emulation */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            if (ifile->rate_emu)
                for (j = 0; j < ifile->nb_streams; j++)
    
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

        /* init input streams */
        for (i = 0; i < nb_input_streams; i++)
    
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

        /* open each encoder */
        for (i = 0; i < nb_output_streams; i++) {
    
            // skip streams fed from filtergraphs until we have a frame for them
            if (output_streams[i]->filter)
                continue;
    
    
            ret = init_output_stream(output_streams[i], error, sizeof(error));
            if (ret < 0)
                goto dump_format;
        }
    
    
        /* discard unused programs */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            for (j = 0; j < ifile->ctx->nb_programs; j++) {
                AVProgram *p = ifile->ctx->programs[j];
                int discard  = AVDISCARD_ALL;
    
                for (k = 0; k < p->nb_stream_indexes; k++)
    
                    if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
    
                        discard = AVDISCARD_DEFAULT;
                        break;
                    }
            p->discard = discard;
        }
    }

        /* write headers for files with no streams */
        for (i = 0; i < nb_output_files; i++) {
            oc = output_files[i]->ctx;
            if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
                ret = check_init_output_file(output_files[i], i);
                if (ret < 0)
                    goto dump_format;
            }
        }
    
    
     dump_format:
        /* dump the stream mapping */
    
        av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
    
            for (j = 0; j < ist->nb_filters; j++) {
    
                if (!filtergraph_is_simple(ist->filters[j]->graph)) {
    
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

            if (ost->attachment_filename) {
                /* an attached file */
                av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                       ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

            if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
    
                /* output from a complex graph */
    
                av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
    
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
    
                av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

            av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
    
                   input_streams[ost->source_index]->file_index,
                   input_streams[ost->source_index]->st->index,
    
                   ost->file_index,
                   ost->index);
    
            if (ost->sync_ist != input_streams[ost->source_index])
    
                av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
    
                       ost->sync_ist->file_index,
                       ost->sync_ist->st->index);
    
            if (ost->stream_copy)
    
                av_log(NULL, AV_LOG_INFO, " (copy)");
    
            else {
                const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
                const AVCodec *out_codec   = ost->enc;
                const char *decoder_name   = "?";
                const char *in_codec_name  = "?";
                const char *encoder_name   = "?";
                const char *out_codec_name = "?";
    
                const AVCodecDescriptor *desc;
    
    
                if (in_codec) {
                    decoder_name  = in_codec->name;
    
                    desc = avcodec_descriptor_get(in_codec->id);
                    if (desc)
                        in_codec_name = desc->name;
    
                    if (!strcmp(decoder_name, in_codec_name))
                        decoder_name = "native";
                }
    
                if (out_codec) {
                    encoder_name   = out_codec->name;
    
                    desc = avcodec_descriptor_get(out_codec->id);
                    if (desc)
                        out_codec_name = desc->name;
    
                    if (!strcmp(encoder_name, out_codec_name))
    
                        encoder_name = "native";
                }
    
                av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                       in_codec_name, decoder_name,
                       out_codec_name, encoder_name);
            }
    
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}

/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost    = output_streams[i];
            OutputFile *of       = output_files[ost->file_index];
            AVFormatContext *os  = output_files[ost->file_index]->ctx;
    
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
    
            if (ost->frame_number >= ost->max_frames) {
    
                int j;
                for (j = 0; j < of->ctx->nb_streams; j++)
    
                close_output_stream(output_streams[of->ost_index + j]);
            continue;
        }

        return 1;
    }

    return 0;
}

/**
 * Select the output stream to process.
 *
 * @return  selected output stream, or NULL if none available
 */
    static OutputStream *choose_output(void)
    
    {
    
        int i;
        int64_t opts_min = INT64_MAX;
        OutputStream *ost_min = NULL;
    
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
    
            int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
            if (ost->st->cur_dts == AV_NOPTS_VALUE)
                av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
    
    
            if (!ost->initialized && !ost->inputs_done)
            return ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min  = ost;
        }
    }

    return ost_min;
}

    static void set_tty_echo(int on)
    {
    #if HAVE_TERMIOS_H
        struct termios tty;
        if (tcgetattr(0, &tty) == 0) {
            if (on) tty.c_lflag |= ECHO;
            else    tty.c_lflag &= ~ECHO;
            tcsetattr(0, TCSANOW, &tty);
        }
    #endif
    }
    
    
    static int check_keyboard_interaction(int64_t cur_time)
    {
        int i, ret, key;
        static int64_t last_time;
        if (received_nb_signals)
            return AVERROR_EXIT;
        /* read_key() returns 0 on EOF */
        if(cur_time - last_time >= 100000 && !run_as_daemon){
            key =  read_key();
            last_time = cur_time;
        }else
            key = -1;
        if (key == 'q')
            return AVERROR_EXIT;
        if (key == '+') av_log_set_level(av_log_get_level()+10);
        if (key == '-') av_log_set_level(av_log_get_level()-10);
        if (key == 's') qp_hist     ^= 1;
        if (key == 'h'){
            if (do_hex_dump){
                do_hex_dump = do_pkt_dump = 0;
            } else if(do_pkt_dump){
                do_hex_dump = 1;
            } else
                do_pkt_dump = 1;
            av_log_set_level(AV_LOG_DEBUG);
        }
        if (key == 'c' || key == 'C'){
            char buf[4096], target[64], command[256], arg[256] = {0};
            double time;
            int k, n = 0;
    
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
    
            set_tty_echo(0);
            fprintf(stderr, "\n");
    
            if (k > 0 &&
                (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
                av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                       target, time, command, arg);
                for (i = 0; i < nb_filtergraphs; i++) {
                    FilterGraph *fg = filtergraphs[i];
                    if (fg->graph) {
                        if (time < 0) {
                            ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                              key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
    
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "Parse error, at least 3 arguments were expected, "
                       "only %d given in string '%s'\n", n, buf);
            }
        }
    }
        if (key == 'd' || key == 'D'){
            int debug=0;
            if(key == 'D') {
                debug = input_streams[0]->st->codec->debug<<1;
                if(!debug) debug = 1;
                while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                    debug += debug;
    
            }else{
                char buf[32];
                int k = 0;
                i = 0;
    
                while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                    if (k > 0)
                        buf[i++] = k;
                buf[i] = 0;
    
                set_tty_echo(0);
                fprintf(stderr, "\n");
    
                if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
    
                fprintf(stderr,"error parsing debug value\n");
        }
            for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
            }
            if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
        if (key == '?'){
            fprintf(stderr, "key    function\n"
                            "?      show this help\n"
                            "+      increase verbosity\n"
                            "-      decrease verbosity\n"
    
                            "c      Send command to first matching filter supporting it\n"
    
                            "C      Send/Queue command to all matching filters\n"
    
                            "D      cycle through available debug modes\n"
                            "h      dump packets/hex press to cycle through the 3 states\n"
                            "q      quit\n"
                            "s      Show QP histogram\n"
            );
        }
    return 0;
}

    #if HAVE_PTHREADS
    
static void *input_thread(void *arg)
{
        InputFile *f = arg;
    
        unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    
        int ret = 0;
    
        while (1) {
    
            AVPacket pkt;
            ret = av_read_frame(f->ctx, &pkt);
    
            if (ret == AVERROR(EAGAIN)) {
    
                av_usleep(10000);
    
                continue;
    
            }
            if (ret < 0) {
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
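        /* if a non-blocking send failed because the queue is full, warn and
         * retry as a blocking send */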
            if (flags && ret == AVERROR(EAGAIN)) {
                flags = 0;
                ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
                av_log(f->ctx, AV_LOG_WARNING,
                       "Thread message queue blocking; consider raising the "
                       "thread_queue_size option (current value: %d)\n",
                       f->thread_queue_size);
            }
    
            if (ret < 0) {
                if (ret != AVERROR_EOF)
                    av_log(f->ctx, AV_LOG_ERROR,
                           "Unable to send packet to main thread: %s\n",
                           av_err2str(ret));
    
                av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
                break;
        }
    }

    return NULL;
}

static void free_input_threads(void)
{
    int i;

        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
            AVPacket pkt;
    
        if (!f || !f->in_thread_queue)
            continue;
    
            av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_packet_unref(&pkt);

            pthread_join(f->thread, NULL);
            f->joined = 1;
    
            av_thread_message_queue_free(&f->in_thread_queue);