            ost->st->r_frame_rate = ist->st->r_frame_rate;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            par_dst->width  = par_src->width;
            par_dst->height = par_src->height;
            break;
        case AVMEDIA_TYPE_UNKNOWN:
        case AVMEDIA_TYPE_DATA:
        case AVMEDIA_TYPE_ATTACHMENT:
            break;
        default:
            abort();
        }
    
        return 0;
    }
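
    /* Finish setting up one output stream: open its encoder (or complete the
     * stream-copy setup), initialize its bitstream filters and, once every
     * stream of the target file is ready, let the muxer write the header. */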
    
    
    static int init_output_stream(OutputStream *ost, char *error, int error_len)
    {
        int ret = 0;
    
        if (ost->encoding_needed) {
            AVCodec      *codec = ost->enc;
            AVCodecContext *dec = NULL;
            InputStream *ist;
    
            if ((ist = get_input_stream(ost)))
                dec = ist->dec_ctx;
            if (dec && dec->subtitle_header) {
                /* ASS code assumes this buffer is null terminated so add extra byte. */
                ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
                if (!ost->enc_ctx->subtitle_header)
                    return AVERROR(ENOMEM);
                memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
            }
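
            /* Apply defaults the user did not override: automatic encoder
             * threading, and a 128 kb/s bitrate for audio encoders that do not
             * provide their own defaults. */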
            if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
                av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
    
            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
                !codec->defaults &&
                !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
                !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
                av_dict_set(&ost->encoder_opts, "b", "128000", 0);
    
            if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
                ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
                if (!ost->enc_ctx->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
    
    
            if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
                if (ret == AVERROR_EXPERIMENTAL)
                    abort_codec_experimental(codec, 1);
                snprintf(error, error_len,
                         "Error while opening encoder for output stream #%d:%d - "
                         "maybe incorrect parameters such as bit_rate, rate, width or height",
                        ost->file_index, ost->index);
                return ret;
            }
            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
    
                !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    
                av_buffersink_set_frame_size(ost->filter->filter,
                                                ost->enc_ctx->frame_size);
            assert_avoptions(ost->encoder_opts);
            if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
                av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                             " It takes bits/s as argument, not kbits/s\n");
    
    
            ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
    
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "Error initializing the output stream codec context.\n");
                exit_program(1);
            }
    
            /*
             * FIXME: ost->st->codec shouldn't be needed here anymore.
             */
            ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
            if (ret < 0)
                return ret;
    
            if (ost->enc_ctx->nb_coded_side_data) {
                int i;
    
                ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
                                                      sizeof(*ost->st->side_data));
                if (!ost->st->side_data)
                    return AVERROR(ENOMEM);
    
                for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                    const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                    AVPacketSideData *sd_dst = &ost->st->side_data[i];
    
                    sd_dst->data = av_malloc(sd_src->size);
                    if (!sd_dst->data)
                        return AVERROR(ENOMEM);
                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
                    sd_dst->size = sd_src->size;
                    sd_dst->type = sd_src->type;
                    ost->st->nb_side_data++;
                }
            }
    
    
            // copy timebase while removing common factors
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
            ost->st->codec->codec= ost->enc_ctx->codec;
    
        } else if (ost->stream_copy) {
            ret = init_output_stream_streamcopy(ost);
            if (ret < 0)
                return ret;

            /*
             * FIXME: will the codec context used by the parser during streamcopy
             * This should go away with the new parser API.
             */
            ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
            if (ret < 0)
                return ret;
        }

        /* initialize bitstream filters for the output stream
         * needs to be done here, because the codec id for streamcopy is not
         * known until now */
        ret = init_output_bsfs(ost);
        if (ret < 0)
            return ret;
    
    
        ost->initialized = 1;
    
        ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
        if (ret < 0)
            return ret;

        return ret;
    }
    
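    /* Parse the -force_key_frames argument: a comma-separated list of
     * timestamps and/or "chapters[+offset]" entries, rescaled to the encoder
     * time base and stored sorted in ost->forced_kf_pts. */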
    static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                        AVCodecContext *avctx)
    {
        char *p;
    
        int n = 1, i, size, index = 0;
        int64_t t, *pts;
    
        for (p = kf; *p; p++)
            if (*p == ',')
                n++;
    
        size = n;
        pts = av_malloc_array(size, sizeof(*pts));
        if (!pts) {
            av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
            exit_program(1);
        }

        p = kf;
        for (i = 0; i < n; i++) {
    
            char *next = strchr(p, ',');

            if (next)
                *next++ = 0;

            if (!memcmp(p, "chapters", 8)) {
    
                AVFormatContext *avf = output_files[ost->file_index]->ctx;
                int j;
    
                if (avf->nb_chapters > INT_MAX - size ||
                    !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                         sizeof(*pts)))) {
                    av_log(NULL, AV_LOG_FATAL,
                           "Could not allocate forced key frames array.\n");
                    exit_program(1);
                }
                t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
                t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
    
                for (j = 0; j < avf->nb_chapters; j++) {
                    AVChapter *c = avf->chapters[j];
                    av_assert1(index < size);
                    pts[index++] = av_rescale_q(c->start, c->time_base,
                                                avctx->time_base) + t;
                }
    
            } else {
    
                t = parse_time_or_die("force_key_frames", p, 1);
                av_assert1(index < size);
                pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
    
            }

            p = next;
        }

        av_assert0(index == size);
        qsort(pts, size, sizeof(*pts), compare_int64);
        ost->forced_kf_count = size;
        ost->forced_kf_pts   = pts;
    }
    
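    /* Warn (once per stream index) when a packet shows up for a stream that
     * was not seen while probing the input file. */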
    static void report_new_stream(int input_index, AVPacket *pkt)
    {
        InputFile *file = input_files[input_index];
        AVStream *st = file->ctx->streams[pkt->stream_index];
    
        if (pkt->stream_index < file->nb_streams_warn)
            return;
        av_log(file->ctx, AV_LOG_WARNING,
               "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
    
               av_get_media_type_string(st->codecpar->codec_type),
    
               input_index, pkt->stream_index,
               pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
        file->nb_streams_warn = pkt->stream_index + 1;
    }
    
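    /* Tag the output stream with an "encoder" metadata entry unless the user
     * already set one; the tag is the libavcodec ident plus the encoder name,
     * shortened to "Lavc" when bitexact output is requested. */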
    static void set_encoder_id(OutputFile *of, OutputStream *ost)
    {
        AVDictionaryEntry *e;
    
        uint8_t *encoder_string;
        int encoder_string_len;
        int format_flags = 0;
        int codec_flags = 0;
    
        if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
            return;
    
    
        e = av_dict_get(of->opts, "fflags", NULL, 0);
        if (e) {
            const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
            if (!o)
                return;
            av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
        }
    
        e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
        if (e) {
    
            const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
            if (!o)
                return;
            av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
        }

        encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
        encoder_string     = av_mallocz(encoder_string_len);
        if (!encoder_string)
            exit_program(1);
    
    
        if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
    
            av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    
        else
            av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    
        av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
        av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                    AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
    }
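
    /* Set up the whole transcoding pipeline: bind complex-filtergraph outputs
     * to their source streams, derive encoder parameters from the configured
     * filtergraphs, open all decoders and encoders, and dump the resulting
     * stream mapping. */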
    
    
    static int transcode_init(void)
    {
        int ret = 0, i, j, k;
        AVFormatContext *oc;
        OutputStream *ost;
        InputStream *ist;
        char error[1024] = {0};
    
        for (i = 0; i < nb_filtergraphs; i++) {
            FilterGraph *fg = filtergraphs[i];
            for (j = 0; j < fg->nb_outputs; j++) {
                OutputFilter *ofilter = fg->outputs[j];
    
                if (!ofilter->ost || ofilter->ost->source_index >= 0)
    
                    continue;
                if (fg->nb_inputs != 1)
                    continue;
                for (k = nb_input_streams-1; k >= 0 ; k--)
                    if (fg->inputs[0]->ist == input_streams[k])
                        break;
                ofilter->ost->source_index = k;
            }
        }
    
    
        /* init framerate emulation */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            if (ifile->rate_emu)
                for (j = 0; j < ifile->nb_streams; j++)
    
                    input_streams[j + ifile->ist_index]->start = av_gettime_relative();
        }

        /* for each output stream, we compute the right encoding parameters */
    
        for (i = 0; i < nb_output_streams; i++) {
    
            ost = output_streams[i];
            oc  = output_files[ost->file_index]->ctx;
    
            ist = get_input_stream(ost);
    
            if (ost->attachment_filename)
                continue;
    
            if (ist) {
                ost->st->disposition          = ist->st->disposition;
    
            } else {
                for (j=0; j<oc->nb_streams; j++) {
                    AVStream *st = oc->streams[j];
    
                    if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                        break;
                }
                if (j == oc->nb_streams)
                    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                        ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                        ost->st->disposition = AV_DISPOSITION_DEFAULT;
            }

            if (!ost->stream_copy) {
    
                AVCodecContext *enc_ctx = ost->enc_ctx;
                AVCodecContext *dec_ctx = NULL;
    
    
                set_encoder_id(output_files[ost->file_index], ost);
    
    
                if (ist) {
                    dec_ctx = ist->dec_ctx;
    
                    enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
                }
    
    
    #if CONFIG_LIBMFX
                if (qsv_transcode_init(ost))
                    exit_program(1);
    #endif
    
    
    #if CONFIG_CUVID
                if (cuvid_transcode_init(ost))
                    exit_program(1);
    #endif
    
    
                if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                     enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
                     filtergraph_is_simple(ost->filter->graph)) {
                        FilterGraph *fg = ost->filter->graph;
    
    
                        if (dec_ctx) {
                            ret = ifilter_parameters_from_decoder(fg->inputs[0],
                                                                  dec_ctx);
                            if (ret < 0) {
                                av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
                                exit_program(1);
                            }
                        }
    
    
                        if (configure_filtergraph(fg)) {
    
                            av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
    
                            exit_program(1);
                        }
                }

                if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
    
                    if (!ost->frame_rate.num)
                        ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
    
                    if (ist && !ost->frame_rate.num)
                        ost->frame_rate = ist->framerate;
    
                    if (ist && !ost->frame_rate.num)
    
                        ost->frame_rate = ist->st->r_frame_rate;
                    if (ist && !ost->frame_rate.num) {
                        ost->frame_rate = (AVRational){25, 1};
                        av_log(NULL, AV_LOG_WARNING,
                               "No information "
                               "about the input framerate is available. Falling "
                               "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                               "if you want a different framerate.\n",
                               ost->file_index, ost->index);
                    }
    
    //                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
                    if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                        int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                        ost->frame_rate = ost->enc->supported_framerates[idx];
                    }
    
                    // reduce frame rate for mpeg4 to be within the spec limits
    
                    if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
    
                        av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                                  ost->frame_rate.num, ost->frame_rate.den, 65535);
                    }
                }

                switch (enc_ctx->codec_type) {
    
                case AVMEDIA_TYPE_AUDIO:
    
                    enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
    
                    if (dec_ctx)
                        enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                             av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
    
                    enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
                    enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
    
                    enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
    
                    enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
    
                    break;
                case AVMEDIA_TYPE_VIDEO:
    
                    enc_ctx->time_base = av_inv_q(ost->frame_rate);
    
                    if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
    
                        enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
                    if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
    
                       && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
    
                        av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                                   "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
                    }
                    for (j = 0; j < ost->forced_kf_count; j++)
                        ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                             AV_TIME_BASE_Q,
                                                             enc_ctx->time_base);

                    enc_ctx->width  = ost->filter->filter->inputs[0]->w;
                    enc_ctx->height = ost->filter->filter->inputs[0]->h;
                    enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
    
                        ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
    
                        av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
    
                        ost->filter->filter->inputs[0]->sample_aspect_ratio;
    
                    if (!strncmp(ost->enc->name, "libx264", 7) &&
    
                        enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
    
                        ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                        av_log(NULL, AV_LOG_WARNING,
                               "No pixel format specified, %s for H.264 encoding chosen.\n"
                               "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                               av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
    
                    if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
    
                        enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
    
                        ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                        av_log(NULL, AV_LOG_WARNING,
                               "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                               "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                               av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
    
                    enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
    
                    if (dec_ctx)
                        enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                             av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
    
                    ost->st->avg_frame_rate = ost->frame_rate;
    
    
                    if (!dec_ctx ||
                        enc_ctx->width   != dec_ctx->width  ||
                        enc_ctx->height  != dec_ctx->height ||
                        enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
                        enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
                    }

                    if (ost->forced_keyframes) {
                        if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                            ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                                forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                            if (ret < 0) {
                                av_log(NULL, AV_LOG_ERROR,
                                       "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                                return ret;
                            }
                            ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                            ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                            ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                            ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
    
    
                            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                            // parse it only for static kf timings
                        } else if(strncmp(ost->forced_keyframes, "source", 6)) {
    
                            parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
                        }
                    }

                    break;
                case AVMEDIA_TYPE_SUBTITLE:
    
                    enc_ctx->time_base = (AVRational){1, 1000};
                    if (!enc_ctx->width) {
                        enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
                        enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
                    }
                    break;
                case AVMEDIA_TYPE_DATA:
                    break;
                default:
                    abort();
                }
            }
    
    
            if (ost->disposition) {
                static const AVOption opts[] = {
                    { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
                    { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
                    { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
                    { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
                    { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
                    { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
                    { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
                    { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
                    { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
                    { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
                    { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
                    { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
                    { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
                    { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
                    { NULL },
                };
                static const AVClass class = {
                    .class_name = "",
                    .item_name  = av_default_item_name,
                    .option     = opts,
                    .version    = LIBAVUTIL_VERSION_INT,
                };
                const AVClass *pclass = &class;
    
                ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
                if (ret < 0)
                    goto dump_format;
            }
        }

        /* init input streams */
        for (i = 0; i < nb_input_streams; i++)
    
            if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
                for (i = 0; i < nb_output_streams; i++) {
                    ost = output_streams[i];
    
                    avcodec_close(ost->enc_ctx);
                }
                goto dump_format;
            }

        /* open each encoder */
        for (i = 0; i < nb_output_streams; i++) {
            ret = init_output_stream(output_streams[i], error, sizeof(error));
            if (ret < 0)
                goto dump_format;
        }
    
    
        /* discard unused programs */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            for (j = 0; j < ifile->ctx->nb_programs; j++) {
                AVProgram *p = ifile->ctx->programs[j];
                int discard  = AVDISCARD_ALL;
    
                for (k = 0; k < p->nb_stream_indexes; k++)
    
                    if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
    
                        discard = AVDISCARD_DEFAULT;
                        break;
                    }
                p->discard = discard;
            }
        }

        /* write headers for files with no streams */
        for (i = 0; i < nb_output_files; i++) {
            oc = output_files[i]->ctx;
            if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
                ret = check_init_output_file(output_files[i], i);
                if (ret < 0)
                    goto dump_format;
            }
        }
    
    
     dump_format:
        /* dump the stream mapping */
    
        av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
    
            for (j = 0; j < ist->nb_filters; j++) {
    
                if (!filtergraph_is_simple(ist->filters[j]->graph)) {
    
                    av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                           ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                           ist->filters[j]->name);
                    if (nb_filtergraphs > 1)
                        av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                    av_log(NULL, AV_LOG_INFO, "\n");
                }
            }
        }

        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];

            if (ost->attachment_filename) {
                /* an attached file */
                av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                       ost->attachment_filename, ost->file_index, ost->index);
                continue;
            }

            if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
    
                /* output from a complex graph */
    
                av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
    
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
    
                av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                       ost->index, ost->enc ? ost->enc->name : "?");
                continue;
            }

            av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
    
                   input_streams[ost->source_index]->file_index,
                   input_streams[ost->source_index]->st->index,
    
                   ost->file_index,
                   ost->index);
    
            if (ost->sync_ist != input_streams[ost->source_index])
    
                av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
    
                       ost->sync_ist->file_index,
                       ost->sync_ist->st->index);
    
            if (ost->stream_copy)
    
                av_log(NULL, AV_LOG_INFO, " (copy)");
    
            else {
                const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
                const AVCodec *out_codec   = ost->enc;
                const char *decoder_name   = "?";
                const char *in_codec_name  = "?";
                const char *encoder_name   = "?";
                const char *out_codec_name = "?";
    
                const AVCodecDescriptor *desc;
    
    
                if (in_codec) {
                    decoder_name  = in_codec->name;
    
                    desc = avcodec_descriptor_get(in_codec->id);
                    if (desc)
                        in_codec_name = desc->name;
    
                    if (!strcmp(decoder_name, in_codec_name))
                        decoder_name = "native";
                }
    
                if (out_codec) {
                    encoder_name   = out_codec->name;
    
                    desc = avcodec_descriptor_get(out_codec->id);
                    if (desc)
                        out_codec_name = desc->name;
    
                    if (!strcmp(encoder_name, out_codec_name))
    
                        encoder_name = "native";
                }
    
                av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                       in_codec_name, decoder_name,
                       out_codec_name, encoder_name);
            }
    
            av_log(NULL, AV_LOG_INFO, "\n");
        }

        if (ret) {
            av_log(NULL, AV_LOG_ERROR, "%s\n", error);
            return ret;
        }

        transcode_init_done = 1;

        return 0;
    }

    /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
    static int need_output(void)
    {
        int i;

        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost    = output_streams[i];
            OutputFile *of       = output_files[ost->file_index];
            AVFormatContext *os  = output_files[ost->file_index]->ctx;

            if (ost->finished ||
                (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                continue;
    
            if (ost->frame_number >= ost->max_frames) {
    
                int j;
                for (j = 0; j < of->ctx->nb_streams; j++)
    
                    close_output_stream(output_streams[of->ost_index + j]);
                continue;
            }

            return 1;
        }

        return 0;
    }

    /**
     * Select the output stream to process.
     *
     * @return  selected output stream, or NULL if none available
     */
    static OutputStream *choose_output(void)
    {
    
        int i;
        int64_t opts_min = INT64_MAX;
        OutputStream *ost_min = NULL;
    
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
    
            int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                           av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                        AV_TIME_BASE_Q);
            if (ost->st->cur_dts == AV_NOPTS_VALUE)
                av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

            if (!ost->finished && opts < opts_min) {
                opts_min = opts;
                ost_min  = ost->unavailable ? NULL : ost;
            }
        }
        return ost_min;
    }

    
    static void set_tty_echo(int on)
    {
    #if HAVE_TERMIOS_H
        struct termios tty;
        if (tcgetattr(0, &tty) == 0) {
            if (on) tty.c_lflag |= ECHO;
            else    tty.c_lflag &= ~ECHO;
            tcsetattr(0, TCSANOW, &tty);
        }
    #endif
    }
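
    /* Poll for interactive key presses (at most once every 100 ms): quit,
     * verbosity changes, QP-histogram and packet-dump toggles, debug-flag
     * editing, and sending or queuing commands to the filtergraphs. */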
    
    
    static int check_keyboard_interaction(int64_t cur_time)
    {
        int i, ret, key;
        static int64_t last_time;
        if (received_nb_signals)
            return AVERROR_EXIT;
        /* read_key() returns 0 on EOF */
        if(cur_time - last_time >= 100000 && !run_as_daemon){
            key =  read_key();
            last_time = cur_time;
        }else
            key = -1;
        if (key == 'q')
            return AVERROR_EXIT;
        if (key == '+') av_log_set_level(av_log_get_level()+10);
        if (key == '-') av_log_set_level(av_log_get_level()-10);
        if (key == 's') qp_hist     ^= 1;
        if (key == 'h'){
            if (do_hex_dump){
                do_hex_dump = do_pkt_dump = 0;
            } else if(do_pkt_dump){
                do_hex_dump = 1;
            } else
                do_pkt_dump = 1;
            av_log_set_level(AV_LOG_DEBUG);
        }
        if (key == 'c' || key == 'C'){
            char buf[4096], target[64], command[256], arg[256] = {0};
            double time;
            int k, n = 0;
    
            fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
    
            set_tty_echo(0);
            fprintf(stderr, "\n");
    
            if (k > 0 &&
                (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
                av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                       target, time, command, arg);
                for (i = 0; i < nb_filtergraphs; i++) {
                    FilterGraph *fg = filtergraphs[i];
                    if (fg->graph) {
                        if (time < 0) {
                            ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                              key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
    
                            fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
    
                            fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
    
                        } else {
                            ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
    
                                fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
    
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "Parse error, at least 3 arguments were expected, "
                       "only %d given in string '%s'\n", n, buf);
    
        }
        if (key == 'd' || key == 'D'){
            int debug=0;
            if(key == 'D') {
                debug = input_streams[0]->st->codec->debug<<1;
                if(!debug) debug = 1;
                while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                    debug += debug;
    
            }else{
                char buf[32];
                int k = 0;
                i = 0;
                set_tty_echo(1);
                while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                    if (k > 0)
                        buf[i++] = k;
                buf[i] = 0;
    
                set_tty_echo(0);
                fprintf(stderr, "\n");
    
                if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
    
                    fprintf(stderr,"error parsing debug value\n");
            }
            for(i=0;i<nb_input_streams;i++) {
                input_streams[i]->st->codec->debug = debug;
            }
            for(i=0;i<nb_output_streams;i++) {
                OutputStream *ost = output_streams[i];
                ost->enc_ctx->debug = debug;
            }
            if(debug) av_log_set_level(AV_LOG_DEBUG);
            fprintf(stderr,"debug=%d\n", debug);
        }
        if (key == '?'){
            fprintf(stderr, "key    function\n"
                            "?      show this help\n"
                            "+      increase verbosity\n"
                            "-      decrease verbosity\n"
    
                            "c      Send command to first matching filter supporting it\n"
    
                            "C      Send/Queue command to all matching filters\n"
    
                            "D      cycle through available debug modes\n"
                            "h      dump packets/hex press to cycle through the 3 states\n"
                            "q      quit\n"
                            "s      Show QP histogram\n"
            );
        }
        return 0;
    }

    #if HAVE_PTHREADS
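
    /* Per-input-file demuxer thread: read packets with av_read_frame() and
     * push them into the file's thread message queue, falling back to a
     * blocking send (with a warning) once the non-blocking queue fills up. */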
    
    static void *input_thread(void *arg)
    {
        InputFile *f = arg;
    
        unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    
        int ret = 0;
    
        while (1) {
    
            AVPacket pkt;
            ret = av_read_frame(f->ctx, &pkt);
    
            if (ret == AVERROR(EAGAIN)) {
    
                av_usleep(10000);
    
                continue;
    
            }
            if (ret < 0) {
                av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
                break;
            }

            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            if (flags && ret == AVERROR(EAGAIN)) {
                flags = 0;
                ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
                av_log(f->ctx, AV_LOG_WARNING,
                       "Thread message queue blocking; consider raising the "
                       "thread_queue_size option (current value: %d)\n",
                       f->thread_queue_size);
            }
    
            if (ret < 0) {
                if (ret != AVERROR_EOF)
                    av_log(f->ctx, AV_LOG_ERROR,
                           "Unable to send packet to main thread: %s\n",
                           av_err2str(ret));
                av_packet_unref(&pkt);
                av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
                break;
            }
        }

        return NULL;
    }

    static void free_input_threads(void)
    {
        int i;

        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
            AVPacket pkt;
    
            if (!f || !f->in_thread_queue)
                continue;
    
            av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
            while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
                av_packet_unref(&pkt);

            pthread_join(f->thread, NULL);
            f->joined = 1;
    
            av_thread_message_queue_free(&f->in_thread_queue);
    
    static int init_input_threads(void)
    {
        int i, ret;
    
        if (nb_input_files == 1)
            return 0;
    
        for (i = 0; i < nb_input_files; i++) {
            InputFile *f = input_files[i];
    
            if (f->ctx->pb ? !f->ctx->pb->seekable :
                strcmp(f->ctx->iformat->name, "lavfi"))
                f->non_blocking = 1;
    
            ret = av_thread_message_queue_alloc(&f->in_thread_queue,
    
                                                f->thread_queue_size, sizeof(AVPacket));
    
            if (ret < 0)
                return ret;
    
            if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
                av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
                av_thread_message_queue_free(&f->in_thread_queue);
    
                return AVERROR(ret);
            }
        }

        return 0;
    }

    static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
    {
        return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                            f->non_blocking ?
                                            AV_THREAD_MESSAGE_NONBLOCK : 0);
    }
    #endif
    
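    /* Fetch the next packet of an input file, honouring -re rate emulation;
     * with multiple inputs the packet comes from the per-file demuxer thread,
     * otherwise av_read_frame() is called directly. */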
    static int get_input_packet(InputFile *f, AVPacket *pkt)
    {
    
        if (f->rate_emu) {
            int i;
            for (i = 0; i < f->nb_streams; i++) {
                InputStream *ist = input_streams[f->ist_index + i];
    
                int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
    
                int64_t now = av_gettime_relative() - ist->start;
    
                if (pts > now)
                    return AVERROR(EAGAIN);
            }
        }
    
    
    #if HAVE_PTHREADS
    
        if (nb_input_files > 1)
            return get_input_packet_mt(f, pkt);
    #endif
        return av_read_frame(f->ctx, pkt);
    }

    static int got_eagain(void)
    {
        int i;

        for (i = 0; i < nb_output_streams; i++)
            if (output_streams[i]->unavailable)
    
                return 1;
        return 0;
    }
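
    /* Clear the per-file EAGAIN and per-stream "unavailable" flags so that
     * reading input can be retried on the next iteration. */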
    
    static void reset_eagain(void)
    {
        int i;
        for (i = 0; i < nb_input_files; i++)
            input_files[i]->eagain = 0;
    
        for (i = 0; i < nb_output_streams; i++)
            output_streams[i]->unavailable = 0;
    }
    
    
    // set duration to max(tmp, duration) in a proper time base and return duration's time_base
    static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
                                    AVRational time_base)
    {
        int ret;
    
        if (!*duration) {
            *duration = tmp;
            return tmp_time_base;
        }
    
        ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
        if (ret < 0) {
            *duration = tmp;
            return tmp_time_base;
        }
    
        return time_base;
    }
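
    /* Informal example of duration_max() above: starting from *duration == 0,
     * duration_max(1001, &d, (AVRational){1, 30000}, (AVRational){1, 90000})
     * just records d = 1001 in the 1/30000 time base and returns that time
     * base; a later call replaces d only if its value rescales to something
     * longer. */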
    
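    /* Rewind an input file to its start and flush the decoders of all of its
     * streams so decoding can restart cleanly; the code below estimates the
     * per-stream durations of the pass that just ended. */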
    static int seek_to_start(InputFile *ifile, AVFormatContext *is)
    {
        InputStream *ist;
        AVCodecContext *avctx;
        int i, ret, has_audio = 0;
        int64_t duration = 0;
    
        ret = av_seek_frame(is, -1, is->start_time, 0);
        if (ret < 0)
            return ret;
    
        for (i = 0; i < ifile->nb_streams; i++) {
            ist   = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
    
            // flush decoders
            if (ist->decoding_needed) {
                process_input_packet(ist, NULL, 1);
                avcodec_flush_buffers(avctx);
            }
    
            /* duration is the length of the last frame in a stream
             * when audio stream is present we don't care about
             * last video frame length because it's not defined exactly */