                        fprintf(ost->logfile, "%s", enc->stats_out);
                    }
    
                    if (!got_packet) {
                        stop_encoding = 1;
                        break;
                    }
    
                    if (pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                    if (pkt.dts != AV_NOPTS_VALUE)
                        pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
    
                    write_frame(os, &pkt, ost);
                    break;
                }

                if (stop_encoding)
                    break;
            }
        }
    }

    /*
     * Check whether a packet from ist should be written into ost at this time
     */
    static int check_output_constraints(InputStream *ist, OutputStream *ost)
    {
    
        OutputFile *of = output_files[ost->file_index];
        int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
    
    
        if (ost->source_index != ist_index)
            return 0;
    
    
        if (of->start_time && ist->last_dts < of->start_time)
            return 0;
    
        return 1;
    }
    
    static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    {
    
        OutputFile *of = output_files[ost->file_index];
    
        int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
        AVPacket opkt;
    
        av_init_packet(&opkt);
    
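        /* When copying, drop leading packets until the first keyframe unless
         * -copyinkf (copy_initial_nonkeyframes) was requested, so the output
         * does not start with undecodable frames. */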
        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
            !ost->copy_initial_nonkeyframes)
            return;
    
    
        if (of->recording_time != INT64_MAX &&
            ist->last_dts >= of->recording_time + of->start_time) {
            ost->is_past_recording_time = 1;
            return;
        }
    
    
        /* force the input stream PTS */
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            audio_size += pkt->size;
        else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_size += pkt->size;
            ost->sync_opts++;
        }
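        /* Rescale the packet timestamps from the input stream time base to the
         * output stream time base, and shift them by the output start time so
         * the copied stream starts at zero. */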
    
        if (pkt->pts != AV_NOPTS_VALUE)
            opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        else
            opkt.pts = AV_NOPTS_VALUE;
    
        if (pkt->dts == AV_NOPTS_VALUE)
            opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
        else
            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
        opkt.dts -= ost_tb_start_time;
    
        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
        opkt.flags    = pkt->flags;
    
    
        // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
        if (  ost->st->codec->codec_id != CODEC_ID_H264
           && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
           && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
           && ost->st->codec->codec_id != CODEC_ID_VC1
           ) {
            if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
                opkt.destruct = av_destruct_packet;
        } else {
            opkt.data = pkt->data;
            opkt.size = pkt->size;
        }
    
    
        write_frame(of->ctx, &opkt, ost);
    
        ost->st->codec->frame_number++;
        av_free_packet(&opkt);
    }
    
    
    static void rate_emu_sleep(InputStream *ist)
    {
    
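        /* Rate emulation (-re): convert the last DTS to microseconds and sleep
         * until the wall clock catches up, so input is consumed at realtime
         * speed at most. */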
        if (input_files[ist->file_index]->rate_emu) {
            int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime() - ist->start;
            if (pts > now)
                usleep(pts - now);
        }
    }
    
    
    static int guess_input_channel_layout(InputStream *ist)
    {
        AVCodecContext *dec = ist->st->codec;
    
        if (!dec->channel_layout) {
            char layout_name[256];
    
            dec->channel_layout = av_get_default_channel_layout(dec->channels);
            if (!dec->channel_layout)
                return 0;
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         dec->channels, dec->channel_layout);
            av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
                   "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
        }
        return 1;
    }
    
    
    static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    {
    
        AVFrame *decoded_frame;
        AVCodecContext *avctx = ist->st->codec;
    
        int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
    
        int i, ret, resample_changed;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
            return AVERROR(ENOMEM);
        else
            avcodec_get_frame_defaults(ist->decoded_frame);
        decoded_frame = ist->decoded_frame;
    
        ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
        if (ret < 0) {
            return ret;
        }

        if (!*got_output) {
            /* no audio frame */
    
            if (!pkt->size)
                for (i = 0; i < ist->nb_filters; i++)
                    av_buffersrc_buffer(ist->filters[i]->filter, NULL);
            return ret;
        }

        /* if the decoder provides a pts, use it instead of the last packet pts.
           the decoder could be delaying output by a packet or more. */
        if (decoded_frame->pts != AV_NOPTS_VALUE)
            ist->next_dts = decoded_frame->pts;
        else if (pkt->pts != AV_NOPTS_VALUE) {
            decoded_frame->pts = pkt->pts;
            pkt->pts           = AV_NOPTS_VALUE;
        }
    
    
        // preprocess audio (volume)
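        /* audio_volume is in units of 1/256 (256 = unchanged), so integer
         * sample formats are scaled with a fixed-point multiply:
         * (sample * audio_volume + 128) >> 8, i.e. multiply, round, divide by
         * 256; float/double formats use the equivalent scale factor. */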
        if (audio_volume != 256) {
    
            int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
            void *samples = decoded_frame->data[0];
            switch (avctx->sample_fmt) {
    
            case AV_SAMPLE_FMT_U8:
            {
                uint8_t *volp = samples;
                for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                    int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
                    *volp++ = av_clip_uint8(v);
                }
                break;
            }
            case AV_SAMPLE_FMT_S16:
            {
                int16_t *volp = samples;
                for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                    int v = ((*volp) * audio_volume + 128) >> 8;
                    *volp++ = av_clip_int16(v);
                }
                break;
            }
            case AV_SAMPLE_FMT_S32:
            {
                int32_t *volp = samples;
                for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                    int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
                    *volp++ = av_clipl_int32(v);
                }
                break;
            }
            case AV_SAMPLE_FMT_FLT:
            {
                float *volp = samples;
                float scale = audio_volume / 256.f;
                for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                    *volp++ *= scale;
                }
                break;
            }
            case AV_SAMPLE_FMT_DBL:
            {
                double *volp = samples;
                double scale = audio_volume / 256.;
                for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                    *volp++ *= scale;
                }
                break;
            }
            default:
                av_log(NULL, AV_LOG_FATAL,
                       "Audio volume adjustment on sample format %s is not supported.\n",
                       av_get_sample_fmt_name(ist->st->codec->sample_fmt));
                exit_program(1);
            }
        }
    
        rate_emu_sleep(ist);
    
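        /* Detect changes in sample rate, sample format, channel count or
         * channel layout; any of them requires reconfiguring the filtergraphs
         * fed by this stream. */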
    
        resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                           ist->resample_channels       != avctx->channels               ||
                           ist->resample_channel_layout != decoded_frame->channel_layout ||
                           ist->resample_sample_rate    != decoded_frame->sample_rate;
        if (resample_changed) {
            char layout1[64], layout2[64];
    
            if (!guess_input_channel_layout(ist)) {
                av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                       "layout for Input Stream #%d.%d\n", ist->file_index,
                       ist->st->index);
                exit_program(1);
            }
            decoded_frame->channel_layout = avctx->channel_layout;
    
            av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                         ist->resample_channel_layout);
            av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                         decoded_frame->channel_layout);
    
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
                   ist->resample_channels, layout1,
                   decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
                   avctx->channels, layout2);
    
            ist->resample_sample_fmt     = decoded_frame->format;
            ist->resample_sample_rate    = decoded_frame->sample_rate;
            ist->resample_channel_layout = decoded_frame->channel_layout;
            ist->resample_channels       = avctx->channels;
    
            for (i = 0; i < nb_filtergraphs; i++)
                if (ist_in_filtergraph(filtergraphs[i], ist) &&
                    configure_filtergraph(filtergraphs[i]) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
        }

        for (i = 0; i < ist->nb_filters; i++)
            av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);

        return ret;
    }

    static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output)
    {
        AVFrame *decoded_frame;
        void *buffer_to_free = NULL;
        int i, ret = 0, resample_changed;
        int quality;
    
        if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
            return AVERROR(ENOMEM);
        else
            avcodec_get_frame_defaults(ist->decoded_frame);
        decoded_frame = ist->decoded_frame;
    
    
        ret = avcodec_decode_video2(ist->st->codec,
                                    decoded_frame, got_output, pkt);
        if (ret < 0)
            return ret;

        quality = same_quant ? decoded_frame->quality : 0;
        if (!*got_output) {
            /* no picture yet */
    
            if (!pkt->size)
                for (i = 0; i < ist->nb_filters; i++)
                    av_buffersrc_buffer(ist->filters[i]->filter, NULL);
            return ret;
        }

        decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
                                               decoded_frame->pkt_dts);
    
        pkt->size = 0;
        pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
    
        rate_emu_sleep(ist);
    
    
        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    
    
        resample_changed = ist->resample_width   != decoded_frame->width  ||
                           ist->resample_height  != decoded_frame->height ||
                           ist->resample_pix_fmt != decoded_frame->format;
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
                   ist->file_index, ist->st->index,
                   ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
                   decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
    
            ist->resample_width   = decoded_frame->width;
            ist->resample_height  = decoded_frame->height;
            ist->resample_pix_fmt = decoded_frame->format;
    
    
            for (i = 0; i < nb_filtergraphs; i++)
                if (ist_in_filtergraph(filtergraphs[i], ist) &&
                    configure_filtergraph(filtergraphs[i]) < 0) {
    
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
        }

        for (i = 0; i < ist->nb_filters; i++) {
    
            // XXX what an ugly hack
            if (ist->filters[i]->graph->nb_outputs == 1)
                ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
    
    
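            /* With direct rendering (CODEC_CAP_DR1) the decoded data lives in
             * one of our FrameBuffers, so it can be wrapped in an
             * AVFilterBufferRef and handed to the filtergraph without copying;
             * otherwise av_buffersrc_write_frame() copies the frame. */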
            if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
                FrameBuffer      *buf = decoded_frame->opaque;
                AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                            decoded_frame->data, decoded_frame->linesize,
                                            AV_PERM_READ | AV_PERM_PRESERVE,
                                            ist->st->codec->width, ist->st->codec->height,
                                            ist->st->codec->pix_fmt);
    
                avfilter_copy_frame_props(fb, decoded_frame);
                fb->buf->priv           = buf;
                fb->buf->free           = filter_release_buffer;
    
                buf->refcount++;
    
                av_buffersrc_buffer(ist->filters[i]->filter, fb);
            } else
                av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
        }

        av_free(buffer_to_free);
        return ret;
    }

    static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    {
        AVSubtitle subtitle;
        int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                              &subtitle, got_output, pkt);
        if (ret < 0)
            return ret;
        if (!*got_output)
            return ret;

    
        rate_emu_sleep(ist);
    
        for (i = 0; i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
    
            if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
                continue;
    
    
            do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
        }

        avsubtitle_free(&subtitle);
        return ret;
    }

    /* pkt = NULL means EOF (needed to flush decoder buffers) */
    static int output_packet(InputStream *ist, const AVPacket *pkt)
    {
        int i;
        int got_output;
        AVPacket avpkt;
    
    
        if (ist->next_dts == AV_NOPTS_VALUE)
            ist->next_dts = ist->last_dts;
    
    
        if (pkt == NULL) {
            /* EOF handling */
            av_init_packet(&avpkt);
            avpkt.data = NULL;
            avpkt.size = 0;
            goto handle_eof;
        } else {
            avpkt = *pkt;
        }
    
    
        if (pkt->dts != AV_NOPTS_VALUE)
            ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    
        // while we have more to decode or while the decoder did output something on EOF
    
        while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
            int ret = 0;
        handle_eof:

            ist->last_dts = ist->next_dts;
    
    
            if (avpkt.size && avpkt.size != pkt->size) {
    
                av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                       "Multiple frames in a packet from stream %d\n", pkt->stream_index);
    
                ist->showed_multi_packet_warning = 1;
            }
    
            switch (ist->st->codec->codec_type) {
    
            case AVMEDIA_TYPE_AUDIO:
                ret = transcode_audio    (ist, &avpkt, &got_output);
                break;
            case AVMEDIA_TYPE_VIDEO:
    
                ret = transcode_video    (ist, &avpkt, &got_output);
    
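                /* Predict the DTS of the next packet: prefer the packet
                 * duration, then the stream frame rate, and finally the codec
                 * time base (accounting for repeated fields via the parser). */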
                if (avpkt.duration)
                    ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
    
                else if (ist->st->r_frame_rate.num)
                    ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
                                                                  ist->st->r_frame_rate.num},
                                                  AV_TIME_BASE_Q);
    
                else if (ist->st->codec->time_base.num != 0) {
                    int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                                       ist->st->codec->ticks_per_frame;
                    ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
                }
    
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = transcode_subtitles(ist, &avpkt, &got_output);
                break;
    
            default:
                return -1;
            }
    
            if (ret < 0)
                return ret;

            // touch data and size only if not EOF
            if (pkt) {
                avpkt.data += ret;
                avpkt.size -= ret;
            }
        }

        if (!ist->decoding_needed) {
            rate_emu_sleep(ist);
    
            ist->last_dts = ist->next_dts;
    
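        /* Streamcopy only: no decoder to provide timestamps, so advance
         * next_dts by one frame (or one audio frame) derived from the codec
         * parameters. */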
            switch (ist->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
    
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
                                 ist->st->codec->sample_rate;
                break;
            case AVMEDIA_TYPE_VIDEO:
                if (ist->st->codec->time_base.num != 0) {
    
                    int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
    
                    ist->next_dts += ((int64_t)AV_TIME_BASE *
                                      ist->st->codec->time_base.num * ticks) /
                                      ist->st->codec->time_base.den;
                }
                break;
            }
        }
    
        for (i = 0; pkt && i < nb_output_streams; i++) {
    
            OutputStream *ost = output_streams[i];
    
    
            if (!check_output_constraints(ist, ost) || ost->encoding_needed)
                continue;
    
            do_streamcopy(ist, ost, pkt);
        }

        return 0;
    }

    static void print_sdp(void)
    {
        char sdp[2048];
        int i;
        AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);

        if (!avc)
            exit_program(1);
        for (i = 0; i < nb_output_files; i++)
            avc[i] = output_files[i]->ctx;

        av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
        av_freep(&avc);
    }

    static int init_input_stream(int ist_index, char *error, int error_len)
    {
        int i;
        InputStream *ist = input_streams[ist_index];
    
        if (ist->decoding_needed) {
            AVCodec *codec = ist->dec;
            if (!codec) {
    
                snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
    
                        ist->st->codec->codec_id, ist->file_index, ist->st->index);
                return AVERROR(EINVAL);
            }
    
            /* update requested sample format for the decoder based on the
               corresponding encoder sample format */
            for (i = 0; i < nb_output_streams; i++) {
    
                OutputStream *ost = output_streams[i];
    
                if (ost->source_index == ist_index) {
                    update_sample_fmt(ist->st->codec, codec, ost->st->codec);
                    break;
                }
            }
    
    
            if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
                ist->st->codec->get_buffer     = codec_get_buffer;
                ist->st->codec->release_buffer = codec_release_buffer;
                ist->st->codec->opaque         = ist;
            }
    
    
            if (!av_dict_get(ist->opts, "threads", NULL, 0))
                av_dict_set(&ist->opts, "threads", "auto", 0);
    
            if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
    
                snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
    
                        ist->file_index, ist->st->index);
                return AVERROR(EINVAL);
            }
            assert_codec_experimental(ist->st->codec, 0);
            assert_avoptions(ist->opts);
        }
    
    
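        /* Start last_dts at minus has_b_frames frame durations (in AV_TIME_BASE
         * units) to account for the decoder's reordering delay; 0 if the
         * average frame rate is unknown. */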
        ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
    
        ist->next_dts = AV_NOPTS_VALUE;
    
        init_pts_correction(&ist->pts_ctx);
        ist->is_start = 1;
    
        return 0;
    }
    
    
    static InputStream *get_input_stream(OutputStream *ost)
    {
        if (ost->source_index >= 0)
            return input_streams[ost->source_index];
    
        if (ost->filter) {
            FilterGraph *fg = ost->filter->graph;
            int i;
    
            for (i = 0; i < fg->nb_inputs; i++)
                if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
                    return fg->inputs[i]->ist;
        }
    
        return NULL;
    }
    
    
    static int transcode_init(void)
    {
        int ret = 0, i, j, k;
        AVFormatContext *oc;
        AVCodecContext *codec, *icodec;
        OutputStream *ost;
        InputStream *ist;
        char error[1024];
        int want_sdp = 1;
    
        /* init framerate emulation */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            if (ifile->rate_emu)
                for (j = 0; j < ifile->nb_streams; j++)
    
                    input_streams[j + ifile->ist_index]->start = av_gettime();
        }

        for (i = 0; i < nb_output_files; i++) {
            oc = output_files[i]->ctx;
            if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
                av_dump_format(oc, i, oc->filename, 1);
    
                av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
                return AVERROR(EINVAL);
            }
        }

        /* init complex filtergraphs */
        for (i = 0; i < nb_filtergraphs; i++)
            if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
                return ret;
    
    
        /* for each output stream, we compute the right encoding parameters */
    
        for (i = 0; i < nb_output_streams; i++) {
    
            ost = output_streams[i];
            oc  = output_files[ost->file_index]->ctx;
    
            ist = get_input_stream(ost);
    
            if (ost->attachment_filename)
                continue;
    
    
            codec  = ost->st->codec;
    
            if (ist) {
                icodec = ist->st->codec;
    
                ost->st->disposition          = ist->st->disposition;
                codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
                codec->chroma_sample_location = icodec->chroma_sample_location;
            }
    
            if (ost->stream_copy) {
    
                uint64_t extra_size;
    
                av_assert0(ist && !ost->filter);
    
                extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
    
                if (extra_size > INT_MAX) {
                    return AVERROR(EINVAL);
                }
                /* if stream_copy is selected, no need to decode or encode */
    
                codec->codec_id   = icodec->codec_id;
    
                codec->codec_type = icodec->codec_type;
    
    
                if (!codec->codec_tag) {
                    if (!oc->oformat->codec_tag ||
                         av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
                         av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
    
                        codec->codec_tag = icodec->codec_tag;
                }
    
    
                codec->bit_rate       = icodec->bit_rate;
    
                codec->rc_max_rate    = icodec->rc_max_rate;
                codec->rc_buffer_size = icodec->rc_buffer_size;
    
                codec->field_order    = icodec->field_order;
    
                codec->extradata      = av_mallocz(extra_size);
    
                if (!codec->extradata) {
                    return AVERROR(ENOMEM);
                }
                memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
    
                codec->extradata_size = icodec->extradata_size;
    
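                /* Pick the output time base for the copied stream: by default
                 * derive it from the decoder time base scaled by
                 * ticks_per_frame; with -copytb keep the demuxer's stream time
                 * base instead. */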
                if (!copy_tb) {
    
                    codec->time_base      = icodec->time_base;
    
                    codec->time_base.num *= icodec->ticks_per_frame;
                    av_reduce(&codec->time_base.num, &codec->time_base.den,
                              codec->time_base.num, codec->time_base.den, INT_MAX);
                } else
                    codec->time_base = ist->st->time_base;
    
                switch (codec->codec_type) {
    
                case AVMEDIA_TYPE_AUDIO:
    
                    if (audio_volume != 256) {
    
                        av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                        exit_program(1);
                    }
                    codec->channel_layout     = icodec->channel_layout;
                    codec->sample_rate        = icodec->sample_rate;
                    codec->channels           = icodec->channels;
                    codec->frame_size         = icodec->frame_size;
    
                    codec->audio_service_type = icodec->audio_service_type;
    
                    codec->block_align        = icodec->block_align;
    
                    break;
                case AVMEDIA_TYPE_VIDEO:
    
                    codec->pix_fmt            = icodec->pix_fmt;
                    codec->width              = icodec->width;
                    codec->height             = icodec->height;
                    codec->has_b_frames       = icodec->has_b_frames;
    
                    if (!codec->sample_aspect_ratio.num) {
    
                        codec->sample_aspect_ratio   =
    
                        ost->st->sample_aspect_ratio =
                            ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
                            ist->st->codec->sample_aspect_ratio.num ?
                            ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
                    }
                    break;
                case AVMEDIA_TYPE_SUBTITLE:
    
                    codec->width  = icodec->width;
    
                    codec->height = icodec->height;
                    break;
                case AVMEDIA_TYPE_DATA:
    
                case AVMEDIA_TYPE_ATTACHMENT:
                    break;
                default:
                    abort();
                }
            } else {
                if (!ost->enc) {
                    /* should only happen when a default codec is not present. */
                    snprintf(error, sizeof(error), "Automatic encoder selection "
                             "failed for output stream #%d:%d. Default encoder for "
                             "format %s is probably disabled. Please choose an "
                             "encoder manually.\n", ost->file_index, ost->index,
                             oc->oformat->name);
                    ret = AVERROR(EINVAL);
                    goto dump_format;
                }
    
                if (ist)
                    ist->decoding_needed = 1;
    
                ost->encoding_needed = 1;
    
                /*
                 * We want CFR output if and only if one of those is true:
                 * 1) user specified output framerate with -r
                 * 2) user specified -vsync cfr
                 * 3) output format is CFR and the user didn't force vsync to
                 *    something else than CFR
                 *
                 * in such a case, set ost->frame_rate
                 */
                if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                    !ost->frame_rate.num && ist &&
                    (video_sync_method ==  VSYNC_CFR ||
                     (video_sync_method ==  VSYNC_AUTO &&
                      !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
                    if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                        int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                        ost->frame_rate = ost->enc->supported_framerates[idx];
                    }
                }
    
    
                if (!ost->filter &&
                    (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                     codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
    
                        FilterGraph *fg;
                        fg = init_simple_filtergraph(ist, ost);
    
                        if (configure_simple_filtergraph(fg)) {
    
                            av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
                            exit(1);
                        }
                }

                switch (codec->codec_type) {
                case AVMEDIA_TYPE_AUDIO:
                    codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
                    codec->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
                    codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
                    codec->channels       = av_get_channel_layout_nb_channels(codec->channel_layout);
                    codec->time_base      = (AVRational){ 1, codec->sample_rate };
                    break;
                case AVMEDIA_TYPE_VIDEO:
    
                    codec->time_base = ost->filter->filter->inputs[0]->time_base;
    
                    codec->width  = ost->filter->filter->inputs[0]->w;
                    codec->height = ost->filter->filter->inputs[0]->h;
                    codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
                        ost->frame_aspect_ratio ? // overridden by the -aspect cli option
                        av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
                        ost->filter->filter->inputs[0]->sample_aspect_ratio;
                    codec->pix_fmt = ost->filter->filter->inputs[0]->format;
    
                    if (codec->width   != icodec->width  ||
                        codec->height  != icodec->height ||
                        codec->pix_fmt != icodec->pix_fmt) {
    
                        codec->bits_per_raw_sample = 0;
                    }
    
    
                    break;
                case AVMEDIA_TYPE_SUBTITLE:
    
                    codec->time_base = (AVRational){1, 1000};
    
                    break;
                default:
                    abort();
                    break;
                }
                /* two pass mode */
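                /* Pass 1 writes the encoder's rate-control stats to
                 * "<prefix>-<index>.log"; pass 2 reads that file back into
                 * codec->stats_in. libx264 handles its own stats file through
                 * its "stats" private option. */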
    
                if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
    
                    char logfilename[1024];
                    FILE *f;
    
                    snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
                             pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
                             i);
    
                    if (!strcmp(ost->enc->name, "libx264")) {
                        av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
                    } else {
    
                        if (codec->flags & CODEC_FLAG_PASS1) {
                            f = fopen(logfilename, "wb");
                            if (!f) {
                                av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
                                       logfilename, strerror(errno));
                                exit_program(1);
                            }
                            ost->logfile = f;
                        } else {
                            char  *logbuffer;
                            size_t logbuffer_size;
                            if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
                                av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
                                       logfilename);
                                exit_program(1);
                            }
                            codec->stats_in = logbuffer;
                        }
                    }
                }
            }
        }

        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost->encoding_needed) {
    
                AVCodec      *codec = ost->enc;
    
                AVCodecContext *dec = NULL;
    
                if ((ist = get_input_stream(ost)))
                    dec = ist->st->codec;
                if (dec && dec->subtitle_header) {
    
                    ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
                    if (!ost->st->codec->subtitle_header) {
                        ret = AVERROR(ENOMEM);
                        goto dump_format;
                    }
                    memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                    ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
                }
    
                if (!av_dict_get(ost->opts, "threads", NULL, 0))
                    av_dict_set(&ost->opts, "threads", "auto", 0);
    
                if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
    
                    snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
    
                            ost->file_index, ost->index);
                    ret = AVERROR(EINVAL);
                    goto dump_format;
                }
                assert_codec_experimental(ost->st->codec, 1);
                assert_avoptions(ost->opts);
                if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
                    av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                                 "It takes bits/s as argument, not kbits/s\n");
                extra_size += ost->st->codec->extradata_size;
    
    
                if (ost->st->codec->me_threshold)
                    input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
            }
        }

        /* init input streams */
        for (i = 0; i < nb_input_streams; i++)
    
            if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
                goto dump_format;

        /* discard unused programs */
        for (i = 0; i < nb_input_files; i++) {
    
            InputFile *ifile = input_files[i];
    
            for (j = 0; j < ifile->ctx->nb_programs; j++) {
                AVProgram *p = ifile->ctx->programs[j];
                int discard  = AVDISCARD_ALL;
    
                for (k = 0; k < p->nb_stream_indexes; k++)
    
                    if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
    
                        discard = AVDISCARD_DEFAULT;
                        break;
                    }
                p->discard = discard;
            }
        }
    
    
        /* open files and write file headers */
        for (i = 0; i < nb_output_files; i++) {
            oc = output_files[i]->ctx;
            oc->interrupt_callback = int_cb;
    
            if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
    
                snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }
    
            assert_avoptions(output_files[i]->opts);
    
            if (strcmp(oc->oformat->name, "rtp")) {
    
                want_sdp = 0;
            }
        }
    
     dump_format:
        /* dump the file output parameters - cannot be done before in case
           of stream copy */
    
        for (i = 0; i < nb_output_files; i++) {
    
            av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
    
        }
    
        /* dump the stream mapping */
    
        av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
    
            for (j = 0; j < ist->nb_filters; j++) {
                AVFilterLink *link = ist->filters[j]->filter->outputs[0];
                if (ist->filters[j]->graph->graph_desc) {
                    av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                           ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                           link->dst->filter->name);
                    if (link->dst->input_count > 1)
                        av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
                    if (nb_filtergraphs > 1)
                        av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                    av_log(NULL, AV_LOG_INFO, "\n");
                }
            }
        }
    
    
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];

            if (ost->attachment_filename) {
                /* an attached file */
                av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                       ost->attachment_filename, ost->file_index, ost->index);
                continue;
            }
    
    
            if (ost->filter && ost->filter->graph->graph_desc) {
                /* output from a complex graph */
                AVFilterLink *link = ost->filter->filter->inputs[0];
                av_log(NULL, AV_LOG_INFO, "  %s", link->src->filter->name);
                if (link->src->output_count > 1)
                    av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
    
                av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                       ost->index, ost->enc ? ost->enc->name : "?");
                continue;
            }
    
    
            av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
    
                   input_streams[ost->source_index]->file_index,
                   input_streams[ost->source_index]->st->index,
    
                   ost->file_index,
                   ost->index);
    
            if (ost->sync_ist != input_streams[ost->source_index])
    
                av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
    
                       ost->sync_ist->file_index,
                       ost->sync_ist->st->index);
    
            if (ost->stream_copy)
    
                av_log(NULL, AV_LOG_INFO, " (copy)");
            else
    
                av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
                       input_streams[ost->source_index]->dec->name : "?",
    
                       ost->enc ? ost->enc->name : "?");
            av_log(NULL, AV_LOG_INFO, "\n");
        }

        if (ret) {
            av_log(NULL, AV_LOG_ERROR, "%s\n", error);
            return ret;
        }

        if (want_sdp)
            print_sdp();

        return 0;
    }
    
    /*
     * The following code is the main loop of the file converter
     */
    
    static int transcode(void)
    {
        int ret, i;
        AVFormatContext *is, *os;
        OutputStream *ost;
        InputStream *ist;
        uint8_t *no_packet;
    
        int no_packet_count = 0;
    
        int64_t timer_start;
    
        if (!(no_packet = av_mallocz(nb_input_files)))
            exit_program(1);

        ret = transcode_init();
        if (ret < 0)
            goto fail;

        av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
    
        term_init();
    
        timer_start = av_gettime();
    
    
        for (; received_sigterm == 0;) {
    
            int file_index, ist_index, past_recording_time = 1;
    
            /* check if there's any stream where output is still needed */
    
            for (i = 0; i < nb_output_streams; i++) {
                OutputFile *of;

                ost = output_streams[i];
                of  = output_files[ost->file_index];
                os  = output_files[ost->file_index]->ctx;
                if (ost->is_past_recording_time ||
    
                    (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                    continue;
                if (ost->frame_number > ost->max_frames) {
                    int j;