/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
    
    
/* 200 seconds stream duration */
#define STREAM_DURATION   200.0
    
    #define STREAM_FRAME_RATE 25 /* 25 images/s */
    #define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
    
    #define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */
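
/* With these defaults the example produces 200 s * 25 fps = 5000 video
 * frames (STREAM_NB_FRAMES) in 4:2:0 planar YUV. */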
    
    static int sws_flags = SWS_BICUBIC;
    
    
    /**************************************************************/
    /* audio output */
    
    
    static float t, tincr, tincr2;
    static int16_t *samples;
    static int audio_input_frame_size;
    
/*
 * add an audio output stream
 */
    
static AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
                                  enum AVCodecID codec_id)
{
        AVCodecContext *c;
        AVStream *st;
    
    
        /* find the audio encoder */
    
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find codec\n");
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }

    c = st->codec;

    /* put sample parameters */
    c->sample_fmt  = AV_SAMPLE_FMT_S16;
    c->bit_rate    = 64000;
    c->sample_rate = 44100;
    c->channels    = 2;
    
        // some formats want stream headers to be separate
    
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c = st->codec;

    /* open the codec */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    
    /* init signal generator */
    t      = 0;
    tincr  = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
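    /* At the 44100 Hz sample rate set above this makes
     * tincr = 2*pi*110/44100 ~= 0.0157 rad per sample, i.e. a 110 Hz tone
     * whose frequency rises by a further 110 Hz for each second of audio. */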
    
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
    
    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
}
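
/* Encoders with a fixed frame size consume exactly frame_size samples per
 * channel per call (e.g. 1152 for MP2); only encoders that advertise
 * CODEC_CAP_VARIABLE_FRAME_SIZE accept the arbitrary 10000 used above. */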
    
    /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
     * 'nb_channels' channels. */
    
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
        int j, i, v;
        int16_t *q;
    
        q = samples;
    
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}
    
    
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c = st->codec;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet;

    av_init_packet(&pkt);

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    
        frame->nb_samples = audio_input_frame_size;
    
        avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                 (uint8_t *)samples,
                                 audio_input_frame_size *
                                 av_get_bytes_per_sample(c->sample_fmt) *
                                 c->channels, 1);
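    /* avcodec_fill_audio_frame() only points frame->data[] at the existing
     * samples buffer (validating the buffer size); it does not copy the
     * samples themselves. */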
    
        avcodec_encode_audio2(c, &pkt, frame, &got_packet);
        if (!got_packet)
            return;
    
        pkt.stream_index = st->index;
    
        /* Write the compressed frame to the media file. */
    
        if (av_interleaved_write_frame(oc, &pkt) != 0) {
    
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    
    avcodec_free_frame(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
}
    
    /**************************************************************/
    /* video output */
    
    
    static AVFrame *frame;
    static AVPicture src_picture, dst_picture;
    
    static uint8_t *video_outbuf;
    static int frame_count, video_outbuf_size;
    
    /* Add a video output stream. */
    
static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
                                  enum AVCodecID codec_id)
{
        AVCodecContext *c;
        AVStream *st;
    
    
        /* find the video encoder */
    
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }
    
    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }
    
    c = st->codec;
    avcodec_get_context_defaults3(c, *codec);

    c->codec_id = codec_id;

    c->bit_rate = 400000;
    
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
    
        c->time_base.den = STREAM_FRAME_RATE;
    
        c->time_base.num = 1;
    
        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
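    /* With a 1/25 time base, frame n simply carries pts == n and is
     * presented at n/25 seconds; frame 100, for example, plays at 4.0 s. */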
    
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
    
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
    
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    
        /* Some formats want stream headers to be separate. */
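    /* "Separate" headers mean the codec extradata (e.g. H.264 SPS/PPS) is
     * stored once in the container header rather than repeated in-band with
     * every keyframe. */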
    
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    if (avcodec_open2(c, codec, NULL) < 0) {
    
            fprintf(stderr, "Could not open codec\n");
    
        video_outbuf = NULL;
        if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
    
            /* Allocate output buffer. */
            /* XXX: API change will be done. */
            /* Buffers passed into lav* can be allocated any way you prefer,
             * as long as they're aligned enough for the architecture, and
             * they're freed appropriately (such as using av_free for buffers
             * allocated with av_malloc). */
    
        video_outbuf_size = 200000;
        video_outbuf      = av_malloc(video_outbuf_size);
    }

        /* allocate and init a re-usable frame */
        frame = avcodec_alloc_frame();
        if (!frame) {
            fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }
    
    
        /* Allocate the encoded raw picture. */
    
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }
    
        /* If the output format is not YUV420P, then a temporary YUV420P
         * picture is needed too. It is then converted to the required
         * output format. */
    
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
    
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
                fprintf(stderr, "Could not allocate temporary picture\n");
                exit(1);
            }
        }
    
    
    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
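
/* Note: in YUV420P the chroma planes are subsampled 2x2, hence the
 * width/2 x height/2 loops above; a 352x288 frame therefore occupies
 * 352*288 * 1.5 = 152064 bytes in total. */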
    
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }
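
    /* sws_ctx is a function-local static, so the relatively expensive
     * sws_getContext() setup above runs only once and the same scaler is
     * reused for every subsequent frame. */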
    
    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - the API will change slightly in the near
         * future for that. */
    
            AVPacket pkt;
            av_init_packet(&pkt);
    
            pkt.flags        |= AV_PKT_FLAG_KEY;
            pkt.stream_index  = st->index;
    
            pkt.data          = dst_picture.data[0];
    
            pkt.size          = sizeof(AVPicture);
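
        /* With AVFMT_RAWPICTURE the packet carries no encoded bitstream:
         * it references the picture data directly, with pkt.size set to
         * sizeof(AVPicture) by historical convention. */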
    
        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        AVPacket pkt;
        int got_output;
    
            av_init_packet(&pkt);
            pkt.data = NULL;    // packet data will be allocated by the encoder
            pkt.size = 0;
    
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame\n");
            exit(1);
        }
    
            /* If size is zero, it means the image was buffered. */
            if (got_output) {
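            /* av_rescale_q() converts a timestamp between two time bases:
             * pts 100 in the codec's 1/25 base becomes 360000 in a 1/90000
             * (90 kHz MPEG) stream time base, for example. */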
    
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;

            pkt.stream_index = st->index;
    
                /* Write the compressed frame to the media file. */
    
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
    
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_free(frame);
    av_free(video_outbuf);
    
    }
    
    /**************************************************************/
    /* media file output */
    
    int main(int argc, char **argv)
    {
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int i;
    
    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    
    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }

        fmt = oc->oformat;
    
        /* Add the audio and video streams using the default format codecs
         * and initialize the codecs. */
    
        video_st = NULL;
        audio_st = NULL;
    
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, &audio_codec, fmt->audio_codec);
    }

        /* Now that all the parameters are set, we can open the audio and
         * video codecs and allocate the necessary encode buffers. */
    
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

        av_dump_format(oc, 0, filename, 1);
    
    
        /* open the output file, if needed */
        if (!(fmt->flags & AVFMT_NOFILE)) {
    
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

        /* Write the stream header, if any. */
    
        if (avformat_write_header(oc, NULL) < 0) {
            fprintf(stderr, "Error occurred when opening output file\n");
            return 1;
        }
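
    /* Main loop: always service the stream that is furthest behind, so
     * av_interleaved_write_frame() receives audio and video packets in
     * roughly increasing timestamp order. */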
    
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num /
                        audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;
    
            /* write interleaved audio and video frames */
    
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
        if (video_st)
            close_video(oc, video_st);
        if (audio_st)
            close_audio(oc, audio_st);
    
        /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

        if (!(fmt->flags & AVFMT_NOFILE))
            /* Close the output file. */
    
            avio_close(oc->pb);
    
    
    /* free the format context */
        av_free(oc);
    
        return 0;
    }