Skip to content
Snippets Groups Projects
utils.c 54.7 KiB
Newer Older
  • Learn to ignore specific revisions
  • Fabrice Bellard's avatar
    Fabrice Bellard committed
    /*
     * utils for libavcodec
    
     * Copyright (c) 2001 Fabrice Bellard
    
     * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
     *
    
     * This file is part of Libav.
    
     * Libav is free software; you can redistribute it and/or
    
     * modify it under the terms of the GNU Lesser General Public
     * License as published by the Free Software Foundation; either
    
     * version 2.1 of the License, or (at your option) any later version.
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
     *
    
     * Libav is distributed in the hope that it will be useful,
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
    
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     * Lesser General Public License for more details.
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
     *
    
     * You should have received a copy of the GNU Lesser General Public
    
     * License along with Libav; if not, write to the Free Software
    
     * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
     */
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    /**
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
     * utils.
     */
    
    #include "libavutil/avstring.h"
    
    #include "libavutil/crc.h"
    
    #include "libavutil/mathematics.h"
    
    #include "libavutil/audioconvert.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/samplefmt.h"
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    #include "avcodec.h"
    
    #include "dsputil.h"
    
    #include "libavutil/opt.h"
    
    #include "imgconvert.h"
    
    #include "audioconvert.h"
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    #include <float.h>
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
    static int volatile entangled_thread_counter=0;
    
    static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
    
    static void *codec_mutex;
    
    void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
    {
        /*
         * Grow the buffer to at least min_size bytes, over-allocating by
         * ~1/16 + 32 bytes so that repeated small growths are amortized.
         * Returns the (possibly moved) buffer, or NULL on allocation
         * failure, in which case *size is reset to 0.
         * NOTE(review): the body of this function was partially lost in
         * this copy (no braces, no realloc call); restored per the
         * standard implementation — confirm against upstream.
         */
        if (min_size < *size)
            return ptr;

        min_size= FFMAX(17*min_size/16 + 32, min_size);

        ptr = av_realloc(ptr, min_size);
        if (!ptr) //we could set this to the unmodified min_size but this is safer if the user lost the ptr and uses NULL now
            min_size = 0;

        *size = min_size;

        return ptr;
    }
    void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
    {
        /*
         * Like av_fast_realloc(), but the previous contents need not be
         * preserved: free the old buffer and allocate a fresh one.
         * ptr is really a void** to the caller's buffer pointer.
         * NOTE(review): the pointer declaration and the early-return were
         * missing in this copy; restored per the standard implementation —
         * confirm against upstream.
         */
        void **p = ptr;

        if (min_size < *size)
            return;

        min_size= FFMAX(17*min_size/16 + 32, min_size);

        av_freep(p);
        *p = av_malloc(min_size);
        if (!*p) min_size = 0;   /* allocation failed: report zero capacity */
        *size= min_size;
    }
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    /* encoder management */
    
    static AVCodec *first_avcodec = NULL;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
    AVCodec *av_codec_next(AVCodec *c){
        /* Iterate over the registered-codec list: a NULL argument yields
         * the head of the list, otherwise the successor of c. */
        return c ? c->next : first_avcodec;
    }
    
    
    static void avcodec_init(void)
    {
        /* One-shot guard: set up the static dsputil tables exactly once,
         * no matter how many times this is called. */
        static int done;
    
        if (done)
            return;
        done = 1;
    
        dsputil_static_init();
    }
    
    
    static av_always_inline int codec_is_encoder(AVCodec *codec)
    {
        /* A codec can encode if it provides either encode callback. */
        if (!codec)
            return 0;
        return codec->encode != NULL || codec->encode2 != NULL;
    }
    
    static av_always_inline int codec_is_decoder(AVCodec *codec)
    {
        /* Decoding support is signalled by a non-NULL decode callback. */
        return codec != NULL && codec->decode != NULL;
    }
    
    
    void avcodec_register(AVCodec *codec)
    {
        AVCodec **p;
    
        /* Make sure the one-time static initialization has run before the
         * first codec becomes visible. */
        avcodec_init();
    
        /* Walk to the tail of the singly linked registry and append.
         * NOTE(review): the appending assignments were missing from this
         * copy of the file (the list walk computed p but never stored
         * codec); restored per the upstream implementation. */
        p = &first_avcodec;
        while (*p != NULL) p = &(*p)->next;
        *p = codec;
        codec->next = NULL;
    
        if (codec->init_static_data)
            codec->init_static_data(codec);
    }
    
    /**
     * Return the number of pixels of edge padding required around a frame
     * by the default buffer allocator (see CODEC_FLAG_EMU_EDGE).
     */
    unsigned avcodec_get_edge_width(void)
    {
        return EDGE_WIDTH;
    }
    
    
    void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
        /* Record the full coded size, then derive the displayed size by
         * scaling down for low-resolution decoding. The -((-x) >> shift)
         * form rounds the shifted value up, so odd dimensions are not
         * truncated. */
        const int shift = s->lowres;
    
        s->coded_width  = width;
        s->coded_height = height;
        s->width        = -((-width)  >> shift);
        s->height       = -((-height) >> shift);
    }
    
    
    #define INTERNAL_BUFFER_SIZE (32+1)
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    
    
    /**
     * Pad *width and *height up to the alignment required by the current
     * codec/pixel format and report the per-plane line-size alignment the
     * default buffer allocator must honor.
     *
     * @param s              codec context; pix_fmt, codec_id and lowres are read
     * @param width          in/out: picture width, rounded up in place
     * @param height         in/out: picture height, rounded up in place
     * @param linesize_align out: required byte alignment for each data plane
     */
    void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
                                   int linesize_align[AV_NUM_DATA_POINTERS])
    {
        int i;
        int w_align= 1;
        int h_align= 1;
    
        /* Per-format macroblock alignment. NOTE: the YUV410P, RGB555 and
         * PAL8/BGR8/RGB8 cases below fall through intentionally — each only
         * overrides the alignment for its specific codec_id. */
        switch(s->pix_fmt){
        case PIX_FMT_YUV420P:
        case PIX_FMT_YUV422P:
        case PIX_FMT_YUV444P:
        case PIX_FMT_GRAY8:
        case PIX_FMT_GRAY16BE:
        case PIX_FMT_GRAY16LE:
        case PIX_FMT_YUVJ420P:
        case PIX_FMT_YUVJ422P:
        case PIX_FMT_YUVJ440P:
        case PIX_FMT_YUVJ444P:
        case PIX_FMT_YUV420P9LE:
        case PIX_FMT_YUV420P9BE:
        case PIX_FMT_YUV420P10LE:
        case PIX_FMT_YUV420P10BE:
        case PIX_FMT_YUV422P9LE:
        case PIX_FMT_YUV422P9BE:
        case PIX_FMT_YUV422P10LE:
        case PIX_FMT_YUV422P10BE:
        case PIX_FMT_YUV444P9LE:
        case PIX_FMT_YUV444P9BE:
        case PIX_FMT_YUV444P10LE:
        case PIX_FMT_YUV444P10BE:
        case PIX_FMT_GBRP9LE:
        case PIX_FMT_GBRP9BE:
        case PIX_FMT_GBRP10LE:
        case PIX_FMT_GBRP10BE:
            w_align = 16; //FIXME assume 16 pixel per macroblock
            h_align = 16 * 2; // interlaced needs 2 macroblocks height
            break;
        case PIX_FMT_YUV411P:
        case PIX_FMT_UYYVYY411:
            w_align=32;
            h_align=8;
            break;
        case PIX_FMT_YUV410P:
            if(s->codec_id == CODEC_ID_SVQ1){
                w_align=64;
                h_align=64;
            }
            /* fall through */
        case PIX_FMT_RGB555:
            if(s->codec_id == CODEC_ID_RPZA){
                w_align=4;
                h_align=4;
            }
            /* fall through */
        case PIX_FMT_PAL8:
        case PIX_FMT_BGR8:
        case PIX_FMT_RGB8:
            if(s->codec_id == CODEC_ID_SMC){
                w_align=4;
                h_align=4;
            }
            /* NOTE(review): upstream has a break here; its absence is
             * benign because the BGR24 condition below cannot also hold,
             * but confirm this line was not lost from this copy. */
        case PIX_FMT_BGR24:
            if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){
                w_align=4;
                h_align=4;
            }
            break;
        default:
            w_align= 1;
            h_align= 1;
            break;
        }
    
        *width = FFALIGN(*width , w_align);
        *height= FFALIGN(*height, h_align);
        if(s->codec_id == CODEC_ID_H264 || s->lowres)
            *height+=2; // some of the optimized chroma MC reads one line too much
                        // which is also done in mpeg decoders with lowres > 0
        for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
            linesize_align[i] = STRIDE_ALIGN;
    //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes
    //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the
    //picture size unneccessarily in some cases. The solution here is not
    //pretty and better ideas are welcome!
    #if HAVE_MMX
        if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
           s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
           s->codec_id == CODEC_ID_VP6A) {
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                linesize_align[i] = 16;
        }
    #endif
    }
    
    void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
        /* Convenience wrapper around avcodec_align_dimensions2(): pads the
         * dimensions, then rounds *width up to the strictest per-plane
         * stride alignment (chroma alignments scaled by the horizontal
         * subsampling shift). *height is already aligned by the call below.
         * NOTE(review): the closing of this function was lost in this
         * copy; only the final width alignment and brace are restored. */
        int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w;
        int linesize_align[AV_NUM_DATA_POINTERS];
        int align;
    
        avcodec_align_dimensions2(s, width, height, linesize_align);
        align = FFMAX(linesize_align[0], linesize_align[3]);
        linesize_align[1] <<= chroma_shift;
        linesize_align[2] <<= chroma_shift;
        align = FFMAX3(align, linesize_align[1], linesize_align[2]);
        *width=FFALIGN(*width, align);
    }
    
    int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
                                 enum AVSampleFormat sample_fmt, const uint8_t *buf,
                                 int buf_size, int align)
    {
        /* Wire the caller-supplied audio buffer into frame->data /
         * frame->extended_data without copying any samples. Returns 0 (or
         * the non-negative result of av_samples_fill_arrays) on success,
         * a negative AVERROR code on failure. */
        int required, err, i;
    
        required = av_samples_get_buffer_size(NULL, nb_channels,
                                              frame->nb_samples, sample_fmt,
                                              align);
        if (buf_size < required)
            return AVERROR(EINVAL);
    
        /* Planar audio with more channels than AVFrame.data can hold needs
         * a separately allocated pointer array. */
        if (av_sample_fmt_is_planar(sample_fmt) &&
            nb_channels > AV_NUM_DATA_POINTERS) {
            frame->extended_data = av_mallocz(nb_channels *
                                              sizeof(*frame->extended_data));
            if (!frame->extended_data)
                return AVERROR(ENOMEM);
        } else {
            frame->extended_data = frame->data;
        }
    
        err = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
                                     buf, nb_channels, frame->nb_samples,
                                     sample_fmt, align);
        if (err < 0) {
            if (frame->extended_data != frame->data)
                av_free(frame->extended_data);
            return err;
        }
    
        /* Mirror the first channel pointers back into the fixed-size
         * data[] array when a separate extended_data array is in use. */
        if (frame->extended_data != frame->data)
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                frame->data[i] = frame->extended_data[i];
    
        return err;
    }
    
    
    /* Default get_buffer() implementation for audio: allocate (or reuse) a
     * single internal sample buffer sized for the frame and point the
     * AVFrame's data/extended_data at it. Returns 0 on success, a negative
     * AVERROR code on failure. */
    static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
    {
        AVCodecInternal *avci = avctx->internal;
        InternalBuffer *buf;
        /* NOTE(review): buf_size and ret were used but undeclared in this
         * copy of the file; declarations restored. */
        int buf_size, ret;
    
        buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                              frame->nb_samples, avctx->sample_fmt,
                                              32);
        if (buf_size < 0)
            return AVERROR(EINVAL);
    
        /* allocate InternalBuffer if needed */
        if (!avci->buffer) {
            avci->buffer = av_mallocz(sizeof(InternalBuffer));
            if (!avci->buffer)
                return AVERROR(ENOMEM);
        }
        buf = avci->buffer;
    
        /* if there is a previously-used internal buffer, check its size and
           channel count to see if we can reuse it */
        if (buf->extended_data) {
            /* if current buffer is too small, free it */
            if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
                av_free(buf->extended_data[0]);
                /* BUGFIX: was av_free(&buf->extended_data), which frees the
                 * address of the struct member rather than the allocated
                 * pointer array — undefined behavior / heap corruption. */
                if (buf->extended_data != buf->data)
                    av_freep(&buf->extended_data);
                buf->extended_data = NULL;
                buf->data[0] = NULL;
            }
            /* if number of channels has changed, reset and/or free extended data
               pointers but leave data buffer in buf->data[0] for reuse */
            if (buf->nb_channels != avctx->channels) {
                if (buf->extended_data != buf->data)
                    av_free(buf->extended_data);
                buf->extended_data = NULL;
            }
        }
    
        /* if there is no previous buffer or the previous buffer cannot be used
           as-is, allocate a new buffer and/or rearrange the channel pointers */
        if (!buf->extended_data) {
            if (!buf->data[0]) {
                if (!(buf->data[0] = av_mallocz(buf_size)))
                    return AVERROR(ENOMEM);
                buf->audio_data_size = buf_size;
            } /* NOTE(review): this closing brace was missing in this copy */
    
            if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
                                                avctx->sample_fmt, buf->data[0],
                                                buf->audio_data_size, 32)))
                return ret;
    
            if (frame->extended_data == frame->data)
                buf->extended_data = buf->data;
            else
                buf->extended_data = frame->extended_data;
            memcpy(buf->data, frame->data, sizeof(frame->data));
            buf->linesize[0] = frame->linesize[0];
            buf->nb_channels = avctx->channels;
        } else {
            /* copy InternalBuffer info to the AVFrame */
            frame->extended_data = buf->extended_data;
            frame->linesize[0]   = buf->linesize[0];
            memcpy(frame->data, buf->data, sizeof(frame->data));
        }
    
        frame->type          = FF_BUFFER_TYPE_INTERNAL;
    
        if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
        else            frame->pkt_pts = AV_NOPTS_VALUE;
        frame->reordered_opaque = avctx->reordered_opaque;
    
        if (avctx->debug & FF_DEBUG_BUFFERS)
            av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
                   "internal audio buffer used\n", frame);
    
        return 0;
    }
    
    static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
    {
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
        int i;
    
        int w= s->width;
        int h= s->height;
    
        InternalBuffer *buf;
    
        AVCodecInternal *avci = s->internal;
    
        if(pic->data[0]!=NULL) {
            av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
            return -1;
        }
    
        if(avci->buffer_count >= INTERNAL_BUFFER_SIZE) {
            av_log(s, AV_LOG_ERROR, "buffer_count overflow (missing release_buffer?)\n");
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    
    
        if(av_image_check_size(w, h, 0, s))
    
        if (!avci->buffer) {
            avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE+1) *
                                      sizeof(InternalBuffer));
    
        buf = &avci->buffer[avci->buffer_count];
    
        if(buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)){
    
            if(s->active_thread_type&FF_THREAD_FRAME) {
                av_log_missing_feature(s, "Width/height changing with frame threads is", 0);
                return -1;
            }
    
    
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    
        if (!buf->base[0]) {
    
            int h_chroma_shift, v_chroma_shift;
    
            int size[4] = {0};
            int tmpsize;
    
            AVPicture picture;
    
            int stride_align[AV_NUM_DATA_POINTERS];
    
            const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
            avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
    
            avcodec_align_dimensions2(s, &w, &h, stride_align);
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
            if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
                w+= EDGE_WIDTH*2;
                h+= EDGE_WIDTH*2;
            }
    
            do {
                // NOTE: do not align linesizes individually, this breaks e.g. assumptions
                // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
    
                av_image_fill_linesizes(picture.linesize, s->pix_fmt, w);
    
                // increase alignment of w for next try (rhs gives the lowest bit set in w)
                w += w & ~(w-1);
    
                for (i=0; i<4; i++){
    
                    unaligned |= picture.linesize[i] % stride_align[i];
    
            tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize);
    
    
            for (i=0; i<3 && picture.data[i+1]; i++)
                size[i] = picture.data[i+1] - picture.data[i];
    
            size[i] = tmpsize - (picture.data[i] - picture.data[0]);
    
    
            memset(buf->base, 0, sizeof(buf->base));
            memset(buf->data, 0, sizeof(buf->data));
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    
    
            for(i=0; i<4 && size[i]; i++){
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
                const int h_shift= i==0 ? 0 : h_chroma_shift;
                const int v_shift= i==0 ? 0 : v_chroma_shift;
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    
    
                buf->linesize[i]= picture.linesize[i];
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    
    
                buf->base[i]= av_malloc(size[i]+16); //FIXME 16
    
                if(buf->base[i]==NULL) return -1;
    
                memset(buf->base[i], 128, size[i]);
    
    
    Jai Menon's avatar
    Jai Menon committed
                // no edge if EDGE EMU or not planar YUV
    
                if((s->flags&CODEC_FLAG_EMU_EDGE) || !size[2])
    
                    buf->data[i] = buf->base[i];
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
                else
    
                    buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (pixel_size*EDGE_WIDTH>>h_shift), stride_align[i]);
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
            }
    
            for (; i < AV_NUM_DATA_POINTERS; i++) {
                buf->base[i] = buf->data[i] = NULL;
                buf->linesize[i] = 0;
            }
    
                ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt);
    
            buf->width  = s->width;
            buf->height = s->height;
            buf->pix_fmt= s->pix_fmt;
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
        }
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    
    
        for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    
            pic->base[i]= buf->base[i];
            pic->data[i]= buf->data[i];
    
        pic->extended_data = pic->data;
    
        if(s->pkt) pic->pkt_pts= s->pkt->pts;
        else       pic->pkt_pts= AV_NOPTS_VALUE;
    
        pic->reordered_opaque= s->reordered_opaque;
    
    
            av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d "
                   "buffers used\n", pic, avci->buffer_count);
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
        return 0;
    }
    
    
    int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
    {
        /* Dispatch to the media-type specific default allocator; anything
         * other than video or audio is an error. */
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
            return video_get_buffer(avctx, frame);
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO)
            return audio_get_buffer(avctx, frame);
        return -1;
    }
    
    
    /* Default release_buffer() implementation for video: return the frame's
     * InternalBuffer slot to the pool (swap-with-last) and clear the
     * frame's data pointers. */
    void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
        int i;
        InternalBuffer *buf, *last;
        AVCodecInternal *avci = s->internal;
    
        assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
        assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
        assert(avci->buffer_count);
    
        if (avci->buffer) {
            buf = NULL; /* avoids warning */
            for (i = 0; i < avci->buffer_count; i++) { //just 3-5 checks so is not worth to optimize
                buf = &avci->buffer[i];
                if (buf->data[0] == pic->data[0])
                    break;
            }
            assert(i < avci->buffer_count);
            avci->buffer_count--;
            last = &avci->buffer[avci->buffer_count];
    
            if (buf != last)
                FFSWAP(InternalBuffer, *buf, *last);
        } /* NOTE(review): this closing brace was missing in this copy */
    
        for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
            pic->data[i]=NULL;
    //        pic->base[i]=NULL;
        }
    //printf("R%X\n", pic->opaque);
    
        /* NOTE(review): the debug guard below was missing in this copy;
         * restored to match the pattern used by audio_get_buffer(). */
        if (s->debug & FF_DEBUG_BUFFERS)
            av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d "
                   "buffers used\n", pic, avci->buffer_count);
    }
    
    
    /* Default reget_buffer() implementation for video: reuse the existing
     * internal buffer when possible, otherwise allocate a new one and copy
     * the old picture into it. Returns 0 on success, -1 on failure. */
    int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
        AVFrame temp_pic;
        int i;
    
        assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
    
        /* If no picture return a new buffer */
        if(pic->data[0] == NULL) {
            /* We will copy from buffer, so must be readable */
            pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
            return s->get_buffer(s, pic);
        }
    
        /* If internal buffer type return the same buffer */
        if(pic->type == FF_BUFFER_TYPE_INTERNAL) {
            if(s->pkt) pic->pkt_pts= s->pkt->pts;
            else       pic->pkt_pts= AV_NOPTS_VALUE;
            pic->reordered_opaque= s->reordered_opaque;
            return 0;
        } /* NOTE(review): the return and brace were missing in this copy;
           * without them the function falls into the copy path below. */
    
        /*
         * Not internal type and reget_buffer not overridden, emulate cr buffer
         */
        temp_pic = *pic;
        for(i = 0; i < AV_NUM_DATA_POINTERS; i++)
            pic->data[i] = pic->base[i] = NULL;
        pic->opaque = NULL;
        /* Allocate new frame */
        if (s->get_buffer(s, pic))
            return -1;
        /* Copy image data from old buffer to new buffer */
        av_picture_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width,
                 s->height);
        s->release_buffer(s, &temp_pic); // Release old frame
        return 0;
    }
    
    
    /* Serial fallback for AVCodecContext.execute(): run every job on the
     * calling thread, advancing arg by size bytes per job, and record each
     * job's return value in ret[] when provided.
     * NOTE(review): the func() call was missing in this copy (r was used
     * but never computed, and size was unused); restored per the upstream
     * implementation. */
    int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){
        int i;
    
        for(i=0; i<count; i++){
            int r= func(c, (char*)arg + i*size);
            if(ret) ret[i]= r;
        }
        return 0;
    }
    
    
    /* Serial fallback for AVCodecContext.execute2(): run all jobs in order
     * on the calling thread (thread number 0), recording each job's return
     * value in ret[] when provided. Always returns 0. */
    int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr),void *arg, int *ret, int count){
        int job;
    
        for (job = 0; job < count; job++) {
            int status = func(c, arg, job, 0);
            if (ret)
                ret[job] = status;
        }
        return 0;
    }
    
    
    enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat *fmt){
        /* Pick the first entry of the codec's PIX_FMT_NONE-terminated
         * format list that is not a hardware-accelerated format. */
        const enum PixelFormat *p = fmt;
    
        while (*p != PIX_FMT_NONE && ff_is_hwaccel_pix_fmt(*p))
            p++;
        return *p;
    }
    
    
    /* Reset an AVFrame to its default state: zero everything, then set the
     * fields whose default is not zero.
     * NOTE(review): the closing brace was missing in this copy (the block
     * ran into the next function); restored. */
    void avcodec_get_frame_defaults(AVFrame *pic){
        memset(pic, 0, sizeof(AVFrame));
    
        pic->pts= AV_NOPTS_VALUE;
        pic->key_frame= 1;
        pic->sample_aspect_ratio = (AVRational){0, 1};
        pic->format = -1;           /* unknown */
    }
    AVFrame *avcodec_alloc_frame(void){
    
        AVFrame *pic= av_malloc(sizeof(AVFrame));
    
        avcodec_get_frame_defaults(pic);
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
        return pic;
    }
    
    
    int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    {
    
        if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
            return AVERROR(EINVAL);
    
    
        if (options)
            av_dict_copy(&tmp, *options, 0);
    
        /* If there is a user-supplied mutex locking routine, call it. */
        if (ff_lockmgr_cb) {
            if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
                return -1;
        }
    
    
        entangled_thread_counter++;
        if(entangled_thread_counter != 1){
            av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        if(avctx->codec || !codec) {
            ret = AVERROR(EINVAL);
    
        avctx->internal = av_mallocz(sizeof(AVCodecInternal));
        if (!avctx->internal) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    
    
        if (codec->priv_data_size > 0) {
    
            avctx->priv_data = av_mallocz(codec->priv_data_size);
    
            if (!avctx->priv_data) {
                ret = AVERROR(ENOMEM);
    
                *(AVClass**)avctx->priv_data= codec->priv_class;
                av_opt_set_defaults(avctx->priv_data);
            }
          }
    
          if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
    
        if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
            goto free_and_end;
    
    
        if(avctx->coded_width && avctx->coded_height)
            avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
        else if(avctx->width && avctx->height)
            avcodec_set_dimensions(avctx, avctx->width, avctx->height);
    
    
        if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
            && (  av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0
               || av_image_check_size(avctx->width,       avctx->height,       0, avctx) < 0)) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid width/height values\n");
            avcodec_set_dimensions(avctx, 0, 0);
        }
    
    
        /* if the decoder init function was already called previously,
           free the already allocated subtitle_header before overwriting it */
    
            av_freep(&avctx->subtitle_header);
    
    
    #define SANE_NB_CHANNELS 128U
    
        if (avctx->channels > SANE_NB_CHANNELS) {
    
        avctx->codec = codec;
    
        if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
    
            avctx->codec_id == CODEC_ID_NONE) {
            avctx->codec_type = codec->type;
            avctx->codec_id   = codec->id;
        }
    
        if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
                               && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
    
            av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
    
        avctx->frame_number = 0;
    
    
        if (HAVE_THREADS && !avctx->thread_opaque) {
    
            if (ret < 0) {
                goto free_and_end;
            }
        }
    
        if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS))
            avctx->thread_count = 1;
    
        if (avctx->codec->max_lowres < avctx->lowres) {
            av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
                   avctx->codec->max_lowres);
    
            goto free_and_end;
        }
    
            if (avctx->codec->sample_fmts) {
    
    Justin Ruggles's avatar
    Justin Ruggles committed
                for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
                    if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
                        break;
                if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
                    av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
                    ret = AVERROR(EINVAL);
                    goto free_and_end;
                }
    
            }
            if (avctx->codec->supported_samplerates) {
                for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
                    if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
                        break;
                if (avctx->codec->supported_samplerates[i] == 0) {
                    av_log(avctx, AV_LOG_ERROR, "Specified sample_rate is not supported\n");
                    ret = AVERROR(EINVAL);
                    goto free_and_end;
                }
            }
            if (avctx->codec->channel_layouts) {
                if (!avctx->channel_layout) {
                    av_log(avctx, AV_LOG_WARNING, "channel_layout not specified\n");
                } else {
                    for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
                        if (avctx->channel_layout == avctx->codec->channel_layouts[i])
                            break;
                    if (avctx->codec->channel_layouts[i] == 0) {
                        av_log(avctx, AV_LOG_ERROR, "Specified channel_layout is not supported\n");
                        ret = AVERROR(EINVAL);
                        goto free_and_end;
                    }
                }
            }
    
            if (avctx->channel_layout && avctx->channels) {
                if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
                    av_log(avctx, AV_LOG_ERROR, "channel layout does not match number of channels\n");
                    ret = AVERROR(EINVAL);
                    goto free_and_end;
                }
    
            } else if (avctx->channel_layout) {
                avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
    
        if(avctx->codec->init && !(avctx->active_thread_type&FF_THREAD_FRAME)){
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
            ret = avctx->codec->init(avctx);
            if (ret < 0) {
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
            }
    
    end:
        entangled_thread_counter--;
    
    
        /* Release any user-supplied mutex. */
        if (ff_lockmgr_cb) {
            (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
        }
    
        if (options) {
            av_dict_free(options);
            *options = tmp;
        }
    
    
    /* Ensure avpkt can hold size bytes: validate a user-supplied buffer,
     * or allocate a new packet of the requested size. A user-supplied
     * packet keeps its data pointer and capacity; all other fields are
     * reset to their defaults. Returns 0 on success, negative AVERROR on
     * failure.
     * NOTE(review): the function's closing brace was missing in this copy;
     * restored. */
    int ff_alloc_packet(AVPacket *avpkt, int size)
    {
        /* Reject negative sizes as well as sizes that would overflow once
         * the input padding is added. */
        if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
            return AVERROR(EINVAL);
    
        if (avpkt->data) {
            uint8_t *pkt_data;
            int pkt_size;
    
            if (avpkt->size < size)
                return AVERROR(EINVAL);
    
            /* reset all fields except the caller's buffer */
            pkt_data = avpkt->data;
            pkt_size = avpkt->size;
            av_init_packet(avpkt);
            avpkt->data = pkt_data;
            avpkt->size = pkt_size;
            return 0;
        } else {
            return av_new_packet(avpkt, size);
        }
    }
    
    int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                                                  AVPacket *avpkt,
                                                  const AVFrame *frame,
                                                  int *got_packet_ptr)
    {
        int ret;
        int user_packet = !!avpkt->data;
        int nb_samples;
    
        if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
            av_init_packet(avpkt);
            avpkt->size = 0;
    
        }
    
        /* check for valid frame size */
        if (frame) {
            nb_samples = frame->nb_samples;
            if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
                if (nb_samples > avctx->frame_size)
                    return AVERROR(EINVAL);
            } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
                if (nb_samples != avctx->frame_size)
                    return AVERROR(EINVAL);
            }
        } else {
            nb_samples = avctx->frame_size;
        }
    
        if (avctx->codec->encode2) {
            *got_packet_ptr = 0;
            ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
            if (!ret && *got_packet_ptr &&
                !(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
                avpkt->pts = frame->pts;
                avpkt->duration = av_rescale_q(frame->nb_samples,
                                               (AVRational){ 1, avctx->sample_rate },
                                               avctx->time_base);
            }
        } else {
            /* for compatibility with encoders not supporting encode2(), we need to
               allocate a packet buffer if the user has not provided one or check
               the size otherwise */
            int fs_tmp   = 0;
            int buf_size = avpkt->size;
            if (!user_packet) {
                if (avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
                    av_assert0(av_get_bits_per_sample(avctx->codec_id) != 0);
                    buf_size = nb_samples * avctx->channels *
                               av_get_bits_per_sample(avctx->codec_id) / 8;
                } else {
                    /* this is a guess as to the required size.
                       if an encoder needs more than this, it should probably
                       implement encode2() */
                    buf_size = 2 * avctx->frame_size * avctx->channels *
                               av_get_bytes_per_sample(avctx->sample_fmt);
                    buf_size += FF_MIN_BUFFER_SIZE;
                }
            }
            if ((ret = ff_alloc_packet(avpkt, buf_size)))
                return ret;
    
            /* Encoders using AVCodec.encode() that support
               CODEC_CAP_SMALL_LAST_FRAME require avctx->frame_size to be set to
               the smaller size when encoding the last frame.
               This code can be removed once all encoders supporting
               CODEC_CAP_SMALL_LAST_FRAME use encode2() */
            if ((avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) &&
                nb_samples < avctx->frame_size) {
                fs_tmp = avctx->frame_size;
                avctx->frame_size = nb_samples;
            }
    
            /* encode the frame */
            ret = avctx->codec->encode(avctx, avpkt->data, avpkt->size,
                                       frame ? frame->data[0] : NULL);
            if (ret >= 0) {
                if (!ret) {
                    /* no output. if the packet data was allocated by libavcodec,
                       free it */
                    if (!user_packet)
                        av_freep(&avpkt->data);
                } else {
                    if (avctx->coded_frame)
                        avpkt->pts = avctx->coded_frame->pts;
                    /* Set duration for final small packet. This can be removed
                       once all encoders supporting CODEC_CAP_SMALL_LAST_FRAME use
                       encode2() */
                    if (fs_tmp) {
                        avpkt->duration = av_rescale_q(avctx->frame_size,
                                                       (AVRational){ 1, avctx->sample_rate },
                                                       avctx->time_base);
                    }
                }
                avpkt->size = ret;
                *got_packet_ptr = (ret > 0);
                ret = 0;
            }
    
            if (fs_tmp)
                avctx->frame_size = fs_tmp;
        }
        if (!ret)
            avctx->frame_number++;
    
        /* NOTE: if we add any audio encoders which output non-keyframe packets,
                 this needs to be moved to the encoders, but for now we can do it
                 here to simplify things */
        avpkt->flags |= AV_PKT_FLAG_KEY;
    
        return ret;
    
    #if FF_API_OLD_DECODE_AUDIO
    int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
                                                 uint8_t *buf, int buf_size,
                                                 const short *samples)
    {
        AVPacket pkt;
        AVFrame frame0;
        AVFrame *frame;
        int ret, samples_size, got_packet;
    
        av_init_packet(&pkt);
        pkt.data = buf;
        pkt.size = buf_size;
    
        if (samples) {
            frame = &frame0;
            avcodec_get_frame_defaults(frame);
    
            if (avctx->frame_size) {
                frame->nb_samples = avctx->frame_size;
            } else {
                /* if frame_size is not set, the number of samples must be
                   calculated from the buffer size */
                int64_t nb_samples;
                if (!av_get_bits_per_sample(avctx->codec_id)) {
                    av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
                           "support this codec\n");
                    return AVERROR(EINVAL);
                }
                nb_samples = (int64_t)buf_size * 8 /
                             (av_get_bits_per_sample(avctx->codec_id) *
                             avctx->channels);
                if (nb_samples >= INT_MAX)
                    return AVERROR(EINVAL);
                frame->nb_samples = nb_samples;
            }
    
            /* it is assumed that the samples buffer is large enough based on the
               relevant parameters */
            samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                      frame->nb_samples,
                                                      avctx->sample_fmt, 1);
            if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
                                                avctx->sample_fmt,
                                                samples, samples_size, 1)))
                return ret;
    
            /* fabricate frame pts from sample count.
               this is needed because the avcodec_encode_audio() API does not have
               a way for the user to provide pts */
            frame->pts = av_rescale_q(avctx->internal->sample_count,
                                      (AVRational){ 1, avctx->sample_rate },
                                      avctx->time_base);
            avctx->internal->sample_count += frame->nb_samples;
        } else {
            frame = NULL;
        }
    
        got_packet = 0;