/*
 * Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/crc.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/audioconvert.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "thread.h"
#include "audioconvert.h"
#include "internal.h"
#include "bytestream.h"
#include <stdlib.h>
#include <stdarg.h>
#include <limits.h>
static int volatile entangled_thread_counter = 0;
static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
static void *codec_mutex;
static void *avformat_mutex;
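/**
 * Reallocate *ptr to at least min_size bytes, keeping the current allocation
 * when it is already large enough.  The new size is over-allocated to roughly
 * min_size * 17/16 + 32 so that a sequence of slowly growing requests does not
 * hit a realloc on every call; on allocation failure *size is reset to 0 so a
 * caller that lost the old pointer does not assume a valid buffer.
 *
 * Illustrative numbers only (not part of the API): growing from 1000 to 1001
 * bytes actually allocates 17 * 1001 / 16 + 32 = 1095 bytes, so the next ~90
 * one-byte growth steps need no further reallocation.
 */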
void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
{
    if (min_size < *size)
        return ptr;

    min_size = FFMAX(17 * min_size / 16 + 32, min_size);

    ptr = av_realloc(ptr, min_size);
    /* we could set this to the unmodified min_size but this is safer
     * if the user lost the ptr and uses NULL now
     */
    if (!ptr)
        min_size = 0;

    *size = min_size;

    return ptr;
}
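/**
 * Same growth policy as av_fast_realloc(), but the old buffer is freed and a
 * fresh one allocated, so the previous contents are NOT preserved.  Intended
 * for scratch buffers whose data does not need to survive the resize.
 */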
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
{
    void **p = ptr;
    if (min_size < *size)
        return;
    min_size = FFMAX(17 * min_size / 16 + 32, min_size);
    av_free(*p);
    *p = av_malloc(min_size);
    if (!*p)
        min_size = 0;
    *size = min_size;
}
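/**
 * Like av_fast_malloc(), but reserves FF_INPUT_BUFFER_PADDING_SIZE extra bytes
 * at the end and zeroes them, so bitstream readers that over-read slightly
 * past the end of the buffer stay within allocated, deterministic memory.
 * Requests that would overflow size_t free the buffer and report a size of 0.
 */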
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
{
void **p = ptr;
if (min_size > SIZE_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
av_freep(p);
*size = 0;
return;
}
av_fast_malloc(p, size, min_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (*size)
memset((uint8_t *)*p + min_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
}
static AVCodec *first_avcodec = NULL;
AVCodec *av_codec_next(const AVCodec *c)
{
if (c)
return c->next;
else
        return first_avcodec;
}
static void avcodec_init(void)
{
static int initialized = 0;
if (initialized != 0)
return;
initialized = 1;
    ff_dsputil_static_init();
}
int av_codec_is_encoder(const AVCodec *codec)
{
return codec && (codec->encode_sub || codec->encode2);
}
int av_codec_is_decoder(const AVCodec *codec)
{
return codec && codec->decode;
}
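/**
 * Add a codec to the end of the linked list walked by av_codec_next() and run
 * its init_static_data() hook once, if the codec provides one.
 */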
void avcodec_register(AVCodec *codec)
{
    AVCodec **p;
    avcodec_init();
    p = &first_avcodec;
    while (*p != NULL)
        p = &(*p)->next;
    *p = codec;
    codec->next = NULL;

    if (codec->init_static_data)
        codec->init_static_data(codec);
}
unsigned avcodec_get_edge_width(void)
{
return EDGE_WIDTH;
}
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
{
s->coded_width = width;
s->coded_height = height;
s->width = width;
    s->height = height;
}
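/**
 * Compute the padded width/height and per-plane stride alignment a codec needs
 * for its internal buffers: most YUV formats are padded to whole (interlaced)
 * macroblocks, a few palette/RLE codecs only need 4-pixel alignment, and H.264
 * gets two extra lines for chroma motion compensation over-reads.
 */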
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
int linesize_align[AV_NUM_DATA_POINTERS])
{
    int i;
    int w_align = 1;
    int h_align = 1;

    switch (s->pix_fmt) {
    case PIX_FMT_YUYV422:
case PIX_FMT_UYVY422:
case PIX_FMT_YUV440P:
case PIX_FMT_GRAY16BE:
case PIX_FMT_GRAY16LE:
case PIX_FMT_YUVJ420P:
case PIX_FMT_YUVJ422P:
case PIX_FMT_YUVJ440P:
case PIX_FMT_YUVA420P:
case PIX_FMT_YUV420P9LE:
case PIX_FMT_YUV420P9BE:
case PIX_FMT_YUV420P10LE:
case PIX_FMT_YUV420P10BE:
case PIX_FMT_YUV422P9LE:
case PIX_FMT_YUV422P9BE:
case PIX_FMT_YUV422P10LE:
case PIX_FMT_YUV422P10BE:
case PIX_FMT_YUV444P9LE:
case PIX_FMT_YUV444P9BE:
case PIX_FMT_YUV444P10LE:
case PIX_FMT_YUV444P10BE:
case PIX_FMT_GBRP9LE:
case PIX_FMT_GBRP9BE:
case PIX_FMT_GBRP10LE:
case PIX_FMT_GBRP10BE:
w_align = 16; //FIXME assume 16 pixel per macroblock
        h_align = 16 * 2; // interlaced needs 2 macroblocks height
        break;
case PIX_FMT_UYYVYY411:
if (s->codec_id == AV_CODEC_ID_SVQ1) {
w_align = 64;
            h_align = 64;
        }
if (s->codec_id == AV_CODEC_ID_RPZA) {
w_align = 4;
            h_align = 4;
        }
case PIX_FMT_BGR8:
case PIX_FMT_RGB8:
if (s->codec_id == AV_CODEC_ID_SMC) {
w_align = 4;
            h_align = 4;
        }
        break;
case PIX_FMT_BGR24:
if ((s->codec_id == AV_CODEC_ID_MSZH) ||
(s->codec_id == AV_CODEC_ID_ZLIB)) {
w_align = 4;
h_align = 4;
}
        break;
    }

*width = FFALIGN(*width, w_align);
*height = FFALIGN(*height, h_align);
if (s->codec_id == AV_CODEC_ID_H264)
// some of the optimized chroma MC reads one line too much
*height += 2;
for (i = 0; i < 4; i++)
linesize_align[i] = STRIDE_ALIGN;
}
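/**
 * Convenience wrapper around avcodec_align_dimensions2(): collapses the
 * per-plane stride alignments (chroma planes scaled by the horizontal
 * subsampling shift) into a single alignment and applies it to the width.
 */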
void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
{
int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w;
int linesize_align[AV_NUM_DATA_POINTERS];
int align;
avcodec_align_dimensions2(s, width, height, linesize_align);
align = FFMAX(linesize_align[0], linesize_align[3]);
linesize_align[1] <<= chroma_shift;
linesize_align[2] <<= chroma_shift;
align = FFMAX3(align, linesize_align[1], linesize_align[2]);
    *width = FFALIGN(*width, align);
}
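/**
 * Point frame->data/extended_data and linesize[0] into a caller-supplied audio
 * buffer.  For planar audio with more channels than AV_NUM_DATA_POINTERS a
 * separate extended_data array is allocated; otherwise extended_data aliases
 * frame->data.
 *
 * Illustrative call (buffer, channel count and sample format are arbitrary
 * examples, not taken from this file):
 * @code
 *     frame.nb_samples = 1024;
 *     ret = avcodec_fill_audio_frame(&frame, 2, AV_SAMPLE_FMT_S16,
 *                                    buf, buf_size, 0);
 * @endcode
 */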
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
enum AVSampleFormat sample_fmt, const uint8_t *buf,
int buf_size, int align)
{
int ch, planar, needed_size, ret = 0;
needed_size = av_samples_get_buffer_size(NULL, nb_channels,
frame->nb_samples, sample_fmt,
align);
if (buf_size < needed_size)
return AVERROR(EINVAL);
planar = av_sample_fmt_is_planar(sample_fmt);
if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
if (!(frame->extended_data = av_mallocz(nb_channels *
sizeof(*frame->extended_data))))
return AVERROR(ENOMEM);
} else {
frame->extended_data = frame->data;
}
if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
buf, nb_channels, frame->nb_samples,
sample_fmt, align)) < 0) {
if (frame->extended_data != frame->data)
av_free(frame->extended_data);
return ret;
}
if (frame->extended_data != frame->data) {
for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
frame->data[ch] = frame->extended_data[ch];
}
return ret;
}
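/**
 * Default get_buffer() path for audio: a single InternalBuffer owned by the
 * codec context is reused across frames and only reallocated when the required
 * size grows or the channel count changes, then exposed through the AVFrame.
 */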
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
InternalBuffer *buf;
int buf_size, ret;
buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
frame->nb_samples, avctx->sample_fmt,
0);
if (buf_size < 0)
return AVERROR(EINVAL);
/* allocate InternalBuffer if needed */
if (!avci->buffer) {
avci->buffer = av_mallocz(sizeof(InternalBuffer));
if (!avci->buffer)
return AVERROR(ENOMEM);
}
buf = avci->buffer;
    /* if there is a previously-used internal buffer, check its size and
     * channel count to see if we can reuse it */
if (buf->extended_data) {
/* if current buffer is too small, free it */
if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
av_free(buf->extended_data[0]);
if (buf->extended_data != buf->data)
                av_free(buf->extended_data);
            buf->extended_data = NULL;
            buf->data[0]       = NULL;
}
/* if number of channels has changed, reset and/or free extended data
* pointers but leave data buffer in buf->data[0] for reuse */
if (buf->nb_channels != avctx->channels) {
if (buf->extended_data != buf->data)
av_free(buf->extended_data);
buf->extended_data = NULL;
}
}
/* if there is no previous buffer or the previous buffer cannot be used
* as-is, allocate a new buffer and/or rearrange the channel pointers */
if (!buf->data[0]) {
        if (!(buf->data[0] = av_mallocz(buf_size)))
            return AVERROR(ENOMEM);
        buf->audio_data_size = buf_size;
if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
avctx->sample_fmt, buf->data[0],
                                            buf->audio_data_size, 0)))
            return ret;
if (frame->extended_data == frame->data)
buf->extended_data = buf->data;
else
buf->extended_data = frame->extended_data;
memcpy(buf->data, frame->data, sizeof(frame->data));
buf->linesize[0] = frame->linesize[0];
buf->nb_channels = avctx->channels;
} else {
/* copy InternalBuffer info to the AVFrame */
frame->extended_data = buf->extended_data;
frame->linesize[0] = buf->linesize[0];
        memcpy(frame->data, buf->data, sizeof(frame->data));
    }

if (avctx->pkt)
frame->pkt_pts = avctx->pkt->pts;
else
frame->pkt_pts = AV_NOPTS_VALUE;
frame->reordered_opaque = avctx->reordered_opaque;
frame->sample_rate = avctx->sample_rate;
frame->format = avctx->sample_fmt;
frame->channel_layout = avctx->channel_layout;
if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
               "internal audio buffer used\n", frame);

    return 0;
}
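/**
 * Default get_buffer() path for video: buffers are drawn from a small internal
 * pool, dimensions are padded with avcodec_align_dimensions2(), and an
 * EDGE_WIDTH border is added around each plane unless the codec sets
 * CODEC_FLAG_EMU_EDGE.
 */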
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
    int i;
    int w = s->width;
    int h = s->height;
    InternalBuffer *buf;
    AVCodecInternal *avci = s->internal;

    if (pic->data[0] != NULL) {
        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
        return -1;
    }
if (avci->buffer_count >= INTERNAL_BUFFER_SIZE) {
av_log(s, AV_LOG_ERROR, "buffer_count overflow (missing release_buffer?)\n");
return -1;
}
if (!avci->buffer) {
avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE + 1) *
                                  sizeof(InternalBuffer));
    }

buf = &avci->buffer[avci->buffer_count];
if (buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)) {
for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
av_freep(&buf->base[i]);
}
}
    if (!buf->base[0]) {
        int unaligned;
        int h_chroma_shift, v_chroma_shift;
        int tmpsize, size[4] = { 0 };
        AVPicture picture;
        int stride_align[AV_NUM_DATA_POINTERS];
        const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1 + 1;
        avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
avcodec_align_dimensions2(s, &w, &h, stride_align);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += EDGE_WIDTH * 2;
            h += EDGE_WIDTH * 2;
        }
do {
// NOTE: do not align linesizes individually, this breaks e.g. assumptions
// that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
av_image_fill_linesizes(picture.linesize, s->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);
unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= picture.linesize[i] % stride_align[i];
} while (unaligned);
tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize);
if (tmpsize < 0)
return -1;
for (i = 0; i < 3 && picture.data[i + 1]; i++)
size[i] = picture.data[i + 1] - picture.data[i];
size[i] = tmpsize - (picture.data[i] - picture.data[0]);
memset(buf->base, 0, sizeof(buf->base));
memset(buf->data, 0, sizeof(buf->data));
for (i = 0; i < 4 && size[i]; i++) {
const int h_shift = i == 0 ? 0 : h_chroma_shift;
const int v_shift = i == 0 ? 0 : v_chroma_shift;
buf->base[i] = av_malloc(size[i] + 16); //FIXME 16
if (buf->base[i] == NULL)
return -1;
memset(buf->base[i], 128, size[i]);
            if ((s->flags & CODEC_FLAG_EMU_EDGE) || !size[2])
                buf->data[i] = buf->base[i];
            else
                buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i] * EDGE_WIDTH >> v_shift) + (pixel_size * EDGE_WIDTH >> h_shift), stride_align[i]);
        }
for (; i < AV_NUM_DATA_POINTERS; i++) {
buf->linesize[i] = 0;
}
if (size[1] && !size[2])
ff_set_systematic_pal2((uint32_t *)buf->data[1], s->pix_fmt);
buf->width = s->width;
buf->height = s->height;
        buf->pix_fmt = s->pix_fmt;
    }
for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
pic->base[i] = buf->base[i];
pic->data[i] = buf->data[i];
        pic->linesize[i] = buf->linesize[i];
    }
avci->buffer_count++;
pic->width = buf->width;
pic->height = buf->height;
pic->format = buf->pix_fmt;
pic->sample_aspect_ratio = s->sample_aspect_ratio;
if (s->pkt)
pic->pkt_pts = s->pkt->pts;
else
pic->pkt_pts = AV_NOPTS_VALUE;
pic->reordered_opaque = s->reordered_opaque;
av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d "
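/**
 * Default AVCodecContext.get_buffer() implementation: dispatch to the audio or
 * video helper above depending on the codec type.
 */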
int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
return video_get_buffer(avctx, frame);
case AVMEDIA_TYPE_AUDIO:
return audio_get_buffer(avctx, frame);
default:
return -1;
}
}
void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic)
{
    int i;
    InternalBuffer *buf, *last;
    AVCodecInternal *avci = s->internal;
assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
assert(avci->buffer_count);
if (avci->buffer) {
buf = NULL; /* avoids warning */
for (i = 0; i < avci->buffer_count; i++) { //just 3-5 checks so is not worth to optimize
buf = &avci->buffer[i];
if (buf->data[0] == pic->data[0])
break;
}
assert(i < avci->buffer_count);
avci->buffer_count--;
last = &avci->buffer[avci->buffer_count];
if (buf != last)
            FFSWAP(InternalBuffer, *buf, *last);
    }
for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
pic->data[i] = NULL;
av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d "
int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic)
{
AVFrame temp_pic;
int i;
assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
    /* If no picture return a new buffer */
    if (pic->data[0] == NULL) {
        /* We will copy from buffer, so must be readable */
pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
return s->get_buffer(s, pic);
}
assert(s->pix_fmt == pic->format);
/* If internal buffer type return the same buffer */
if (pic->type == FF_BUFFER_TYPE_INTERNAL) {
if (s->pkt)
pic->pkt_pts = s->pkt->pts;
else
pic->pkt_pts = AV_NOPTS_VALUE;
        pic->reordered_opaque = s->reordered_opaque;
        return 0;
    }
/*
* Not internal type and reget_buffer not overridden, emulate cr buffer
*/
temp_pic = *pic;
    for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
        pic->data[i] = pic->base[i] = NULL;
pic->opaque = NULL;
/* Allocate new frame */
if (s->get_buffer(s, pic))
return -1;
/* Copy image data from old buffer to new buffer */
av_picture_copy((AVPicture *)pic, (AVPicture *)&temp_pic, s->pix_fmt, s->width,
s->height);
s->release_buffer(s, &temp_pic); // Release old frame
return 0;
}
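/**
 * Serial fallback for AVCodecContext.execute(): run func over all count jobs
 * in the calling thread and collect the per-job return codes.  Threaded
 * builds install their own execute() callback instead.
 */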
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
{
    int i;

    for (i = 0; i < count; i++) {
        int r = func(c, (char *)arg + i * size);
        if (ret)
            ret[i] = r;
    }
    return 0;
}
int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
{
int i;
for (i = 0; i < count; i++) {
int r = func(c, arg, i, 0);
if (ret)
ret[i] = r;
}
return 0;
}
enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat *fmt)
{
while (*fmt != PIX_FMT_NONE && ff_is_hwaccel_pix_fmt(*fmt))
        ++fmt;
    return fmt[0];
}
void avcodec_get_frame_defaults(AVFrame *frame)
{
    memset(frame, 0, sizeof(AVFrame));

    frame->pts                 = AV_NOPTS_VALUE;
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational) { 0, 1 };
    frame->format              = -1; /* unknown */
}
AVFrame *avcodec_alloc_frame(void)
{
    AVFrame *frame = av_mallocz(sizeof(AVFrame));
    if (frame == NULL)
        return NULL;
    avcodec_get_frame_defaults(frame);
    return frame;
}
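/**
 * Initialize an AVCodecContext to use the given AVCodec.  Typical decoder
 * setup looks roughly like the sketch below (illustrative only: error handling
 * is omitted and the codec id is chosen arbitrarily):
 * @code
 *     AVCodec *codec        = avcodec_find_decoder(AV_CODEC_ID_MP3);
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     if (!codec || !avctx || avcodec_open2(avctx, codec, NULL) < 0)
 *         abort(); // real code should report the error instead
 * @endcode
 */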
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
{
    int ret = 0;
AVDictionary *tmp = NULL;
if (avcodec_is_open(avctx))
return 0;
if ((!codec && !avctx->codec)) {
av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2().\n");
return AVERROR(EINVAL);
}
if ((codec && avctx->codec && codec != avctx->codec)) {
av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
"but %s passed to avcodec_open2().\n", avctx->codec->name, codec->name);
return AVERROR(EINVAL);
}
if (!codec)
codec = avctx->codec;
if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
return AVERROR(EINVAL);
if (options)
av_dict_copy(&tmp, *options, 0);
/* If there is a user-supplied mutex locking routine, call it. */
if (ff_lockmgr_cb) {
if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
return -1;
}
    entangled_thread_counter++;
    if (entangled_thread_counter != 1) {
        av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
        ret = -1;
        goto end;
    }
avctx->internal = av_mallocz(sizeof(AVCodecInternal));
if (!avctx->internal) {
ret = AVERROR(ENOMEM);
goto end;
}
if (codec->priv_data_size > 0) {
if (!avctx->priv_data) {
avctx->priv_data = av_mallocz(codec->priv_data_size);
if (!avctx->priv_data) {
ret = AVERROR(ENOMEM);
goto end;
}
if (codec->priv_class) {
*(const AVClass **)avctx->priv_data = codec->priv_class;
av_opt_set_defaults(avctx->priv_data);
}
}
if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
goto free_and_end;
} else {
avctx->priv_data = NULL;
}
if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
goto free_and_end;
    if (avctx->coded_width && avctx->coded_height)
        avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
    else if (avctx->width && avctx->height)
        avcodec_set_dimensions(avctx, avctx->width, avctx->height);
if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
&& ( av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0
|| av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)) {
av_log(avctx, AV_LOG_WARNING, "ignoring invalid width/height values\n");
avcodec_set_dimensions(avctx, 0, 0);
}
/* if the decoder init function was already called previously,
* free the already allocated subtitle_header before overwriting it */
if (av_codec_is_decoder(codec))
av_freep(&avctx->subtitle_header);
#define SANE_NB_CHANNELS 128U
if (avctx->channels > SANE_NB_CHANNELS) {
ret = AVERROR(EINVAL);
        goto free_and_end;
    }
if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
avctx->codec_id == AV_CODEC_ID_NONE) {
avctx->codec_type = codec->type;
avctx->codec_id = codec->id;
}
if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
&& avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
(!avctx->time_base.num || !avctx->time_base.den)) {
avctx->time_base.num = 1;
avctx->time_base.den = avctx->sample_rate;
}
if (HAVE_THREADS && !avctx->thread_opaque) {
ret = ff_thread_init(avctx);
if (ret < 0) {
goto free_and_end;
}
}
if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS))
avctx->thread_count = 1;
if (av_codec_is_encoder(avctx->codec)) {
if (avctx->codec->sample_fmts) {
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
break;
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
ret = AVERROR(EINVAL);
goto free_and_end;
            }
        }
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == PIX_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified pix_fmt is not supported\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
}
if (avctx->codec->supported_samplerates) {
for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
break;
if (avctx->codec->supported_samplerates[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample_rate is not supported\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
}
if (avctx->codec->channel_layouts) {
if (!avctx->channel_layout) {
av_log(avctx, AV_LOG_WARNING, "channel_layout not specified\n");
} else {
for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
if (avctx->channel_layout == avctx->codec->channel_layouts[i])
break;
if (avctx->codec->channel_layouts[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified channel_layout is not supported\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
}
}
if (avctx->channel_layout && avctx->channels) {
if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "channel layout does not match number of channels\n");
ret = AVERROR(EINVAL);
goto free_and_end;
}
} else if (avctx->channel_layout) {
avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
        }
    }
    if (avctx->codec->init && !(avctx->active_thread_type & FF_THREAD_FRAME)) {
        ret = avctx->codec->init(avctx);
        if (ret < 0) {
            goto free_and_end;
        }
    }
if (av_codec_is_decoder(avctx->codec)) {
/* validate channel layout from the decoder */
if (avctx->channel_layout &&
av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
av_log(avctx, AV_LOG_WARNING, "channel layout does not match number of channels\n");
avctx->channel_layout = 0;
}
}
end:
entangled_thread_counter--;
/* Release any user-supplied mutex. */
if (ff_lockmgr_cb) {
(*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
}
if (options) {
av_dict_free(options);
*options = tmp;
    }

    return ret;
free_and_end:
av_dict_free(&tmp);
av_freep(&avctx->priv_data);
av_freep(&avctx->internal);
    goto end;
}
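/**
 * Make sure avpkt can hold at least size bytes: a caller-provided buffer is
 * kept (and its destruct callback preserved) when it is large enough,
 * otherwise a new packet of the requested size is allocated.
 */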
int ff_alloc_packet(AVPacket *avpkt, int size)
{
    if (size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
return AVERROR(EINVAL);
if (avpkt->data) {
void *destruct = avpkt->destruct;
if (avpkt->size < size)
return AVERROR(EINVAL);
av_init_packet(avpkt);
        avpkt->destruct = destruct;
        avpkt->size     = size;
return 0;
} else {
return av_new_packet(avpkt, size);
    }
}
/**
* Pad last frame with silence.
*/
static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
{
AVFrame *frame = NULL;
uint8_t *buf = NULL;
int ret;
if (!(frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
*frame = *src;
if ((ret = av_samples_get_buffer_size(&frame->linesize[0], s->channels,
s->frame_size, s->sample_fmt, 0)) < 0)
goto fail;
if (!(buf = av_malloc(ret))) {
ret = AVERROR(ENOMEM);
goto fail;
}
frame->nb_samples = s->frame_size;
if ((ret = avcodec_fill_audio_frame(frame, s->channels, s->sample_fmt,
buf, ret, 0)) < 0)
goto fail;
if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
src->nb_samples, s->channels, s->sample_fmt)) < 0)
goto fail;
if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
frame->nb_samples - src->nb_samples,
s->channels, s->sample_fmt)) < 0)
goto fail;
*dst = frame;
return 0;
fail:
if (frame->extended_data != frame->data)
av_freep(&frame->extended_data);
av_freep(&buf);
av_freep(&frame);
return ret;
}
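/**
 * Encode one audio frame into avpkt and set *got_packet_ptr when a packet was
 * produced.  A minimal call pattern (sketch only; error handling and muxing
 * omitted, write_packet() is a hypothetical helper):
 * @code
 *     AVPacket pkt;
 *     int got_packet = 0;
 *     av_init_packet(&pkt);
 *     pkt.data = NULL; // let the encoder allocate the packet buffer
 *     pkt.size = 0;
 *     if (avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) == 0 && got_packet)
 *         write_packet(&pkt);
 * @endcode
 */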
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
AVFrame tmp;
AVFrame *padded_frame = NULL;
int ret;
int user_packet = !!avpkt->data;
*got_packet_ptr = 0;
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
av_free_packet(avpkt);
        av_init_packet(avpkt);
        avpkt->size = 0;
        return 0;
}
/* ensure that extended_data is properly set */
if (frame && !frame->extended_data) {
if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
avctx->channels > AV_NUM_DATA_POINTERS) {
av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
"with more than %d channels, but extended_data is not set.\n",
AV_NUM_DATA_POINTERS);
return AVERROR(EINVAL);
}
av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
tmp = *frame;
tmp.extended_data = tmp.data;
frame = &tmp;
}
/* check for valid frame size */
if (frame) {
if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
if (frame->nb_samples > avctx->frame_size)
return AVERROR(EINVAL);
} else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
if (frame->nb_samples < avctx->frame_size &&
!avctx->internal->last_audio_frame) {
ret = pad_last_frame(avctx, &padded_frame, frame);
if (ret < 0)
return ret;
frame = padded_frame;
avctx->internal->last_audio_frame = 1;
}
if (frame->nb_samples != avctx->frame_size) {
ret = AVERROR(EINVAL);
goto end;
}
}
}
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) {
if (*got_packet_ptr) {
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);