diff --git a/avconv.c b/avconv.c
index 9c94134943b2694a7e029dac0834a6c037cbf845..1e645b8c756a0b5d55d03ae4f2c5373de6cad8a2 100644
--- a/avconv.c
+++ b/avconv.c
@@ -1267,7 +1267,8 @@ static void do_video_out(AVFormatContext *s,
         av_init_packet(&pkt);
         pkt.stream_index= ost->index;
 
-        if (s->oformat->flags & AVFMT_RAWPICTURE) {
+        if (s->oformat->flags & AVFMT_RAWPICTURE &&
+            enc->codec->id == CODEC_ID_RAWVIDEO) {
             /* raw pictures are written as AVPicture structure to
                avoid any copies. We support temporarily the older
                method. */
@@ -1528,7 +1529,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
 
         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
             continue;
-        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
             continue;
 
         for(;;) {
diff --git a/doc/APIchanges b/doc/APIchanges
index 97a5c0068d6c8222f29d381ff78c893179b74371..23e568922c4b80e8ab8bdb618d2009ccd8583bc6 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -22,6 +22,19 @@ API changes, most recent first:
 2011-10-20 - b35e9e1 - lavu 51.22.0
   Add av_strtok() to avstring.h.
 
+2011-xx-xx - xxxxxxx - lavc 53.25.0
+  Add nb_samples and extended_data fields to AVFrame.
+  Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
+  Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
+  avcodec_decode_audio4() writes output samples to an AVFrame, which allows
+  audio decoders to use get_buffer().
+
+2011-xx-xx - xxxxxxx - lavc 53.24.0
+  Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
+  Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
+  Change AVCodecContext.error[4] to [8] at next major bump.
+  Add AV_NUM_DATA_POINTERS to simplify the bump transition.
+
 2011-11-23 - bbb46f3 - lavu 51.18.0
   Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
   av_samples_alloc(), to samplefmt.h.
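The lavc 53.25.0 entry above introduces avcodec_decode_audio4(), which returns decoded samples in an AVFrame instead of a caller-supplied buffer. Below is a minimal caller-side sketch, assuming avctx has already been opened with avcodec_open2() and pkt holds one coded packet; decode_packet and data_size are illustrative names, not part of the API:

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    /* Sketch only: decode one packet with the new audio decoding API. */
    static int decode_packet(AVCodecContext *avctx, AVPacket *pkt)
    {
        AVFrame frame;
        int got_frame = 0;
        int len;

        avcodec_get_frame_defaults(&frame);
        len = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (len < 0)
            return len;                             /* decoding error */
        if (got_frame) {
            /* frame.nb_samples samples per channel are now available in
             * frame.data[0]; derive the byte size if a flat buffer is needed */
            int data_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                       frame.nb_samples,
                                                       avctx->sample_fmt, 1);
            /* ... consume data_size bytes of decoded audio here ... */
        }
        return len;                                 /* bytes consumed from pkt */
    }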
diff --git a/doc/developer.texi b/doc/developer.texi
index 2052854a40daf4b5f1044d6f6c68bcb1ffb0c02a..800ca7d0455ceeaf159da6e4c534483e5819cda1 100644
--- a/doc/developer.texi
+++ b/doc/developer.texi
@@ -53,48 +53,26 @@ and should try to fix issues their commit causes.
 @anchor{Coding Rules}
 @section Coding Rules
 
-FFmpeg is programmed in the ISO C90 language with a few additional
-features from ISO C99, namely:
-@itemize @bullet
-@item
-the @samp{inline} keyword;
-@item
-@samp{//} comments;
-@item
-designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
-@item
-compound literals (@samp{x = (struct s) @{ 17, 23 @};})
-@end itemize
-
-These features are supported by all compilers we care about, so we will not
-accept patches to remove their use unless they absolutely do not impair
-clarity and performance.
+@subsection Code formatting conventions
 
-All code must compile with recent versions of GCC and a number of other
-currently supported compilers. To ensure compatibility, please do not use
-additional C99 features or GCC extensions. Especially watch out for:
+The following guidelines apply to indentation in files:
 @itemize @bullet
 @item
-mixing statements and declarations;
-@item
-@samp{long long} (use @samp{int64_t} instead);
-@item
-@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
-@item
-GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
-@end itemize
-
 Indent size is 4.
-The presentation is one inspired by 'indent -i4 -kr -nut'.
+@item
 The TAB character is forbidden outside of Makefiles as is any
 form of trailing whitespace. Commits containing either will be
 rejected by the git repository.
+@item
+You should try to limit your code lines to 80 characters; however, do so only where it improves readability.
+@end itemize
+The presentation is inspired by 'indent -i4 -kr -nut', as shown in the sketch below.
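A brief sketch of what these formatting rules look like in practice; the function below is invented purely for illustration:

    /* illustrative only: 4-space indentation, no tabs, K&R-style braces */
    static int clip_value(int value, int min, int max)
    {
        if (value < min)
            return min;
        if (value > max)
            return max;
        return value;
    }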
 
 The main priority in FFmpeg is simplicity and small code size in order to
 minimize the bug count.
 
-Comments: Use the JavaDoc/Doxygen
-format (see examples below) so that code documentation
+@subsection Comments
+Use the JavaDoc/Doxygen format (see examples below) so that code documentation
 can be generated automatically. All nontrivial functions should have a comment
 above them explaining what the function does, even if it is just one sentence.
 All structures and their member variables should be documented, too.
@@ -128,11 +106,69 @@ int myfunc(int my_parameter)
 ...
 @end example
 
+@subsection C language features
+
+FFmpeg is programmed in the ISO C90 language with a few additional
+features from ISO C99, namely:
+@itemize @bullet
+@item
+the @samp{inline} keyword;
+@item
+@samp{//} comments;
+@item
+designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
+@item
+compound literals (@samp{x = (struct s) @{ 17, 23 @};})
+@end itemize
+
+These features are supported by all compilers we care about, so we will not
+accept patches to remove their use unless they absolutely do not impair
+clarity and performance.
+
+All code must compile with recent versions of GCC and a number of other
+currently supported compilers. To ensure compatibility, please do not use
+additional C99 features or GCC extensions. Especially watch out for:
+@itemize @bullet
+@item
+mixing statements and declarations;
+@item
+@samp{long long} (use @samp{int64_t} instead);
+@item
+@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
+@item
+GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
+@end itemize
+
+@subsection Naming conventions
+All names use underscores (_), not CamelCase. For example, @samp{avfilter_get_video_buffer} is
+a valid function name and @samp{AVFilterGetVideo} is not. The only exceptions to this are structure
+names; they should always be in CamelCase.
+
+The following conventions apply to naming variables and functions (see the sketch after this list):
+@itemize @bullet
+@item
+For local variables no prefix is required.
+@item
+For variables and functions declared as @code{static}, no prefix is required.
+@item
+For variables and functions used internally by the library, the @code{ff_} prefix should be used.
+For example, @samp{ff_w64_demuxer}.
+@item
+For variables and functions used internally across multiple libraries, use @code{avpriv_}. For example,
+@samp{avpriv_aac_parse_header}.
+@item
+For exported names, each library has its own prefixes. Just check the existing code and name accordingly.
+@end itemize
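A hedged sketch of how the prefix rules above map onto declarations; every identifier below is made up for the example and is not a real FFmpeg symbol:

    static int  frame_count;                    /* static: no prefix required         */
    int         ff_foo_decode_init(void);       /* internal to a single library       */
    int         avpriv_foo_parse_header(void);  /* shared internally across libraries */
    int         avcodec_foo_open(void);         /* exported: library-specific prefix  */

    typedef struct FooDecodeContext {           /* structure names use CamelCase      */
        int sample_rate;                        /* members use lowercase_with_underscores */
    } FooDecodeContext;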
+
+@subsection Miscellaneous conventions
+@itemize @bullet
+@item
 fprintf and printf are forbidden in libavformat and libavcodec,
 please use av_log() instead.
-
+@item
 Casts should be used only when necessary. Unneeded parentheses
 should also be avoided if they don't make the code easier to understand.
+@end itemize
 
 @section Development Policy
 
diff --git a/doc/general.texi b/doc/general.texi
index 120b7160c25e3c9c57ae5a91c610c68975a62891..04ca71db916de0205349e17168ec4fbea5c3d75c 100644
--- a/doc/general.texi
+++ b/doc/general.texi
@@ -840,13 +840,22 @@ bash directly to work around this:
 bash ./configure
 @end example
 
-@subsection Darwin (MacOS X, iPhone)
+@anchor{Darwin}
+@subsection Darwin (OSX, iPhone)
 
-MacOS X on PowerPC or ARM (iPhone) requires a preprocessor from
+The toolchain provided with Xcode is sufficient to build the basic
+unaccelerated code.
+
+OSX on PowerPC or ARM (iPhone) requires a preprocessor from
 @url{http://github.com/yuvi/gas-preprocessor} to build the optimized
 assembler functions. Just download the Perl script and put it somewhere
 in your PATH, FFmpeg's configure will pick it up automatically.
 
+OSX on amd64 and x86 requires @command{yasm} to build most of the
+optimized assembler functions. @url{http://mxcl.github.com/homebrew/, Homebrew},
+@url{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix}
+or @url{http://www.macports.org, MacPorts} can easily provide it.
+
 @section Windows
 
 To get help and instructions for building FFmpeg under Windows, check out
diff --git a/ffmpeg.c b/ffmpeg.c
index e81936de8cc6c5c9cf9c3a847f5953db49b26b23..3158bb4874e06fe4ca5951261ad99b39171bf8ad 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -1295,7 +1295,8 @@ static void do_video_out(AVFormatContext *s,
         av_init_packet(&pkt);
         pkt.stream_index= ost->index;
 
-        if (s->oformat->flags & AVFMT_RAWPICTURE) {
+        if (s->oformat->flags & AVFMT_RAWPICTURE &&
+            enc->codec->id == CODEC_ID_RAWVIDEO) {
             /* raw pictures are written as AVPicture structure to
                avoid any copies. We support temporarily the older
                method. */
@@ -1560,7 +1561,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
 
         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
             continue;
-        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
             continue;
 
         for(;;) {
diff --git a/libavcodec/8svx.c b/libavcodec/8svx.c
index efe554adc298b4aa131c2003354f111af4d26975..b4fc899a2297bfdb2a53e814ed49cb560f57bc2f 100644
--- a/libavcodec/8svx.c
+++ b/libavcodec/8svx.c
@@ -41,6 +41,7 @@
 
 /** decoder context */
 typedef struct EightSvxContext {
+    AVFrame frame;
     const int8_t *table;
 
     /* buffer used to store the whole audio decoded/interleaved chunk,
@@ -99,11 +100,12 @@ static int delta_decode(int8_t *dst, const uint8_t *src, int src_size,
     return dst-dst0;
 }
 
-static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                                 AVPacket *avpkt)
+/** decode a frame */
+static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
+                                 int *got_frame_ptr, AVPacket *avpkt)
 {
     EightSvxContext *esc = avctx->priv_data;
-    int out_data_size, n;
+    int n, out_data_size, ret;
     uint8_t *src, *dst;
 
     /* decode and interleave the first packet */
@@ -145,19 +147,22 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si
             memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
     }
 
-    /* return single packed with fixed size */
-    out_data_size = FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx);
-    if (*data_size < out_data_size) {
-        av_log(avctx, AV_LOG_ERROR, "Provided buffer with size %d is too small.\n", *data_size);
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    esc->frame.nb_samples = (FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx) + avctx->channels - 1) / avctx->channels;
+    if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
-    *data_size = out_data_size;
-    dst = data;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = esc->frame;
+
+    dst = esc->frame.data[0];
     src = esc->samples + esc->samples_idx;
+    out_data_size = esc->frame.nb_samples * avctx->channels;
     for (n = out_data_size; n > 0; n--)
         *dst++ = *src++ + 128;
-    esc->samples_idx += *data_size;
+    esc->samples_idx += out_data_size;
 
     return avctx->codec->id == CODEC_ID_8SVX_FIB || avctx->codec->id == CODEC_ID_8SVX_EXP ?
         (avctx->frame_number == 0)*2 + out_data_size / 2 :
@@ -184,6 +189,9 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
     }
     avctx->sample_fmt = AV_SAMPLE_FMT_U8;
 
+    avcodec_get_frame_defaults(&esc->frame);
+    avctx->coded_frame = &esc->frame;
+
     return 0;
 }
 
@@ -206,6 +214,7 @@ AVCodec ff_eightsvx_fib_decoder = {
   .init           = eightsvx_decode_init,
   .decode         = eightsvx_decode_frame,
   .close          = eightsvx_decode_close,
+  .capabilities   = CODEC_CAP_DR1,
   .long_name      = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
 };
 
@@ -217,6 +226,7 @@ AVCodec ff_eightsvx_exp_decoder = {
   .init           = eightsvx_decode_init,
   .decode         = eightsvx_decode_frame,
   .close          = eightsvx_decode_close,
+  .capabilities   = CODEC_CAP_DR1,
   .long_name      = NULL_IF_CONFIG_SMALL("8SVX exponential"),
 };
 
@@ -228,5 +238,6 @@ AVCodec ff_pcm_s8_planar_decoder = {
     .init           = eightsvx_decode_init,
     .close          = eightsvx_decode_close,
     .decode         = eightsvx_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
 };
diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index d01534de3f8bf3a12747599e5513dc0ad887d36c..d31103684cd0df4d7ff9c82ad4c71b51e4240f32 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -251,6 +251,7 @@ typedef struct {
  */
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
 
     MPEG4AudioConfig m4ac;
 
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 5a2b230d24a70c97aca62a8bda779bae3b596010..a046d991e662baa831d883f64d3022eb6c70599e 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -471,15 +471,17 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
  * @param   ac          pointer to AACContext, may be null
  * @param   avctx       pointer to AVCCodecContext, used for logging
  * @param   m4ac        pointer to MPEG4AudioConfig, used for parsing
- * @param   data        pointer to AVCodecContext extradata
- * @param   data_size   size of AVCCodecContext extradata
+ * @param   data        pointer to buffer holding an audio specific config
+ * @param   bit_size    size of audio specific config or data in bits
+ * @param   sync_extension look for an appended sync extension
  *
  * @return  Returns error status or number of consumed bits. <0 - error
  */
 static int decode_audio_specific_config(AACContext *ac,
                                         AVCodecContext *avctx,
                                         MPEG4AudioConfig *m4ac,
-                                        const uint8_t *data, int data_size, int asclen)
+                                        const uint8_t *data, int bit_size,
+                                        int sync_extension)
 {
     GetBitContext gb;
     int i;
@@ -489,9 +491,9 @@ static int decode_audio_specific_config(AACContext *ac,
          av_dlog(avctx, "%02x ", avctx->extradata[i]);
     av_dlog(avctx, "\n");
 
-    init_get_bits(&gb, data, data_size * 8);
+    init_get_bits(&gb, data, bit_size);
 
-    if ((i = avpriv_mpeg4audio_get_config(m4ac, data, asclen/8)) < 0)
+    if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
         return -1;
     if (m4ac->sampling_index > 12) {
         av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
@@ -591,7 +593,7 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     if (avctx->extradata_size > 0) {
         if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
                                          avctx->extradata,
-                                         avctx->extradata_size, 8*avctx->extradata_size) < 0)
+                                         avctx->extradata_size*8, 1) < 0)
             return -1;
     } else {
         int sr, i;
@@ -665,6 +667,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
 
     cbrt_tableinit();
 
+    avcodec_get_frame_defaults(&ac->frame);
+    avctx->coded_frame = &ac->frame;
+
     return 0;
 }
 
@@ -2132,12 +2137,12 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
 }
 
 static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
-                                int *data_size, GetBitContext *gb)
+                                int *got_frame_ptr, GetBitContext *gb)
 {
     AACContext *ac = avctx->priv_data;
     ChannelElement *che = NULL, *che_prev = NULL;
     enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
-    int err, elem_id, data_size_tmp;
+    int err, elem_id;
     int samples = 0, multiplier, audio_found = 0;
 
     if (show_bits(gb, 12) == 0xfff) {
@@ -2250,24 +2255,26 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         avctx->frame_size = samples;
     }
 
-    data_size_tmp = samples * avctx->channels *
-                    av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < data_size_tmp) {
-        av_log(avctx, AV_LOG_ERROR,
-               "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
-               *data_size, data_size_tmp);
-        return -1;
-    }
-    *data_size = data_size_tmp;
-
     if (samples) {
+        /* get output buffer */
+        ac->frame.nb_samples = samples;
+        if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return err;
+        }
+
         if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
-            ac->fmt_conv.float_interleave(data, (const float **)ac->output_data,
+            ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
+                                          (const float **)ac->output_data,
                                           samples, avctx->channels);
         else
-            ac->fmt_conv.float_to_int16_interleave(data, (const float **)ac->output_data,
+            ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
+                                                   (const float **)ac->output_data,
                                                    samples, avctx->channels);
+
+        *(AVFrame *)data = ac->frame;
     }
+    *got_frame_ptr = !!samples;
 
     if (ac->output_configured && audio_found)
         ac->output_configured = OC_LOCKED;
@@ -2276,7 +2283,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 }
 
 static int aac_decode_frame(AVCodecContext *avctx, void *data,
-                            int *data_size, AVPacket *avpkt)
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -2287,7 +2294,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
 
     init_get_bits(&gb, buf, buf_size * 8);
 
-    if ((err = aac_decode_frame_int(avctx, data, data_size, &gb)) < 0)
+    if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
         return err;
 
     buf_consumed = (get_bits_count(&gb) + 7) >> 3;
@@ -2340,30 +2347,40 @@ static inline uint32_t latm_get_value(GetBitContext *b)
 static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
                                              GetBitContext *gb, int asclen)
 {
-    AVCodecContext *avctx = latmctx->aac_ctx.avctx;
-    AACContext *ac= &latmctx->aac_ctx;
-    MPEG4AudioConfig m4ac=ac->m4ac;
-    int  config_start_bit = get_bits_count(gb);
-    int     bits_consumed, esize;
+    AACContext *ac        = &latmctx->aac_ctx;
+    AVCodecContext *avctx = ac->avctx;
+    MPEG4AudioConfig m4ac = {0};
+    int config_start_bit  = get_bits_count(gb);
+    int sync_extension    = 0;
+    int bits_consumed, esize;
+
+    if (asclen) {
+        sync_extension = 1;
+        asclen         = FFMIN(asclen, get_bits_left(gb));
+    } else
+        asclen         = get_bits_left(gb);
 
     if (config_start_bit % 8) {
         av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific "
                                "config not byte aligned.\n", 1);
         return AVERROR_INVALIDDATA;
-    } else {
-        bits_consumed =
-            decode_audio_specific_config(ac, avctx, &m4ac,
+    }
+    bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac,
                                          gb->buffer + (config_start_bit / 8),
-                                         get_bits_left(gb) / 8, asclen);
+                                         asclen, sync_extension);
 
-        if (bits_consumed < 0)
-            return AVERROR_INVALIDDATA;
-        if(ac->m4ac.sample_rate != m4ac.sample_rate || m4ac.chan_config != ac->m4ac.chan_config)
-            ac->m4ac= m4ac;
+    if (bits_consumed < 0)
+        return AVERROR_INVALIDDATA;
+
+    if (ac->m4ac.sample_rate != m4ac.sample_rate ||
+        ac->m4ac.chan_config != m4ac.chan_config) {
+
+        av_log(avctx, AV_LOG_INFO, "audio config changed\n");
+        latmctx->initialized = 0;
 
         esize = (bits_consumed+7) / 8;
 
-        if (avctx->extradata_size <= esize) {
+        if (avctx->extradata_size < esize) {
             av_free(avctx->extradata);
             avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE);
             if (!avctx->extradata)
@@ -2373,9 +2390,8 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
         avctx->extradata_size = esize;
         memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize);
         memset(avctx->extradata+esize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
-        skip_bits_long(gb, bits_consumed);
     }
+    skip_bits_long(gb, bits_consumed);
 
     return bits_consumed;
 }
@@ -2512,8 +2528,8 @@ static int read_audio_mux_element(struct LATMContext *latmctx,
 }
 
 
-static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
-                             AVPacket *avpkt)
+static int latm_decode_frame(AVCodecContext *avctx, void *out,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     struct LATMContext *latmctx = avctx->priv_data;
     int                 muxlength, err;
@@ -2535,12 +2551,12 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
 
     if (!latmctx->initialized) {
         if (!avctx->extradata) {
-            *out_size = 0;
+            *got_frame_ptr = 0;
             return avpkt->size;
         } else {
             if ((err = decode_audio_specific_config(
                     &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.m4ac,
-                    avctx->extradata, avctx->extradata_size, 8*avctx->extradata_size)) < 0)
+                    avctx->extradata, avctx->extradata_size*8, 1)) < 0)
                 return err;
             latmctx->initialized = 1;
         }
@@ -2553,7 +2569,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
         return AVERROR_INVALIDDATA;
     }
 
-    if ((err = aac_decode_frame_int(avctx, out, out_size, &gb)) < 0)
+    if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
         return err;
 
     return muxlength;
@@ -2583,7 +2599,7 @@ AVCodec ff_aac_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF,
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
 };
 
@@ -2604,7 +2620,7 @@ AVCodec ff_aac_latm_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF,
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
     .flush = flush,
 };
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index 09b9a3102c5f319cba6ef4b0bcd2a1f8d1ae43f1..c65088143098489b5b2bdd4f6f9647463fe6970c 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -208,6 +208,9 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
     }
     s->downmixed = 1;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -1296,16 +1299,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
 /**
  * Decode a single AC-3 frame.
  */
-static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
-                            AVPacket *avpkt)
+static int ac3_decode_frame(AVCodecContext * avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AC3DecodeContext *s = avctx->priv_data;
-    float   *out_samples_flt = data;
-    int16_t *out_samples_s16 = data;
-    int blk, ch, err;
-    int data_size_orig, data_size_tmp;
+    float   *out_samples_flt;
+    int16_t *out_samples_s16;
+    int blk, ch, err, ret;
     const uint8_t *channel_map;
     const float *output[AC3_MAX_CHANNELS];
 
@@ -1322,8 +1324,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
     init_get_bits(&s->gbc, buf, buf_size * 8);
 
     /* parse the syncinfo */
-    data_size_orig = *data_size;
-    *data_size = 0;
     err = parse_frame_header(s);
 
     if (err) {
@@ -1345,6 +1345,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
                 /* TODO: add support for substreams and dependent frames */
                 if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
                     av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n");
+                    *got_frame_ptr = 0;
                     return s->frame_size;
                 } else {
                     av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
@@ -1406,21 +1407,24 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
     if (s->bitstream_mode == 0x7 && s->channels > 1)
         avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
 
+    /* get output buffer */
+    s->frame.nb_samples = s->num_blocks * 256;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    out_samples_flt = (float   *)s->frame.data[0];
+    out_samples_s16 = (int16_t *)s->frame.data[0];
+
     /* decode the audio blocks */
     channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on];
     for (ch = 0; ch < s->out_channels; ch++)
         output[ch] = s->output[channel_map[ch]];
-    data_size_tmp = s->num_blocks * 256 * avctx->channels;
-    data_size_tmp *= avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? sizeof(*out_samples_flt) : sizeof(*out_samples_s16);
-    if (data_size_orig < data_size_tmp)
-        return -1;
-    *data_size = data_size_tmp;
     for (blk = 0; blk < s->num_blocks; blk++) {
         if (!err && decode_audio_block(s, blk)) {
             av_log(avctx, AV_LOG_ERROR, "error decoding the audio block\n");
             err = 1;
         }
-
         if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
             s->fmt_conv.float_interleave(out_samples_flt, output, 256,
                                          s->out_channels);
@@ -1431,8 +1435,10 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
             out_samples_s16 += 256 * s->out_channels;
         }
     }
-    *data_size = s->num_blocks * 256 * avctx->channels *
-                 av_get_bytes_per_sample(avctx->sample_fmt);
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return FFMIN(buf_size, s->frame_size);
 }
 
@@ -1477,6 +1483,7 @@ AVCodec ff_ac3_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
@@ -1499,6 +1506,7 @@ AVCodec ff_eac3_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
diff --git a/libavcodec/ac3dec.h b/libavcodec/ac3dec.h
index aa346cb020e3e15c07849fdce8d8fc3f9a955b9b..0f5e86e6b332e8c84a4427b20376af6f452d2991 100644
--- a/libavcodec/ac3dec.h
+++ b/libavcodec/ac3dec.h
@@ -68,6 +68,7 @@
 typedef struct {
     AVClass        *class;                  ///< class for AVOptions
     AVCodecContext *avctx;                  ///< parent context
+    AVFrame frame;                          ///< AVFrame for decoded output
     GetBitContext gbc;                      ///< bitstream reader
 
 ///@name Bit stream information
diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index 1281acc33aab5c55c746bf033826429eb4e1c160..e305220af5304f7b2afaa8561411e93bf18c156f 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -84,6 +84,7 @@ static const int swf_index_tables[4][16] = {
 /* end of tables */
 
 typedef struct ADPCMDecodeContext {
+    AVFrame frame;
     ADPCMChannelStatus status[6];
 } ADPCMDecodeContext;
 
@@ -124,6 +125,10 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
         break;
     }
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
@@ -501,9 +506,8 @@ static int get_nb_samples(AVCodecContext *avctx, const uint8_t *buf,
         decode_top_nibble_next = 1; \
     }
 
-static int adpcm_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -514,7 +518,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
     const uint8_t *src;
     int st; /* stereo */
     int count1, count2;
-    int nb_samples, coded_samples, out_bps, out_size;
+    int nb_samples, coded_samples, ret;
 
     nb_samples = get_nb_samples(avctx, buf, buf_size, &coded_samples);
     if (nb_samples <= 0) {
@@ -522,22 +526,22 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
 
-    out_bps  = av_get_bytes_per_sample(avctx->sample_fmt);
-    out_size = nb_samples * avctx->channels * out_bps;
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    c->frame.nb_samples = nb_samples;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (short *)c->frame.data[0];
+
     /* use coded_samples when applicable */
     /* it is always <= nb_samples, so the output buffer will be large enough */
     if (coded_samples) {
         if (coded_samples != nb_samples)
             av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
-        nb_samples = coded_samples;
-        out_size = nb_samples * avctx->channels * out_bps;
+        c->frame.nb_samples = nb_samples = coded_samples;
     }
 
-    samples = data;
     src = buf;
 
     st = avctx->channels == 2 ? 1 : 0;
@@ -576,7 +580,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
                 cs->step_index = 88;
             }
 
-            samples = (short*)data + channel;
+            samples = (short *)c->frame.data[0] + channel;
 
             for (m = 0; m < 32; m++) {
                 *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
@@ -628,7 +632,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         }
 
         for (i = 0; i < avctx->channels; i++) {
-            samples = (short*)data + i;
+            samples = (short *)c->frame.data[0] + i;
             cs = &c->status[i];
             for (n = nb_samples >> 1; n > 0; n--, src++) {
                 uint8_t v = *src;
@@ -965,7 +969,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
             }
         }
 
-        out_size = count * 28 * avctx->channels * out_bps;
+        c->frame.nb_samples = count * 28;
         src = src_end;
         break;
     }
@@ -1144,7 +1148,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
             prev[0][i] = (int16_t)bytestream_get_be16(&src);
 
         for (ch = 0; ch <= st; ch++) {
-            samples = (unsigned short *) data + ch;
+            samples = (short *)c->frame.data[0] + ch;
 
             /* Read in every sample for this channel.  */
             for (i = 0; i < nb_samples / 14; i++) {
@@ -1177,7 +1181,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
     default:
         return -1;
     }
-    *data_size = out_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
+
     return src - buf;
 }
 
@@ -1190,6 +1197,7 @@ AVCodec ff_ ## name_ ## _decoder = {                        \
     .priv_data_size = sizeof(ADPCMDecodeContext),           \
     .init           = adpcm_decode_init,                    \
     .decode         = adpcm_decode_frame,                   \
+    .capabilities   = CODEC_CAP_DR1,                        \
     .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
 }
 
diff --git a/libavcodec/adx.h b/libavcodec/adx.h
index 572483d28f99edea1ea3903e8e89875ecb88f8bf..a14ddce4999a4dd21f43f66e4971ff6cde22b405 100644
--- a/libavcodec/adx.h
+++ b/libavcodec/adx.h
@@ -40,6 +40,7 @@ typedef struct {
 } ADXChannelState;
 
 typedef struct {
+    AVFrame frame;
     int channels;
     ADXChannelState prev[2];
     int header_parsed;
diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c
index 0fed1220ef0d90fb08a49530be505949e1e3cd90..f049def6266bbe5443a90aa4d9692ffef1fa8378 100644
--- a/libavcodec/adxdec.c
+++ b/libavcodec/adxdec.c
@@ -50,6 +50,10 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
     c->channels = avctx->channels;
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
@@ -89,36 +93,42 @@ static int adx_decode(ADXContext *c, int16_t *out, const uint8_t *in, int ch)
     return 0;
 }
 
-static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                            AVPacket *avpkt)
+static int adx_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     int buf_size        = avpkt->size;
     ADXContext *c       = avctx->priv_data;
-    int16_t *samples    = data;
+    int16_t *samples;
     const uint8_t *buf  = avpkt->data;
-    int num_blocks, ch;
+    int num_blocks, ch, ret;
 
     if (c->eof) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
-    /* 18 bytes of data are expanded into 32*2 bytes of audio,
-       so guard against buffer overflows */
+    /* calculate number of blocks in the packet */
     num_blocks = buf_size / (BLOCK_SIZE * c->channels);
-    if (num_blocks > *data_size / (BLOCK_SAMPLES * c->channels)) {
-        buf_size   = (*data_size / (BLOCK_SAMPLES * c->channels)) * BLOCK_SIZE;
-        num_blocks = buf_size / (BLOCK_SIZE * c->channels);
-    }
-    if (!buf_size || buf_size % (BLOCK_SIZE * avctx->channels)) {
+
+    /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF
+       packet */
+    if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) {
         if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) {
             c->eof = 1;
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return avpkt->size;
         }
         return AVERROR_INVALIDDATA;
     }
 
+    /* get output buffer */
+    c->frame.nb_samples = num_blocks * BLOCK_SAMPLES;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples = (int16_t *)c->frame.data[0];
+
     while (num_blocks--) {
         for (ch = 0; ch < c->channels; ch++) {
             if (adx_decode(c, samples + ch, buf, ch)) {
@@ -132,7 +142,9 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         samples += BLOCK_SAMPLES * c->channels;
     }
 
-    *data_size = (uint8_t*)samples - (uint8_t*)data;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
+
     return buf - avpkt->data;
 }
 
@@ -143,5 +155,6 @@ AVCodec ff_adpcm_adx_decoder = {
     .priv_data_size = sizeof(ADXContext),
     .init           = adx_decode_init,
     .decode         = adx_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
 };
diff --git a/libavcodec/alac.c b/libavcodec/alac.c
index 4e143270a552e522d81477c91b92736d85cadde5..2788238c78ea93ca00ced912f4844aba0e09456f 100644
--- a/libavcodec/alac.c
+++ b/libavcodec/alac.c
@@ -62,10 +62,10 @@
 typedef struct {
 
     AVCodecContext *avctx;
+    AVFrame frame;
     GetBitContext gb;
 
     int numchannels;
-    int bytespersample;
 
     /* buffers */
     int32_t *predicterror_buffer[MAX_CHANNELS];
@@ -351,9 +351,8 @@ static void interleave_stereo_24(int32_t *buffer[MAX_CHANNELS],
     }
 }
 
-static int alac_decode_frame(AVCodecContext *avctx,
-                             void *outbuffer, int *outputsize,
-                             AVPacket *avpkt)
+static int alac_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *inbuffer = avpkt->data;
     int input_buffer_size = avpkt->size;
@@ -366,7 +365,7 @@ static int alac_decode_frame(AVCodecContext *avctx,
     int isnotcompressed;
     uint8_t interlacing_shift;
     uint8_t interlacing_leftweight;
-    int i, ch;
+    int i, ch, ret;
 
     init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8);
 
@@ -401,14 +400,17 @@ static int alac_decode_frame(AVCodecContext *avctx,
     } else
         outputsamples = alac->setinfo_max_samples_per_frame;
 
-    alac->bytespersample = channels * av_get_bytes_per_sample(avctx->sample_fmt);
-
-    if(outputsamples > *outputsize / alac->bytespersample){
-        av_log(avctx, AV_LOG_ERROR, "sample buffer too small\n");
-        return -1;
+    /* get output buffer */
+    if (outputsamples > INT32_MAX) {
+        av_log(avctx, AV_LOG_ERROR, "unsupported block size: %u\n", outputsamples);
+        return AVERROR_INVALIDDATA;
+    }
+    alac->frame.nb_samples = outputsamples;
+    if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
-    *outputsize = outputsamples * alac->bytespersample;
     readsamplesize = alac->setinfo_sample_size - alac->extra_bits + channels - 1;
     if (readsamplesize > MIN_CACHE_BITS) {
         av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize);
@@ -501,21 +503,23 @@ static int alac_decode_frame(AVCodecContext *avctx,
     switch(alac->setinfo_sample_size) {
     case 16:
         if (channels == 2) {
-            interleave_stereo_16(alac->outputsamples_buffer, outbuffer,
-                                 outputsamples);
+            interleave_stereo_16(alac->outputsamples_buffer,
+                                 (int16_t *)alac->frame.data[0], outputsamples);
         } else {
+            int16_t *outbuffer = (int16_t *)alac->frame.data[0];
             for (i = 0; i < outputsamples; i++) {
-                ((int16_t*)outbuffer)[i] = alac->outputsamples_buffer[0][i];
+                outbuffer[i] = alac->outputsamples_buffer[0][i];
             }
         }
         break;
     case 24:
         if (channels == 2) {
-            interleave_stereo_24(alac->outputsamples_buffer, outbuffer,
-                                 outputsamples);
+            interleave_stereo_24(alac->outputsamples_buffer,
+                                 (int32_t *)alac->frame.data[0], outputsamples);
         } else {
+            int32_t *outbuffer = (int32_t *)alac->frame.data[0];
             for (i = 0; i < outputsamples; i++)
-                ((int32_t *)outbuffer)[i] = alac->outputsamples_buffer[0][i] << 8;
+                outbuffer[i] = alac->outputsamples_buffer[0][i] << 8;
         }
         break;
     }
@@ -523,6 +527,9 @@ static int alac_decode_frame(AVCodecContext *avctx,
     if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8)
         av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb));
 
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = alac->frame;
+
     return input_buffer_size;
 }
 
@@ -637,6 +644,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
         return ret;
     }
 
+    avcodec_get_frame_defaults(&alac->frame);
+    avctx->coded_frame = &alac->frame;
+
     return 0;
 }
 
@@ -648,5 +658,6 @@ AVCodec ff_alac_decoder = {
     .init           = alac_decode_init,
     .close          = alac_decode_close,
     .decode         = alac_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
 };
diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c
index c399471c1dc7f3ac5fb64baddaa2fb8631bee232..305a5b825bd5eb9e403466a26cb78bdb10bf926d 100644
--- a/libavcodec/alacenc.c
+++ b/libavcodec/alacenc.c
@@ -75,20 +75,22 @@ typedef struct AlacEncodeContext {
 } AlacEncodeContext;
 
 
-static void init_sample_buffers(AlacEncodeContext *s, const int16_t *input_samples)
+static void init_sample_buffers(AlacEncodeContext *s,
+                                const int16_t *input_samples)
 {
     int ch, i;
 
-    for(ch=0;ch<s->avctx->channels;ch++) {
+    for (ch = 0; ch < s->avctx->channels; ch++) {
         const int16_t *sptr = input_samples + ch;
-        for(i=0;i<s->avctx->frame_size;i++) {
+        for (i = 0; i < s->avctx->frame_size; i++) {
             s->sample_buf[ch][i] = *sptr;
             sptr += s->avctx->channels;
         }
     }
 }
 
-static void encode_scalar(AlacEncodeContext *s, int x, int k, int write_sample_size)
+static void encode_scalar(AlacEncodeContext *s, int x,
+                          int k, int write_sample_size)
 {
     int divisor, q, r;
 
@@ -97,17 +99,17 @@ static void encode_scalar(AlacEncodeContext *s, int x, int k, int write_sample_s
     q = x / divisor;
     r = x % divisor;
 
-    if(q > 8) {
+    if (q > 8) {
         // write escape code and sample value directly
         put_bits(&s->pbctx, 9, ALAC_ESCAPE_CODE);
         put_bits(&s->pbctx, write_sample_size, x);
     } else {
-        if(q)
+        if (q)
             put_bits(&s->pbctx, q, (1<<q) - 1);
         put_bits(&s->pbctx, 1, 0);
 
-        if(k != 1) {
-            if(r > 0)
+        if (k != 1) {
+            if (r > 0)
                 put_bits(&s->pbctx, k, r+1);
             else
                 put_bits(&s->pbctx, k-1, 0);
@@ -164,7 +166,7 @@ static int estimate_stereo_mode(int32_t *left_ch, int32_t *right_ch, int n)
 
     /* calculate sum of 2nd order residual for each channel */
     sum[0] = sum[1] = sum[2] = sum[3] = 0;
-    for(i=2; i<n; i++) {
+    for (i = 2; i < n; i++) {
         lt = left_ch[i] - 2*left_ch[i-1] + left_ch[i-2];
         rt = right_ch[i] - 2*right_ch[i-1] + right_ch[i-2];
         sum[2] += FFABS((lt + rt) >> 1);
@@ -181,8 +183,8 @@ static int estimate_stereo_mode(int32_t *left_ch, int32_t *right_ch, int n)
 
     /* return mode with lowest score */
     best = 0;
-    for(i=1; i<4; i++) {
-        if(score[i] < score[best]) {
+    for (i = 1; i < 4; i++) {
+        if (score[i] < score[best]) {
             best = i;
         }
     }
@@ -205,7 +207,7 @@ static void alac_stereo_decorrelation(AlacEncodeContext *s)
             break;
 
         case ALAC_CHMODE_LEFT_SIDE:
-            for(i=0; i<n; i++) {
+            for (i = 0; i < n; i++) {
                 right[i] = left[i] - right[i];
             }
             s->interlacing_leftweight = 1;
@@ -213,7 +215,7 @@ static void alac_stereo_decorrelation(AlacEncodeContext *s)
             break;
 
         case ALAC_CHMODE_RIGHT_SIDE:
-            for(i=0; i<n; i++) {
+            for (i = 0; i < n; i++) {
                 tmp = right[i];
                 right[i] = left[i] - right[i];
                 left[i] = tmp + (right[i] >> 31);
@@ -223,7 +225,7 @@ static void alac_stereo_decorrelation(AlacEncodeContext *s)
             break;
 
         default:
-            for(i=0; i<n; i++) {
+            for (i = 0; i < n; i++) {
                 tmp = left[i];
                 left[i] = (tmp + right[i]) >> 1;
                 right[i] = tmp - right[i];
@@ -239,10 +241,10 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
     int i;
     AlacLPCContext lpc = s->lpc[ch];
 
-    if(lpc.lpc_order == 31) {
+    if (lpc.lpc_order == 31) {
         s->predictor_buf[0] = s->sample_buf[ch][0];
 
-        for(i=1; i<s->avctx->frame_size; i++)
+        for (i = 1; i < s->avctx->frame_size; i++)
             s->predictor_buf[i] = s->sample_buf[ch][i] - s->sample_buf[ch][i-1];
 
         return;
@@ -250,17 +252,17 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
 
     // generalised linear predictor
 
-    if(lpc.lpc_order > 0) {
+    if (lpc.lpc_order > 0) {
         int32_t *samples  = s->sample_buf[ch];
         int32_t *residual = s->predictor_buf;
 
         // generate warm-up samples
         residual[0] = samples[0];
-        for(i=1;i<=lpc.lpc_order;i++)
+        for (i = 1; i <= lpc.lpc_order; i++)
             residual[i] = samples[i] - samples[i-1];
 
         // perform lpc on remaining samples
-        for(i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {
+        for (i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {
             int sum = 1 << (lpc.lpc_quant - 1), res_val, j;
 
             for (j = 0; j < lpc.lpc_order; j++) {
@@ -303,7 +305,7 @@ static void alac_entropy_coder(AlacEncodeContext *s)
     int sign_modifier = 0, i, k;
     int32_t *samples = s->predictor_buf;
 
-    for(i=0;i < s->avctx->frame_size;) {
+    for (i = 0; i < s->avctx->frame_size;) {
         int x;
 
         k = av_log2((history >> 9) + 3);
@@ -320,15 +322,15 @@ static void alac_entropy_coder(AlacEncodeContext *s)
                    - ((history * s->rc.history_mult) >> 9);
 
         sign_modifier = 0;
-        if(x > 0xFFFF)
+        if (x > 0xFFFF)
             history = 0xFFFF;
 
-        if((history < 128) && (i < s->avctx->frame_size)) {
+        if (history < 128 && i < s->avctx->frame_size) {
             unsigned int block_size = 0;
 
             k = 7 - av_log2(history) + ((history + 16) >> 6);
 
-            while((*samples == 0) && (i < s->avctx->frame_size)) {
+            while (*samples == 0 && i < s->avctx->frame_size) {
                 samples++;
                 i++;
                 block_size++;
@@ -347,12 +349,12 @@ static void write_compressed_frame(AlacEncodeContext *s)
 {
     int i, j;
 
-    if(s->avctx->channels == 2)
+    if (s->avctx->channels == 2)
         alac_stereo_decorrelation(s);
     put_bits(&s->pbctx, 8, s->interlacing_shift);
     put_bits(&s->pbctx, 8, s->interlacing_leftweight);
 
-    for(i=0;i<s->avctx->channels;i++) {
+    for (i = 0; i < s->avctx->channels; i++) {
 
         calc_predictor_params(s, i);
 
@@ -362,14 +364,14 @@ static void write_compressed_frame(AlacEncodeContext *s)
         put_bits(&s->pbctx, 3, s->rc.rice_modifier);
         put_bits(&s->pbctx, 5, s->lpc[i].lpc_order);
         // predictor coeff. table
-        for(j=0;j<s->lpc[i].lpc_order;j++) {
+        for (j = 0; j < s->lpc[i].lpc_order; j++) {
             put_sbits(&s->pbctx, 16, s->lpc[i].lpc_coeff[j]);
         }
     }
 
     // apply lpc and entropy coding to audio samples
 
-    for(i=0;i<s->avctx->channels;i++) {
+    for (i = 0; i < s->avctx->channels; i++) {
         alac_linear_predictor(s, i);
         alac_entropy_coder(s);
     }
@@ -384,7 +386,7 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
     avctx->frame_size      = DEFAULT_FRAME_SIZE;
     avctx->bits_per_coded_sample = DEFAULT_SAMPLE_SIZE;
 
-    if(avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
+    if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
         av_log(avctx, AV_LOG_ERROR, "only pcm_s16 input samples are supported\n");
         return -1;
     }
@@ -395,7 +397,7 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
     }
 
     // Set default compression level
-    if(avctx->compression_level == FF_COMPRESSION_DEFAULT)
+    if (avctx->compression_level == FF_COMPRESSION_DEFAULT)
         s->compression_level = 2;
     else
         s->compression_level = av_clip(avctx->compression_level, 0, 2);
@@ -416,21 +418,23 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
     AV_WB8 (alac_extradata+17, avctx->bits_per_coded_sample);
     AV_WB8 (alac_extradata+21, avctx->channels);
     AV_WB32(alac_extradata+24, s->max_coded_frame_size);
-    AV_WB32(alac_extradata+28, avctx->sample_rate*avctx->channels*avctx->bits_per_coded_sample); // average bitrate
+    AV_WB32(alac_extradata+28,
+            avctx->sample_rate * avctx->channels * avctx->bits_per_coded_sample); // average bitrate
     AV_WB32(alac_extradata+32, avctx->sample_rate);
 
     // Set relevant extradata fields
-    if(s->compression_level > 0) {
+    if (s->compression_level > 0) {
         AV_WB8(alac_extradata+18, s->rc.history_mult);
         AV_WB8(alac_extradata+19, s->rc.initial_history);
         AV_WB8(alac_extradata+20, s->rc.k_modifier);
     }
 
     s->min_prediction_order = DEFAULT_MIN_PRED_ORDER;
-    if(avctx->min_prediction_order >= 0) {
-        if(avctx->min_prediction_order < MIN_LPC_ORDER ||
+    if (avctx->min_prediction_order >= 0) {
+        if (avctx->min_prediction_order < MIN_LPC_ORDER ||
            avctx->min_prediction_order > ALAC_MAX_LPC_ORDER) {
-            av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n", avctx->min_prediction_order);
+            av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n",
+                   avctx->min_prediction_order);
                 return -1;
         }
 
@@ -438,18 +442,20 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
     }
 
     s->max_prediction_order = DEFAULT_MAX_PRED_ORDER;
-    if(avctx->max_prediction_order >= 0) {
-        if(avctx->max_prediction_order < MIN_LPC_ORDER ||
-           avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) {
-            av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n", avctx->max_prediction_order);
+    if (avctx->max_prediction_order >= 0) {
+        if (avctx->max_prediction_order < MIN_LPC_ORDER ||
+            avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) {
+            av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n",
+                   avctx->max_prediction_order);
                 return -1;
         }
 
         s->max_prediction_order = avctx->max_prediction_order;
     }
 
-    if(s->max_prediction_order < s->min_prediction_order) {
-        av_log(avctx, AV_LOG_ERROR, "invalid prediction orders: min=%d max=%d\n",
+    if (s->max_prediction_order < s->min_prediction_order) {
+        av_log(avctx, AV_LOG_ERROR,
+               "invalid prediction orders: min=%d max=%d\n",
                s->min_prediction_order, s->max_prediction_order);
         return -1;
     }
@@ -474,12 +480,12 @@ static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame,
     PutBitContext *pb = &s->pbctx;
     int i, out_bytes, verbatim_flag = 0;
 
-    if(avctx->frame_size > DEFAULT_FRAME_SIZE) {
+    if (avctx->frame_size > DEFAULT_FRAME_SIZE) {
         av_log(avctx, AV_LOG_ERROR, "input frame size exceeded\n");
         return -1;
     }
 
-    if(buf_size < 2*s->max_coded_frame_size) {
+    if (buf_size < 2 * s->max_coded_frame_size) {
         av_log(avctx, AV_LOG_ERROR, "buffer size is too small\n");
         return -1;
     }
@@ -487,11 +493,11 @@ static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame,
 verbatim:
     init_put_bits(pb, frame, buf_size);
 
-    if((s->compression_level == 0) || verbatim_flag) {
+    if (s->compression_level == 0 || verbatim_flag) {
         // Verbatim mode
         const int16_t *samples = data;
         write_frame_header(s, 1);
-        for(i=0; i<avctx->frame_size*avctx->channels; i++) {
+        for (i = 0; i < avctx->frame_size * avctx->channels; i++) {
             put_sbits(pb, 16, *samples++);
         }
     } else {
@@ -504,9 +510,9 @@ verbatim:
     flush_put_bits(pb);
     out_bytes = put_bits_count(pb) >> 3;
 
-    if(out_bytes > s->max_coded_frame_size) {
+    if (out_bytes > s->max_coded_frame_size) {
         /* frame too large. use verbatim mode */
-        if(verbatim_flag || (s->compression_level == 0)) {
+        if (verbatim_flag || s->compression_level == 0) {
             /* still too large. must be an error. */
             av_log(avctx, AV_LOG_ERROR, "error encoding frame\n");
             return -1;
@@ -537,6 +543,7 @@ AVCodec ff_alac_encoder = {
     .encode         = alac_encode_frame,
     .close          = alac_encode_close,
     .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
-    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                  AV_SAMPLE_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
 };
diff --git a/libavcodec/alsdec.c b/libavcodec/alsdec.c
index fc98970bd7479574f7967972693cabcc21a63009..0e5509c74a4126efe86a793d40eb19c354b826cb 100644
--- a/libavcodec/alsdec.c
+++ b/libavcodec/alsdec.c
@@ -191,6 +191,7 @@ typedef struct {
 
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
     ALSSpecificConfig sconf;
     GetBitContext gb;
     DSPContext dsp;
@@ -290,7 +291,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
     init_get_bits(&gb, avctx->extradata, avctx->extradata_size * 8);
 
     config_offset = avpriv_mpeg4audio_get_config(&m4ac, avctx->extradata,
-                                             avctx->extradata_size);
+                                                 avctx->extradata_size * 8, 1);
 
     if (config_offset < 0)
         return -1;
@@ -1415,15 +1416,14 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
 
 /** Decode an ALS frame.
  */
-static int decode_frame(AVCodecContext *avctx,
-                        void *data, int *data_size,
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
                         AVPacket *avpkt)
 {
     ALSDecContext *ctx       = avctx->priv_data;
     ALSSpecificConfig *sconf = &ctx->sconf;
     const uint8_t *buffer    = avpkt->data;
     int buffer_size          = avpkt->size;
-    int invalid_frame, size;
+    int invalid_frame, ret;
     unsigned int c, sample, ra_frame, bytes_read, shift;
 
     init_get_bits(&ctx->gb, buffer, buffer_size * 8);
@@ -1448,21 +1448,17 @@ static int decode_frame(AVCodecContext *avctx,
 
     ctx->frame_id++;
 
-    // check for size of decoded data
-    size = ctx->cur_frame_length * avctx->channels *
-           av_get_bytes_per_sample(avctx->sample_fmt);
-
-    if (size > *data_size) {
-        av_log(avctx, AV_LOG_ERROR, "Decoded data exceeds buffer size.\n");
-        return -1;
+    /* get output buffer */
+    ctx->frame.nb_samples = ctx->cur_frame_length;
+    if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
-    *data_size = size;
-
     // transform decoded frame into output format
     #define INTERLEAVE_OUTPUT(bps)                                 \
     {                                                              \
-        int##bps##_t *dest = (int##bps##_t*) data;                 \
+        int##bps##_t *dest = (int##bps##_t*)ctx->frame.data[0];    \
         shift = bps - ctx->avctx->bits_per_raw_sample;             \
         for (sample = 0; sample < ctx->cur_frame_length; sample++) \
             for (c = 0; c < avctx->channels; c++)                  \
@@ -1480,7 +1476,7 @@ static int decode_frame(AVCodecContext *avctx,
         int swap = HAVE_BIGENDIAN != sconf->msb_first;
 
         if (ctx->avctx->bits_per_raw_sample == 24) {
-            int32_t *src = data;
+            int32_t *src = (int32_t *)ctx->frame.data[0];
 
             for (sample = 0;
                  sample < ctx->cur_frame_length * avctx->channels;
@@ -1501,22 +1497,25 @@ static int decode_frame(AVCodecContext *avctx,
 
             if (swap) {
                 if (ctx->avctx->bits_per_raw_sample <= 16) {
-                    int16_t *src  = (int16_t*) data;
+                    int16_t *src  = (int16_t*) ctx->frame.data[0];
                     int16_t *dest = (int16_t*) ctx->crc_buffer;
                     for (sample = 0;
                          sample < ctx->cur_frame_length * avctx->channels;
                          sample++)
                         *dest++ = av_bswap16(src[sample]);
                 } else {
-                    ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer, data,
+                    ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer,
+                                       (uint32_t *)ctx->frame.data[0],
                                        ctx->cur_frame_length * avctx->channels);
                 }
                 crc_source = ctx->crc_buffer;
             } else {
-                crc_source = data;
+                crc_source = ctx->frame.data[0];
             }
 
-            ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source, size);
+            ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source,
+                              ctx->cur_frame_length * avctx->channels *
+                              av_get_bytes_per_sample(avctx->sample_fmt));
         }
 
 
@@ -1527,6 +1526,9 @@ static int decode_frame(AVCodecContext *avctx,
         }
     }
 
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = ctx->frame;
+
 
     bytes_read = invalid_frame ? buffer_size :
                                  (get_bits_count(&ctx->gb) + 7) >> 3;
@@ -1724,6 +1726,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     dsputil_init(&ctx->dsp, avctx);
 
+    avcodec_get_frame_defaults(&ctx->frame);
+    avctx->coded_frame = &ctx->frame;
+
     return 0;
 }
 
@@ -1747,7 +1752,7 @@ AVCodec ff_als_decoder = {
     .close          = decode_end,
     .decode         = decode_frame,
     .flush = flush,
-    .capabilities = CODEC_CAP_SUBFRAMES,
+    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
 };
 
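The alsdec.c conversion above is representative of every audio decoder touched by this patch: the decoder context gains a persistent AVFrame, decode() asks for an output buffer via get_buffer(), and the finished frame is handed back through the opaque data pointer together with *got_frame_ptr. A condensed sketch of that pattern, with hypothetical names (MyDecContext, my_decode_frame) standing in for the per-codec code:

    #include "avcodec.h"

    typedef struct {
        AVFrame frame;          /* persists for the lifetime of the decoder */
        int     frame_length;   /* samples per frame, parsed at init time */
    } MyDecContext;

    static int my_decode_init(AVCodecContext *avctx)
    {
        MyDecContext *s = avctx->priv_data;

        avcodec_get_frame_defaults(&s->frame);
        avctx->coded_frame = &s->frame;
        return 0;
    }

    static int my_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
    {
        MyDecContext *s = avctx->priv_data;
        int ret;

        /* size the output from the frame geometry instead of checking a
           caller-provided byte count */
        s->frame.nb_samples = s->frame_length;
        if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* ... decode avpkt->data into s->frame.data[0] ... */

        *got_frame_ptr   = 1;
        *(AVFrame *)data = s->frame;
        return avpkt->size;     /* bytes of input consumed */
    }
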
diff --git a/libavcodec/amrnbdec.c b/libavcodec/amrnbdec.c
index b8d826e13903db934c2f2330b50f989e6dcf6003..0a4a7e6dda569615d663cd6b2e656cadc66f2fee 100644
--- a/libavcodec/amrnbdec.c
+++ b/libavcodec/amrnbdec.c
@@ -95,6 +95,7 @@
 #define AMR_AGC_ALPHA      0.9
 
 typedef struct AMRContext {
+    AVFrame                         avframe; ///< AVFrame for decoded samples
     AMRNBFrame                        frame; ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc)
     uint8_t             bad_frame_indicator; ///< bad frame ? 1 : 0
     enum Mode                cur_frame_mode;
@@ -167,6 +168,9 @@ static av_cold int amrnb_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 4; i++)
         p->prediction_error[i] = MIN_ENERGY;
 
+    avcodec_get_frame_defaults(&p->avframe);
+    avctx->coded_frame = &p->avframe;
+
     return 0;
 }
 
@@ -919,21 +923,29 @@ static void postfilter(AMRContext *p, float *lpc, float *buf_out)
 
 /// @}
 
-static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                              AVPacket *avpkt)
+static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
 
     AMRContext *p = avctx->priv_data;        // pointer to private data
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    float *buf_out = data;                   // pointer to the output data buffer
-    int i, subframe;
+    float *buf_out;                          // pointer to the output data buffer
+    int i, subframe, ret;
     float fixed_gain_factor;
     AMRFixed fixed_sparse = {0};             // fixed vector up to anti-sparseness processing
     float spare_vector[AMR_SUBFRAME_SIZE];   // extra stack space to hold result from anti-sparseness processing
     float synth_fixed_gain;                  // the fixed gain that synthesis should use
     const float *synth_fixed_vector;         // pointer to the fixed vector that synthesis should use
 
+    /* get output buffer */
+    p->avframe.nb_samples = AMR_BLOCK_SIZE;
+    if ((ret = avctx->get_buffer(avctx, &p->avframe)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    buf_out = (float *)p->avframe.data[0];
+
     p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
     if (p->cur_frame_mode == MODE_DTX) {
         av_log_missing_feature(avctx, "dtx mode", 0);
@@ -1029,8 +1041,8 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     ff_weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
                             0.84, 0.16, LP_FILTER_ORDER);
 
-    /* report how many samples we got */
-    *data_size = AMR_BLOCK_SIZE * sizeof(float);
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = p->avframe;
 
     /* return the amount of bytes consumed if everything was OK */
     return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC
@@ -1044,6 +1056,7 @@ AVCodec ff_amrnb_decoder = {
     .priv_data_size = sizeof(AMRContext),
     .init           = amrnb_decode_init,
     .decode         = amrnb_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate NarrowBand"),
     .sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
 };
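On the application side these converted decoders are reached through avcodec_decode_audio4(), which fills a caller-owned AVFrame instead of a caller-sized sample buffer. A minimal, illustrative receive fragment; dec_ctx and pkt are assumed to be an opened decoder context and a demuxed packet, and error paths are trimmed:

    AVFrame frame;
    int got_frame = 0, ret;

    avcodec_get_frame_defaults(&frame);
    ret = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &pkt);
    if (ret >= 0 && got_frame) {
        int size = av_samples_get_buffer_size(NULL, dec_ctx->channels,
                                              frame.nb_samples,
                                              dec_ctx->sample_fmt, 1);
        /* frame.data[0] now holds size bytes of decoded audio */
    }
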
diff --git a/libavcodec/amrwbdec.c b/libavcodec/amrwbdec.c
index fa3f8dd05058886a493d277b3d1b17c4ae572c76..b0e54f9261509ecfff2ab6aac9c54692c34b86f0 100644
--- a/libavcodec/amrwbdec.c
+++ b/libavcodec/amrwbdec.c
@@ -41,6 +41,7 @@
 #include "amrwbdata.h"
 
 typedef struct {
+    AVFrame                              avframe; ///< AVFrame for decoded samples
     AMRWBFrame                             frame; ///< AMRWB parameters decoded from bitstream
     enum Mode                        fr_cur_mode; ///< mode index of current frame
     uint8_t                           fr_quality; ///< frame quality index (FQI)
@@ -102,6 +103,9 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 4; i++)
         ctx->prediction_error[i] = MIN_ENERGY;
 
+    avcodec_get_frame_defaults(&ctx->avframe);
+    avctx->coded_frame = &ctx->avframe;
+
     return 0;
 }
 
@@ -1062,15 +1066,15 @@ static void update_sub_state(AMRWBContext *ctx)
             LP_ORDER_16k * sizeof(float));
 }
 
-static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                              AVPacket *avpkt)
+static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     AMRWBContext *ctx  = avctx->priv_data;
     AMRWBFrame   *cf   = &ctx->frame;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     int expected_fr_size, header_size;
-    float *buf_out = data;
+    float *buf_out;
     float spare_vector[AMRWB_SFR_SIZE];      // extra stack space to hold result from anti-sparseness processing
     float fixed_gain_factor;                 // fixed gain correction factor (gamma)
     float *synth_fixed_vector;               // pointer to the fixed vector that synthesis should use
@@ -1080,7 +1084,15 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     float hb_exc[AMRWB_SFR_SIZE_16k];        // excitation for the high frequency band
     float hb_samples[AMRWB_SFR_SIZE_16k];    // filtered high-band samples from synthesis
     float hb_gain;
-    int sub, i;
+    int sub, i, ret;
+
+    /* get output buffer */
+    ctx->avframe.nb_samples = 4 * AMRWB_SFR_SIZE_16k;
+    if ((ret = avctx->get_buffer(avctx, &ctx->avframe)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    buf_out = (float *)ctx->avframe.data[0];
 
     header_size      = decode_mime_header(ctx, buf);
     expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;
@@ -1088,7 +1100,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     if (buf_size < expected_fr_size) {
         av_log(avctx, AV_LOG_ERROR,
             "Frame too small (%d bytes). Truncated file?\n", buf_size);
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
@@ -1219,8 +1231,8 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(ctx->isp[3][0]));
     memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));
 
-    /* report how many samples we got */
-    *data_size = 4 * AMRWB_SFR_SIZE_16k * sizeof(float);
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = ctx->avframe;
 
     return expected_fr_size;
 }
@@ -1232,6 +1244,7 @@ AVCodec ff_amrwb_decoder = {
     .priv_data_size = sizeof(AMRWBContext),
     .init           = amrwb_decode_init,
     .decode         = amrwb_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate WideBand"),
     .sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
 };
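The CODEC_CAP_DR1 flag added here (and to the other converted decoders) advertises that get_buffer() is actually honoured, so an application may interpose its own allocator. A deliberately minimal pass-through wrapper, shown only to illustrate the hook; my_get_buffer is not part of the patch:

    static int my_get_buffer(AVCodecContext *avctx, AVFrame *frame)
    {
        int ret = avcodec_default_get_buffer(avctx, frame);
        if (ret >= 0)
            av_log(avctx, AV_LOG_DEBUG, "buffer for %d samples allocated\n",
                   frame->nb_samples);
        return ret;
    }

    /* installed before avcodec_open2(dec_ctx, codec, NULL): */
    dec_ctx->get_buffer = my_get_buffer;
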
diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index 10f75212ff79a2a0b1f8a2b68a5bcc13f519ffca..df7529835ce9819d273ac361a280278bf4dd6d25 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -129,6 +129,7 @@ typedef struct APEPredictor {
 /** Decoder context */
 typedef struct APEContext {
     AVCodecContext *avctx;
+    AVFrame frame;
     DSPContext dsp;
     int channels;
     int samples;                             ///< samples left to decode in current frame
@@ -215,6 +216,10 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
     dsputil_init(&s->dsp, avctx);
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 filter_alloc_fail:
     ape_decode_close(avctx);
@@ -805,16 +810,15 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
     }
 }
 
-static int ape_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int ape_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     APEContext *s = avctx->priv_data;
-    int16_t *samples = data;
-    int i;
-    int blockstodecode, out_size;
+    int16_t *samples;
+    int i, ret;
+    int blockstodecode;
     int bytes_used = 0;
 
     /* this should never be negative, but bad things will happen if it is, so
@@ -826,7 +830,7 @@ static int ape_decode_frame(AVCodecContext *avctx,
         void *tmp_data;
 
         if (!buf_size) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return 0;
         }
         if (buf_size < 8) {
@@ -874,18 +878,19 @@ static int ape_decode_frame(AVCodecContext *avctx,
     }
 
     if (!s->data) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
     blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples);
 
-    out_size = blockstodecode * avctx->channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small.\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = blockstodecode;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)s->frame.data[0];
 
     s->error=0;
 
@@ -909,7 +914,9 @@ static int ape_decode_frame(AVCodecContext *avctx,
 
     s->samples -= blockstodecode;
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return bytes_used;
 }
 
@@ -927,7 +934,7 @@ AVCodec ff_ape_decoder = {
     .init           = ape_decode_init,
     .close          = ape_decode_close,
     .decode         = ape_decode_frame,
-    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY,
+    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .flush = ape_flush,
     .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
 };
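The Monkey's Audio decoder keeps CODEC_CAP_SUBFRAMES and CODEC_CAP_DELAY: one input packet can produce several output frames, with later calls returning 0 consumed bytes while the internal buffer drains. A simplified consumption loop under those assumptions (a real caller would keep the original pkt.data for av_free_packet() and also flush with empty packets at end of stream):

    AVFrame frame;
    int got_frame, used;

    do {
        got_frame = 0;
        avcodec_get_frame_defaults(&frame);
        used = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &pkt);
        if (used < 0)
            break;                          /* decode error */
        if (got_frame) {
            /* consume frame.nb_samples samples from frame.data[0] */
        }
        pkt.data += used;
        pkt.size -= used;
    } while (pkt.size > 0 || got_frame);
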
diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S
index adc21f9500c974d1a35da12327cd6b8f79a22ca1..9f4da2cb7b38f69e2bc94b26b1c61c6006a924d3 100644
--- a/libavcodec/arm/h264dsp_neon.S
+++ b/libavcodec/arm/h264dsp_neon.S
@@ -19,55 +19,16 @@
  */
 
 #include "asm.S"
-
-        .macro transpose_8x8 r0 r1 r2 r3 r4 r5 r6 r7
-        vtrn.32         \r0, \r4
-        vtrn.32         \r1, \r5
-        vtrn.32         \r2, \r6
-        vtrn.32         \r3, \r7
-        vtrn.16         \r0, \r2
-        vtrn.16         \r1, \r3
-        vtrn.16         \r4, \r6
-        vtrn.16         \r5, \r7
-        vtrn.8          \r0, \r1
-        vtrn.8          \r2, \r3
-        vtrn.8          \r4, \r5
-        vtrn.8          \r6, \r7
-        .endm
-
-        .macro transpose_4x4 r0 r1 r2 r3
-        vtrn.16         \r0, \r2
-        vtrn.16         \r1, \r3
-        vtrn.8          \r0, \r1
-        vtrn.8          \r2, \r3
-        .endm
-
-        .macro swap4 r0 r1 r2 r3 r4 r5 r6 r7
-        vswp            \r0, \r4
-        vswp            \r1, \r5
-        vswp            \r2, \r6
-        vswp            \r3, \r7
-        .endm
-
-        .macro transpose16_4x4 r0 r1 r2 r3 r4 r5 r6 r7
-        vtrn.32         \r0, \r2
-        vtrn.32         \r1, \r3
-        vtrn.32         \r4, \r6
-        vtrn.32         \r5, \r7
-        vtrn.16         \r0, \r1
-        vtrn.16         \r2, \r3
-        vtrn.16         \r4, \r5
-        vtrn.16         \r6, \r7
-        .endm
+#include "neon.S"
 
 /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-        .macro  h264_chroma_mc8 type
+.macro  h264_chroma_mc8 type
 function ff_\type\()_h264_chroma_mc8_neon, export=1
         push            {r4-r7, lr}
         ldrd            r4,  [sp, #20]
-.ifc \type,avg
+  .ifc \type,avg
         mov             lr,  r0
-.endif
+  .endif
         pld             [r1]
         pld             [r1, r2]
 
@@ -75,7 +36,7 @@ A       muls            r7,  r4,  r5
 T       mul             r7,  r4,  r5
 T       cmp             r7,  #0
         rsb             r6,  r7,  r5,  lsl #3
-        rsb             ip,  r7,  r4,  lsl #3
+        rsb             r12, r7,  r4,  lsl #3
         sub             r4,  r7,  r4,  lsl #3
         sub             r4,  r4,  r5,  lsl #3
         add             r4,  r4,  #64
@@ -86,10 +47,10 @@ T       cmp             r7,  #0
 
         vdup.8          d0,  r4
         lsl             r4,  r2,  #1
-        vdup.8          d1,  ip
-        vld1.64         {d4, d5}, [r1], r4
+        vdup.8          d1,  r12
+        vld1.8          {d4, d5}, [r1], r4
         vdup.8          d2,  r6
-        vld1.64         {d6, d7}, [r5], r4
+        vld1.8          {d6, d7}, [r5], r4
         vdup.8          d3,  r7
 
         vext.8          d5,  d4,  d5,  #1
@@ -98,7 +59,7 @@ T       cmp             r7,  #0
 1:      pld             [r5]
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d5,  d1
-        vld1.64         {d4, d5}, [r1], r4
+        vld1.8          {d4, d5}, [r1], r4
         vmlal.u8        q8,  d6,  d2
         vext.8          d5,  d4,  d5,  #1
         vmlal.u8        q8,  d7,  d3
@@ -108,57 +69,57 @@ T       cmp             r7,  #0
         vmlal.u8        q9,  d4,  d2
         vmlal.u8        q9,  d5,  d3
         vrshrn.u16      d16, q8,  #6
-        vld1.64         {d6, d7}, [r5], r4
+        vld1.8          {d6, d7}, [r5], r4
         pld             [r1]
         vrshrn.u16      d17, q9,  #6
-.ifc \type,avg
-        vld1.64         {d20}, [lr,:64], r2
-        vld1.64         {d21}, [lr,:64], r2
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
-.endif
+  .endif
         vext.8          d7,  d6,  d7,  #1
-        vst1.64         {d16}, [r0,:64], r2
-        vst1.64         {d17}, [r0,:64], r2
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
         bgt             1b
 
         pop             {r4-r7, pc}
 
 2:      tst             r6,  r6
-        add             ip,  ip,  r6
+        add             r12, r12, r6
         vdup.8          d0,  r4
-        vdup.8          d1,  ip
+        vdup.8          d1,  r12
 
         beq             4f
 
         add             r5,  r1,  r2
         lsl             r4,  r2,  #1
-        vld1.64         {d4}, [r1], r4
-        vld1.64         {d6}, [r5], r4
+        vld1.8          {d4}, [r1], r4
+        vld1.8          {d6}, [r5], r4
 
 3:      pld             [r5]
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d6,  d1
-        vld1.64         {d4}, [r1], r4
+        vld1.8          {d4}, [r1], r4
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d4,  d1
-        vld1.64         {d6}, [r5], r4
+        vld1.8          {d6}, [r5], r4
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
-.ifc \type,avg
-        vld1.64         {d20}, [lr,:64], r2
-        vld1.64         {d21}, [lr,:64], r2
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
-.endif
+  .endif
         subs            r3,  r3,  #2
         pld             [r1]
-        vst1.64         {d16}, [r0,:64], r2
-        vst1.64         {d17}, [r0,:64], r2
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
         bgt             3b
 
         pop             {r4-r7, pc}
 
-4:      vld1.64         {d4, d5}, [r1], r2
-        vld1.64         {d6, d7}, [r1], r2
+4:      vld1.8          {d4, d5}, [r1], r2
+        vld1.8          {d6, d7}, [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vext.8          d7,  d6,  d7,  #1
 
@@ -166,36 +127,36 @@ T       cmp             r7,  #0
         subs            r3,  r3,  #2
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d5,  d1
-        vld1.64         {d4, d5}, [r1], r2
+        vld1.8          {d4, d5}, [r1], r2
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d7,  d1
         pld             [r1]
         vext.8          d5,  d4,  d5,  #1
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
-.ifc \type,avg
-        vld1.64         {d20}, [lr,:64], r2
-        vld1.64         {d21}, [lr,:64], r2
+  .ifc \type,avg
+        vld1.8          {d20}, [lr,:64], r2
+        vld1.8          {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
-.endif
-        vld1.64         {d6, d7}, [r1], r2
+  .endif
+        vld1.8          {d6, d7}, [r1], r2
         vext.8          d7,  d6,  d7,  #1
-        vst1.64         {d16}, [r0,:64], r2
-        vst1.64         {d17}, [r0,:64], r2
+        vst1.8          {d16}, [r0,:64], r2
+        vst1.8          {d17}, [r0,:64], r2
         bgt             5b
 
         pop             {r4-r7, pc}
 endfunc
-        .endm
+.endm
 
 /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-        .macro  h264_chroma_mc4 type
+.macro  h264_chroma_mc4 type
 function ff_\type\()_h264_chroma_mc4_neon, export=1
         push            {r4-r7, lr}
         ldrd            r4,  [sp, #20]
-.ifc \type,avg
+  .ifc \type,avg
         mov             lr,  r0
-.endif
+  .endif
         pld             [r1]
         pld             [r1, r2]
 
@@ -203,7 +164,7 @@ A       muls            r7,  r4,  r5
 T       mul             r7,  r4,  r5
 T       cmp             r7,  #0
         rsb             r6,  r7,  r5,  lsl #3
-        rsb             ip,  r7,  r4,  lsl #3
+        rsb             r12, r7,  r4,  lsl #3
         sub             r4,  r7,  r4,  lsl #3
         sub             r4,  r4,  r5,  lsl #3
         add             r4,  r4,  #64
@@ -214,10 +175,10 @@ T       cmp             r7,  #0
 
         vdup.8          d0,  r4
         lsl             r4,  r2,  #1
-        vdup.8          d1,  ip
-        vld1.64         {d4},     [r1], r4
+        vdup.8          d1,  r12
+        vld1.8          {d4},     [r1], r4
         vdup.8          d2,  r6
-        vld1.64         {d6},     [r5], r4
+        vld1.8          {d6},     [r5], r4
         vdup.8          d3,  r7
 
         vext.8          d5,  d4,  d5,  #1
@@ -231,22 +192,22 @@ T       cmp             r7,  #0
 1:      pld             [r5]
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d6,  d2
-        vld1.64         {d4},     [r1], r4
+        vld1.8          {d4},     [r1], r4
         vext.8          d5,  d4,  d5,  #1
         vtrn.32         d4,  d5
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d4,  d2
-        vld1.64         {d6},     [r5], r4
+        vld1.8          {d6},     [r5], r4
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         vrshrn.u16      d16, q8,  #6
         subs            r3,  r3,  #2
         pld             [r1]
-.ifc \type,avg
+  .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
-.endif
+  .endif
         vext.8          d7,  d6,  d7,  #1
         vtrn.32         d6,  d7
         vst1.32         {d16[0]}, [r0,:32], r2
@@ -256,9 +217,9 @@ T       cmp             r7,  #0
         pop             {r4-r7, pc}
 
 2:      tst             r6,  r6
-        add             ip,  ip,  r6
+        add             r12, r12, r6
         vdup.8          d0,  r4
-        vdup.8          d1,  ip
+        vdup.8          d1,  r12
         vtrn.32         d0,  d1
 
         beq             4f
@@ -277,11 +238,11 @@ T       cmp             r7,  #0
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         vrshrn.u16      d16, q8,  #6
-.ifc \type,avg
+  .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
-.endif
+  .endif
         subs            r3,  r3,  #2
         pld             [r1]
         vst1.32         {d16[0]}, [r0,:32], r2
@@ -290,8 +251,8 @@ T       cmp             r7,  #0
 
         pop             {r4-r7, pc}
 
-4:      vld1.64         {d4},     [r1], r2
-        vld1.64         {d6},     [r1], r2
+4:      vld1.8          {d4},     [r1], r2
+        vld1.8          {d6},     [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vext.8          d7,  d6,  d7,  #1
         vtrn.32         d4,  d5
@@ -300,19 +261,19 @@ T       cmp             r7,  #0
 5:      vmull.u8        q8,  d4,  d0
         vmull.u8        q9,  d6,  d0
         subs            r3,  r3,  #2
-        vld1.64         {d4},     [r1], r2
+        vld1.8          {d4},     [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vtrn.32         d4,  d5
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         pld             [r1]
         vrshrn.u16      d16, q8,  #6
-.ifc \type,avg
+  .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
-.endif
-        vld1.64         {d6},     [r1], r2
+  .endif
+        vld1.8          {d6},     [r1], r2
         vext.8          d7,  d6,  d7,  #1
         vtrn.32         d6,  d7
         pld             [r1]
@@ -322,9 +283,9 @@ T       cmp             r7,  #0
 
         pop             {r4-r7, pc}
 endfunc
-        .endm
+.endm
 
-        .macro  h264_chroma_mc2 type
+.macro  h264_chroma_mc2 type
 function ff_\type\()_h264_chroma_mc2_neon, export=1
         push            {r4-r6, lr}
         ldr             r4,  [sp, #16]
@@ -354,29 +315,29 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1
         vtrn.16         q2,  q3
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d5,  d1
-.ifc \type,avg
+  .ifc \type,avg
         vld1.16         {d18[0]}, [r0,:16], r2
         vld1.16         {d18[1]}, [r0,:16]
         sub             r0,  r0,  r2
-.endif
+  .endif
         vtrn.32         d16, d17
         vadd.i16        d16, d16, d17
         vrshrn.u16      d16, q8,  #6
-.ifc \type,avg
+  .ifc \type,avg
         vrhadd.u8       d16, d16, d18
-.endif
+  .endif
         vst1.16         {d16[0]}, [r0,:16], r2
         vst1.16         {d16[1]}, [r0,:16], r2
         subs            r3,  r3,  #2
         bgt             1b
         pop             {r4-r6, pc}
 2:
-.ifc \type,put
+  .ifc \type,put
         ldrh_post       r5,  r1,  r2
         strh_post       r5,  r0,  r2
         ldrh_post       r6,  r1,  r2
         strh_post       r6,  r0,  r2
-.else
+  .else
         vld1.16         {d16[0]}, [r1], r2
         vld1.16         {d16[1]}, [r1], r2
         vld1.16         {d18[0]}, [r0,:16], r2
@@ -385,7 +346,7 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1
         vrhadd.u8       d16, d16, d18
         vst1.16         {d16[0]}, [r0,:16], r2
         vst1.16         {d16[1]}, [r0,:16], r2
-.endif
+  .endif
         subs            r3,  r3,  #2
         bgt             2b
         pop             {r4-r6, pc}
@@ -401,22 +362,22 @@ endfunc
 
         /* H.264 loop filter */
 
-        .macro h264_loop_filter_start
-        ldr             ip,  [sp]
+.macro  h264_loop_filter_start
+        ldr             r12, [sp]
         tst             r2,  r2
-        ldr             ip,  [ip]
+        ldr             r12, [r12]
         it              ne
         tstne           r3,  r3
-        vmov.32         d24[0], ip
-        and             ip,  ip,  ip, lsl #16
+        vmov.32         d24[0], r12
+        and             r12, r12, r12, lsl #16
         it              eq
         bxeq            lr
-        ands            ip,  ip,  ip, lsl #8
+        ands            r12, r12, r12, lsl #8
         it              lt
         bxlt            lr
-        .endm
+.endm
 
-        .macro h264_loop_filter_luma
+.macro  h264_loop_filter_luma
         vdup.8          q11, r2         @ alpha
         vmovl.u8        q12, d24
         vabd.u8         q6,  q8,  q0    @ abs(p0 - q0)
@@ -482,29 +443,29 @@ endfunc
         vqmovun.s16     d17, q6
         vqmovun.s16     d0,  q11
         vqmovun.s16     d1,  q12
-        .endm
+.endm
 
 function ff_h264_v_loop_filter_luma_neon, export=1
         h264_loop_filter_start
 
-        vld1.64         {d0, d1},  [r0,:128], r1
-        vld1.64         {d2, d3},  [r0,:128], r1
-        vld1.64         {d4, d5},  [r0,:128], r1
+        vld1.8          {d0, d1},  [r0,:128], r1
+        vld1.8          {d2, d3},  [r0,:128], r1
+        vld1.8          {d4, d5},  [r0,:128], r1
         sub             r0,  r0,  r1, lsl #2
         sub             r0,  r0,  r1, lsl #1
-        vld1.64         {d20,d21}, [r0,:128], r1
-        vld1.64         {d18,d19}, [r0,:128], r1
-        vld1.64         {d16,d17}, [r0,:128], r1
+        vld1.8          {d20,d21}, [r0,:128], r1
+        vld1.8          {d18,d19}, [r0,:128], r1
+        vld1.8          {d16,d17}, [r0,:128], r1
 
         vpush           {d8-d15}
 
         h264_loop_filter_luma
 
         sub             r0,  r0,  r1, lsl #1
-        vst1.64         {d8, d9},  [r0,:128], r1
-        vst1.64         {d16,d17}, [r0,:128], r1
-        vst1.64         {d0, d1},  [r0,:128], r1
-        vst1.64         {d10,d11}, [r0,:128]
+        vst1.8          {d8, d9},  [r0,:128], r1
+        vst1.8          {d16,d17}, [r0,:128], r1
+        vst1.8          {d0, d1},  [r0,:128], r1
+        vst1.8          {d10,d11}, [r0,:128]
 
         vpop            {d8-d15}
         bx              lr
@@ -514,22 +475,22 @@ function ff_h264_h_loop_filter_luma_neon, export=1
         h264_loop_filter_start
 
         sub             r0,  r0,  #4
-        vld1.64         {d6},  [r0], r1
-        vld1.64         {d20}, [r0], r1
-        vld1.64         {d18}, [r0], r1
-        vld1.64         {d16}, [r0], r1
-        vld1.64         {d0},  [r0], r1
-        vld1.64         {d2},  [r0], r1
-        vld1.64         {d4},  [r0], r1
-        vld1.64         {d26}, [r0], r1
-        vld1.64         {d7},  [r0], r1
-        vld1.64         {d21}, [r0], r1
-        vld1.64         {d19}, [r0], r1
-        vld1.64         {d17}, [r0], r1
-        vld1.64         {d1},  [r0], r1
-        vld1.64         {d3},  [r0], r1
-        vld1.64         {d5},  [r0], r1
-        vld1.64         {d27}, [r0], r1
+        vld1.8          {d6},  [r0], r1
+        vld1.8          {d20}, [r0], r1
+        vld1.8          {d18}, [r0], r1
+        vld1.8          {d16}, [r0], r1
+        vld1.8          {d0},  [r0], r1
+        vld1.8          {d2},  [r0], r1
+        vld1.8          {d4},  [r0], r1
+        vld1.8          {d26}, [r0], r1
+        vld1.8          {d7},  [r0], r1
+        vld1.8          {d21}, [r0], r1
+        vld1.8          {d19}, [r0], r1
+        vld1.8          {d17}, [r0], r1
+        vld1.8          {d1},  [r0], r1
+        vld1.8          {d3},  [r0], r1
+        vld1.8          {d5},  [r0], r1
+        vld1.8          {d27}, [r0], r1
 
         transpose_8x8   q3, q10, q9, q8, q0, q1, q2, q13
 
@@ -562,7 +523,7 @@ function ff_h264_h_loop_filter_luma_neon, export=1
         bx              lr
 endfunc
 
-        .macro h264_loop_filter_chroma
+.macro  h264_loop_filter_chroma
         vdup.8          d22, r2         @ alpha
         vmovl.u8        q12, d24
         vabd.u8         d26, d16, d0    @ abs(p0 - q0)
@@ -591,22 +552,22 @@ endfunc
         vsubw.s8        q11, q11, d4
         vqmovun.s16     d16, q14
         vqmovun.s16     d0,  q11
-        .endm
+.endm
 
 function ff_h264_v_loop_filter_chroma_neon, export=1
         h264_loop_filter_start
 
         sub             r0,  r0,  r1, lsl #1
-        vld1.64         {d18}, [r0,:64], r1
-        vld1.64         {d16}, [r0,:64], r1
-        vld1.64         {d0},  [r0,:64], r1
-        vld1.64         {d2},  [r0,:64]
+        vld1.8          {d18}, [r0,:64], r1
+        vld1.8          {d16}, [r0,:64], r1
+        vld1.8          {d0},  [r0,:64], r1
+        vld1.8          {d2},  [r0,:64]
 
         h264_loop_filter_chroma
 
         sub             r0,  r0,  r1, lsl #1
-        vst1.64         {d16}, [r0,:64], r1
-        vst1.64         {d0},  [r0,:64], r1
+        vst1.8          {d16}, [r0,:64], r1
+        vst1.8          {d0},  [r0,:64], r1
 
         bx              lr
 endfunc
@@ -651,20 +612,20 @@ endfunc
 
         /* H.264 qpel MC */
 
-        .macro  lowpass_const r
+.macro  lowpass_const   r
         movw            \r,  #5
         movt            \r,  #20
         vmov.32         d6[0], \r
-        .endm
+.endm
 
-        .macro  lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1
-.if \narrow
+.macro  lowpass_8       r0,  r1,  r2,  r3,  d0,  d1,  narrow=1
+  .if \narrow
         t0 .req q0
         t1 .req q8
-.else
+  .else
         t0 .req \d0
         t1 .req \d1
-.endif
+  .endif
         vext.8          d2,  \r0, \r1, #2
         vext.8          d3,  \r0, \r1, #3
         vaddl.u8        q1,  d2,  d3
@@ -685,20 +646,20 @@ endfunc
         vaddl.u8        t1,  \r2, d31
         vmla.i16        t1,  q9,  d6[1]
         vmls.i16        t1,  q10, d6[0]
-.if \narrow
+  .if \narrow
         vqrshrun.s16    \d0, t0,  #5
         vqrshrun.s16    \d1, t1,  #5
-.endif
+  .endif
         .unreq  t0
         .unreq  t1
-        .endm
+.endm
 
-        .macro  lowpass_8_1 r0, r1, d0, narrow=1
-.if \narrow
+.macro  lowpass_8_1     r0,  r1,  d0,  narrow=1
+  .if \narrow
         t0 .req q0
-.else
+  .else
         t0 .req \d0
-.endif
+  .endif
         vext.8          d2,  \r0, \r1, #2
         vext.8          d3,  \r0, \r1, #3
         vaddl.u8        q1,  d2,  d3
@@ -709,13 +670,13 @@ endfunc
         vaddl.u8        t0,  \r0, d30
         vmla.i16        t0,  q1,  d6[1]
         vmls.i16        t0,  q2,  d6[0]
-.if \narrow
+  .if \narrow
         vqrshrun.s16    \d0, t0,  #5
-.endif
+  .endif
         .unreq  t0
-        .endm
+.endm
 
-        .macro  lowpass_8.16 r0, r1, l0, h0, l1, h1, d
+.macro  lowpass_8.16    r0,  r1,  l0,  h0,  l1,  h1,  d
         vext.16         q1,  \r0, \r1, #2
         vext.16         q0,  \r0, \r1, #3
         vaddl.s16       q9,  d2,  d0
@@ -750,59 +711,59 @@ endfunc
         vrshrn.s32      d19, q1,  #10
 
         vqmovun.s16     \d,  q9
-        .endm
+.endm
 
 function put_h264_qpel16_h_lowpass_neon_packed
         mov             r4,  lr
-        mov             ip,  #16
+        mov             r12, #16
         mov             r3,  #8
         bl              put_h264_qpel8_h_lowpass_neon
         sub             r1,  r1,  r2, lsl #4
         add             r1,  r1,  #8
-        mov             ip,  #16
+        mov             r12, #16
         mov             lr,  r4
         b               put_h264_qpel8_h_lowpass_neon
 endfunc
 
-        .macro h264_qpel_h_lowpass type
+.macro  h264_qpel_h_lowpass type
 function \type\()_h264_qpel16_h_lowpass_neon
         push            {lr}
-        mov             ip,  #16
+        mov             r12, #16
         bl              \type\()_h264_qpel8_h_lowpass_neon
         sub             r0,  r0,  r3, lsl #4
         sub             r1,  r1,  r2, lsl #4
         add             r0,  r0,  #8
         add             r1,  r1,  #8
-        mov             ip,  #16
+        mov             r12, #16
         pop             {lr}
 endfunc
 
 function \type\()_h264_qpel8_h_lowpass_neon
-1:      vld1.64         {d0, d1},  [r1], r2
-        vld1.64         {d16,d17}, [r1], r2
-        subs            ip,  ip,  #2
+1:      vld1.8          {d0, d1},  [r1], r2
+        vld1.8          {d16,d17}, [r1], r2
+        subs            r12, r12, #2
         lowpass_8       d0,  d1,  d16, d17, d0,  d16
-.ifc \type,avg
+  .ifc \type,avg
         vld1.8          {d2},     [r0,:64], r3
         vrhadd.u8       d0,  d0,  d2
         vld1.8          {d3},     [r0,:64]
         vrhadd.u8       d16, d16, d3
         sub             r0,  r0,  r3
-.endif
-        vst1.64         {d0},     [r0,:64], r3
-        vst1.64         {d16},    [r0,:64], r3
+  .endif
+        vst1.8          {d0},     [r0,:64], r3
+        vst1.8          {d16},    [r0,:64], r3
         bne             1b
         bx              lr
 endfunc
-        .endm
+.endm
 
         h264_qpel_h_lowpass put
         h264_qpel_h_lowpass avg
 
-        .macro h264_qpel_h_lowpass_l2 type
+.macro  h264_qpel_h_lowpass_l2 type
 function \type\()_h264_qpel16_h_lowpass_l2_neon
         push            {lr}
-        mov             ip,  #16
+        mov             r12, #16
         bl              \type\()_h264_qpel8_h_lowpass_l2_neon
         sub             r0,  r0,  r2, lsl #4
         sub             r1,  r1,  r2, lsl #4
@@ -810,31 +771,31 @@ function \type\()_h264_qpel16_h_lowpass_l2_neon
         add             r0,  r0,  #8
         add             r1,  r1,  #8
         add             r3,  r3,  #8
-        mov             ip,  #16
+        mov             r12, #16
         pop             {lr}
 endfunc
 
 function \type\()_h264_qpel8_h_lowpass_l2_neon
-1:      vld1.64         {d0, d1},  [r1], r2
-        vld1.64         {d16,d17}, [r1], r2
-        vld1.64         {d28},     [r3], r2
-        vld1.64         {d29},     [r3], r2
-        subs            ip,  ip,  #2
+1:      vld1.8          {d0, d1},  [r1], r2
+        vld1.8          {d16,d17}, [r1], r2
+        vld1.8          {d28},     [r3], r2
+        vld1.8          {d29},     [r3], r2
+        subs            r12, r12, #2
         lowpass_8       d0,  d1,  d16, d17, d0,  d1
         vrhadd.u8       q0,  q0,  q14
-.ifc \type,avg
+  .ifc \type,avg
         vld1.8          {d2},      [r0,:64], r2
         vrhadd.u8       d0,  d0,  d2
         vld1.8          {d3},      [r0,:64]
         vrhadd.u8       d1,  d1,  d3
         sub             r0,  r0,  r2
-.endif
-        vst1.64         {d0},      [r0,:64], r2
-        vst1.64         {d1},      [r0,:64], r2
+  .endif
+        vst1.8          {d0},      [r0,:64], r2
+        vst1.8          {d1},      [r0,:64], r2
         bne             1b
         bx              lr
 endfunc
-        .endm
+.endm
 
         h264_qpel_h_lowpass_l2 put
         h264_qpel_h_lowpass_l2 avg
@@ -854,7 +815,7 @@ function put_h264_qpel16_v_lowpass_neon_packed
         b               put_h264_qpel8_v_lowpass_neon
 endfunc
 
-        .macro h264_qpel_v_lowpass type
+.macro  h264_qpel_v_lowpass type
 function \type\()_h264_qpel16_v_lowpass_neon
         mov             r4,  lr
         bl              \type\()_h264_qpel8_v_lowpass_neon
@@ -871,19 +832,19 @@ function \type\()_h264_qpel16_v_lowpass_neon
 endfunc
 
 function \type\()_h264_qpel8_v_lowpass_neon
-        vld1.64         {d8},  [r1], r3
-        vld1.64         {d10}, [r1], r3
-        vld1.64         {d12}, [r1], r3
-        vld1.64         {d14}, [r1], r3
-        vld1.64         {d22}, [r1], r3
-        vld1.64         {d24}, [r1], r3
-        vld1.64         {d26}, [r1], r3
-        vld1.64         {d28}, [r1], r3
-        vld1.64         {d9},  [r1], r3
-        vld1.64         {d11}, [r1], r3
-        vld1.64         {d13}, [r1], r3
-        vld1.64         {d15}, [r1], r3
-        vld1.64         {d23}, [r1]
+        vld1.8          {d8},  [r1], r3
+        vld1.8          {d10}, [r1], r3
+        vld1.8          {d12}, [r1], r3
+        vld1.8          {d14}, [r1], r3
+        vld1.8          {d22}, [r1], r3
+        vld1.8          {d24}, [r1], r3
+        vld1.8          {d26}, [r1], r3
+        vld1.8          {d28}, [r1], r3
+        vld1.8          {d9},  [r1], r3
+        vld1.8          {d11}, [r1], r3
+        vld1.8          {d13}, [r1], r3
+        vld1.8          {d15}, [r1], r3
+        vld1.8          {d23}, [r1]
 
         transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
         lowpass_8       d8,  d9,  d10, d11, d8,  d10
@@ -892,7 +853,7 @@ function \type\()_h264_qpel8_v_lowpass_neon
         lowpass_8       d26, d27, d28, d29, d26, d28
         transpose_8x8   d8,  d10, d12, d14, d22, d24, d26, d28
 
-.ifc \type,avg
+  .ifc \type,avg
         vld1.8          {d9},  [r0,:64], r2
         vrhadd.u8       d8,  d8,  d9
         vld1.8          {d11}, [r0,:64], r2
@@ -910,34 +871,34 @@ function \type\()_h264_qpel8_v_lowpass_neon
         vld1.8          {d29}, [r0,:64], r2
         vrhadd.u8       d28, d28, d29
         sub             r0,  r0,  r2,  lsl #3
-.endif
+  .endif
 
-        vst1.64         {d8},  [r0,:64], r2
-        vst1.64         {d10}, [r0,:64], r2
-        vst1.64         {d12}, [r0,:64], r2
-        vst1.64         {d14}, [r0,:64], r2
-        vst1.64         {d22}, [r0,:64], r2
-        vst1.64         {d24}, [r0,:64], r2
-        vst1.64         {d26}, [r0,:64], r2
-        vst1.64         {d28}, [r0,:64], r2
+        vst1.8          {d8},  [r0,:64], r2
+        vst1.8          {d10}, [r0,:64], r2
+        vst1.8          {d12}, [r0,:64], r2
+        vst1.8          {d14}, [r0,:64], r2
+        vst1.8          {d22}, [r0,:64], r2
+        vst1.8          {d24}, [r0,:64], r2
+        vst1.8          {d26}, [r0,:64], r2
+        vst1.8          {d28}, [r0,:64], r2
 
         bx              lr
 endfunc
-        .endm
+.endm
 
         h264_qpel_v_lowpass put
         h264_qpel_v_lowpass avg
 
-        .macro h264_qpel_v_lowpass_l2 type
+.macro  h264_qpel_v_lowpass_l2 type
 function \type\()_h264_qpel16_v_lowpass_l2_neon
         mov             r4,  lr
         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
         sub             r1,  r1,  r3, lsl #2
         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
         sub             r0,  r0,  r3, lsl #4
-        sub             ip,  ip,  r2, lsl #4
+        sub             r12, r12, r2, lsl #4
         add             r0,  r0,  #8
-        add             ip,  ip,  #8
+        add             r12, r12, #8
         sub             r1,  r1,  r3, lsl #4
         sub             r1,  r1,  r3, lsl #2
         add             r1,  r1,  #8
@@ -947,19 +908,19 @@ function \type\()_h264_qpel16_v_lowpass_l2_neon
 endfunc
 
 function \type\()_h264_qpel8_v_lowpass_l2_neon
-        vld1.64         {d8},  [r1], r3
-        vld1.64         {d10}, [r1], r3
-        vld1.64         {d12}, [r1], r3
-        vld1.64         {d14}, [r1], r3
-        vld1.64         {d22}, [r1], r3
-        vld1.64         {d24}, [r1], r3
-        vld1.64         {d26}, [r1], r3
-        vld1.64         {d28}, [r1], r3
-        vld1.64         {d9},  [r1], r3
-        vld1.64         {d11}, [r1], r3
-        vld1.64         {d13}, [r1], r3
-        vld1.64         {d15}, [r1], r3
-        vld1.64         {d23}, [r1]
+        vld1.8          {d8},  [r1], r3
+        vld1.8          {d10}, [r1], r3
+        vld1.8          {d12}, [r1], r3
+        vld1.8          {d14}, [r1], r3
+        vld1.8          {d22}, [r1], r3
+        vld1.8          {d24}, [r1], r3
+        vld1.8          {d26}, [r1], r3
+        vld1.8          {d28}, [r1], r3
+        vld1.8          {d9},  [r1], r3
+        vld1.8          {d11}, [r1], r3
+        vld1.8          {d13}, [r1], r3
+        vld1.8          {d15}, [r1], r3
+        vld1.8          {d23}, [r1]
 
         transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
         lowpass_8       d8,  d9,  d10, d11, d8,  d9
@@ -968,20 +929,20 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon
         lowpass_8       d26, d27, d28, d29, d26, d27
         transpose_8x8   d8,  d9,  d12, d13, d22, d23, d26, d27
 
-        vld1.64         {d0},  [ip], r2
-        vld1.64         {d1},  [ip], r2
-        vld1.64         {d2},  [ip], r2
-        vld1.64         {d3},  [ip], r2
-        vld1.64         {d4},  [ip], r2
+        vld1.8          {d0},  [r12], r2
+        vld1.8          {d1},  [r12], r2
+        vld1.8          {d2},  [r12], r2
+        vld1.8          {d3},  [r12], r2
+        vld1.8          {d4},  [r12], r2
         vrhadd.u8       q0,  q0,  q4
-        vld1.64         {d5},  [ip], r2
+        vld1.8          {d5},  [r12], r2
         vrhadd.u8       q1,  q1,  q6
-        vld1.64         {d10}, [ip], r2
+        vld1.8          {d10}, [r12], r2
         vrhadd.u8       q2,  q2,  q11
-        vld1.64         {d11}, [ip], r2
+        vld1.8          {d11}, [r12], r2
         vrhadd.u8       q5,  q5,  q13
 
-.ifc \type,avg
+  .ifc \type,avg
         vld1.8          {d16}, [r0,:64], r3
         vrhadd.u8       d0,  d0,  d16
         vld1.8          {d17}, [r0,:64], r3
@@ -999,51 +960,51 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon
         vld1.8          {d17}, [r0,:64], r3
         vrhadd.u8       d11, d11, d17
         sub             r0,  r0,  r3,  lsl #3
-.endif
+  .endif
 
-        vst1.64         {d0},  [r0,:64], r3
-        vst1.64         {d1},  [r0,:64], r3
-        vst1.64         {d2},  [r0,:64], r3
-        vst1.64         {d3},  [r0,:64], r3
-        vst1.64         {d4},  [r0,:64], r3
-        vst1.64         {d5},  [r0,:64], r3
-        vst1.64         {d10}, [r0,:64], r3
-        vst1.64         {d11}, [r0,:64], r3
+        vst1.8          {d0},  [r0,:64], r3
+        vst1.8          {d1},  [r0,:64], r3
+        vst1.8          {d2},  [r0,:64], r3
+        vst1.8          {d3},  [r0,:64], r3
+        vst1.8          {d4},  [r0,:64], r3
+        vst1.8          {d5},  [r0,:64], r3
+        vst1.8          {d10}, [r0,:64], r3
+        vst1.8          {d11}, [r0,:64], r3
 
         bx              lr
 endfunc
-        .endm
+.endm
 
         h264_qpel_v_lowpass_l2 put
         h264_qpel_v_lowpass_l2 avg
 
 function put_h264_qpel8_hv_lowpass_neon_top
-        lowpass_const   ip
-        mov             ip,  #12
-1:      vld1.64         {d0, d1},  [r1], r3
-        vld1.64         {d16,d17}, [r1], r3
-        subs            ip,  ip,  #2
+        lowpass_const   r12
+        mov             r12, #12
+1:      vld1.8          {d0, d1},  [r1], r3
+        vld1.8          {d16,d17}, [r1], r3
+        subs            r12, r12, #2
         lowpass_8       d0,  d1,  d16, d17, q11, q12, narrow=0
-        vst1.64         {d22-d25}, [r4,:128]!
+        vst1.8          {d22-d25}, [r4,:128]!
         bne             1b
 
-        vld1.64         {d0, d1},  [r1]
+        vld1.8          {d0, d1},  [r1]
         lowpass_8_1     d0,  d1,  q12, narrow=0
 
-        mov             ip,  #-16
-        add             r4,  r4,  ip
-        vld1.64         {d30,d31}, [r4,:128], ip
-        vld1.64         {d20,d21}, [r4,:128], ip
-        vld1.64         {d18,d19}, [r4,:128], ip
-        vld1.64         {d16,d17}, [r4,:128], ip
-        vld1.64         {d14,d15}, [r4,:128], ip
-        vld1.64         {d12,d13}, [r4,:128], ip
-        vld1.64         {d10,d11}, [r4,:128], ip
-        vld1.64         {d8, d9},  [r4,:128], ip
-        vld1.64         {d6, d7},  [r4,:128], ip
-        vld1.64         {d4, d5},  [r4,:128], ip
-        vld1.64         {d2, d3},  [r4,:128], ip
-        vld1.64         {d0, d1},  [r4,:128]
+        mov             r12, #-16
+        add             r4,  r4,  r12
+        vld1.8          {d30,d31}, [r4,:128], r12
+        vld1.8          {d20,d21}, [r4,:128], r12
+        vld1.8          {d18,d19}, [r4,:128], r12
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d14,d15}, [r4,:128], r12
+        vld1.8          {d12,d13}, [r4,:128], r12
+        vld1.8          {d10,d11}, [r4,:128], r12
+        vld1.8          {d8, d9},  [r4,:128], r12
+        vld1.8          {d6, d7},  [r4,:128], r12
+        vld1.8          {d4, d5},  [r4,:128], r12
+        vld1.8          {d2, d3},  [r4,:128], r12
+        vld1.8          {d0, d1},  [r4,:128]
 
         swap4           d1,  d3,  d5,  d7,  d8,  d10, d12, d14
         transpose16_4x4 q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
@@ -1051,31 +1012,31 @@ function put_h264_qpel8_hv_lowpass_neon_top
         swap4           d17, d19, d21, d31, d24, d26, d28, d22
         transpose16_4x4 q8,  q9,  q10, q15, q12, q13, q14, q11
 
-        vst1.64         {d30,d31}, [r4,:128]!
-        vst1.64         {d6, d7},  [r4,:128]!
-        vst1.64         {d20,d21}, [r4,:128]!
-        vst1.64         {d4, d5},  [r4,:128]!
-        vst1.64         {d18,d19}, [r4,:128]!
-        vst1.64         {d2, d3},  [r4,:128]!
-        vst1.64         {d16,d17}, [r4,:128]!
-        vst1.64         {d0, d1},  [r4,:128]
+        vst1.8          {d30,d31}, [r4,:128]!
+        vst1.8          {d6, d7},  [r4,:128]!
+        vst1.8          {d20,d21}, [r4,:128]!
+        vst1.8          {d4, d5},  [r4,:128]!
+        vst1.8          {d18,d19}, [r4,:128]!
+        vst1.8          {d2, d3},  [r4,:128]!
+        vst1.8          {d16,d17}, [r4,:128]!
+        vst1.8          {d0, d1},  [r4,:128]
 
         lowpass_8.16    q4,  q12, d8,  d9,  d24, d25, d8
         lowpass_8.16    q5,  q13, d10, d11, d26, d27, d9
         lowpass_8.16    q6,  q14, d12, d13, d28, d29, d10
         lowpass_8.16    q7,  q11, d14, d15, d22, d23, d11
 
-        vld1.64         {d16,d17}, [r4,:128], ip
-        vld1.64         {d30,d31}, [r4,:128], ip
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128], r12
         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d12
-        vld1.64         {d16,d17}, [r4,:128], ip
-        vld1.64         {d30,d31}, [r4,:128], ip
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128], r12
         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d13
-        vld1.64         {d16,d17}, [r4,:128], ip
-        vld1.64         {d30,d31}, [r4,:128], ip
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128], r12
         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d14
-        vld1.64         {d16,d17}, [r4,:128], ip
-        vld1.64         {d30,d31}, [r4,:128]
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128]
         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d15
 
         transpose_8x8   d12, d13, d14, d15, d8,  d9,  d10, d11
@@ -1083,11 +1044,11 @@ function put_h264_qpel8_hv_lowpass_neon_top
         bx              lr
 endfunc
 
-        .macro h264_qpel8_hv_lowpass type
+.macro  h264_qpel8_hv_lowpass type
 function \type\()_h264_qpel8_hv_lowpass_neon
         mov             r10, lr
         bl              put_h264_qpel8_hv_lowpass_neon_top
-.ifc \type,avg
+  .ifc \type,avg
         vld1.8          {d0},      [r0,:64], r2
         vrhadd.u8       d12, d12, d0
         vld1.8          {d1},      [r0,:64], r2
@@ -1105,39 +1066,39 @@ function \type\()_h264_qpel8_hv_lowpass_neon
         vld1.8          {d7},      [r0,:64], r2
         vrhadd.u8       d11, d11, d7
         sub             r0,  r0,  r2,  lsl #3
-.endif
+  .endif
 
-        vst1.64         {d12},     [r0,:64], r2
-        vst1.64         {d13},     [r0,:64], r2
-        vst1.64         {d14},     [r0,:64], r2
-        vst1.64         {d15},     [r0,:64], r2
-        vst1.64         {d8},      [r0,:64], r2
-        vst1.64         {d9},      [r0,:64], r2
-        vst1.64         {d10},     [r0,:64], r2
-        vst1.64         {d11},     [r0,:64], r2
+        vst1.8          {d12},     [r0,:64], r2
+        vst1.8          {d13},     [r0,:64], r2
+        vst1.8          {d14},     [r0,:64], r2
+        vst1.8          {d15},     [r0,:64], r2
+        vst1.8          {d8},      [r0,:64], r2
+        vst1.8          {d9},      [r0,:64], r2
+        vst1.8          {d10},     [r0,:64], r2
+        vst1.8          {d11},     [r0,:64], r2
 
         mov             lr,  r10
         bx              lr
 endfunc
-        .endm
+.endm
 
         h264_qpel8_hv_lowpass put
         h264_qpel8_hv_lowpass avg
 
-        .macro h264_qpel8_hv_lowpass_l2 type
+.macro  h264_qpel8_hv_lowpass_l2 type
 function \type\()_h264_qpel8_hv_lowpass_l2_neon
         mov             r10, lr
         bl              put_h264_qpel8_hv_lowpass_neon_top
 
-        vld1.64         {d0, d1},  [r2,:128]!
-        vld1.64         {d2, d3},  [r2,:128]!
+        vld1.8          {d0, d1},  [r2,:128]!
+        vld1.8          {d2, d3},  [r2,:128]!
         vrhadd.u8       q0,  q0,  q6
-        vld1.64         {d4, d5},  [r2,:128]!
+        vld1.8          {d4, d5},  [r2,:128]!
         vrhadd.u8       q1,  q1,  q7
-        vld1.64         {d6, d7},  [r2,:128]!
+        vld1.8          {d6, d7},  [r2,:128]!
         vrhadd.u8       q2,  q2,  q4
         vrhadd.u8       q3,  q3,  q5
-.ifc \type,avg
+  .ifc \type,avg
         vld1.8          {d16},     [r0,:64], r3
         vrhadd.u8       d0,  d0,  d16
         vld1.8          {d17},     [r0,:64], r3
@@ -1155,25 +1116,25 @@ function \type\()_h264_qpel8_hv_lowpass_l2_neon
         vld1.8          {d23},     [r0,:64], r3
         vrhadd.u8       d7,  d7,  d23
         sub             r0,  r0,  r3,  lsl #3
-.endif
-        vst1.64         {d0},      [r0,:64], r3
-        vst1.64         {d1},      [r0,:64], r3
-        vst1.64         {d2},      [r0,:64], r3
-        vst1.64         {d3},      [r0,:64], r3
-        vst1.64         {d4},      [r0,:64], r3
-        vst1.64         {d5},      [r0,:64], r3
-        vst1.64         {d6},      [r0,:64], r3
-        vst1.64         {d7},      [r0,:64], r3
+  .endif
+        vst1.8          {d0},      [r0,:64], r3
+        vst1.8          {d1},      [r0,:64], r3
+        vst1.8          {d2},      [r0,:64], r3
+        vst1.8          {d3},      [r0,:64], r3
+        vst1.8          {d4},      [r0,:64], r3
+        vst1.8          {d5},      [r0,:64], r3
+        vst1.8          {d6},      [r0,:64], r3
+        vst1.8          {d7},      [r0,:64], r3
 
         mov             lr,  r10
         bx              lr
 endfunc
-        .endm
+.endm
 
         h264_qpel8_hv_lowpass_l2 put
         h264_qpel8_hv_lowpass_l2 avg
 
-        .macro h264_qpel16_hv type
+.macro  h264_qpel16_hv  type
 function \type\()_h264_qpel16_hv_lowpass_neon
         mov             r9,  lr
         bl              \type\()_h264_qpel8_hv_lowpass_neon
@@ -1206,17 +1167,17 @@ function \type\()_h264_qpel16_hv_lowpass_l2_neon
         mov             lr,  r9
         b               \type\()_h264_qpel8_hv_lowpass_l2_neon
 endfunc
-        .endm
+.endm
 
         h264_qpel16_hv put
         h264_qpel16_hv avg
 
-        .macro h264_qpel8 type
+.macro  h264_qpel8      type
 function ff_\type\()_h264_qpel8_mc10_neon, export=1
         lowpass_const   r3
         mov             r3,  r1
         sub             r1,  r1,  #2
-        mov             ip,  #8
+        mov             r12, #8
         b               \type\()_h264_qpel8_h_lowpass_l2_neon
 endfunc
 
@@ -1224,7 +1185,7 @@ function ff_\type\()_h264_qpel8_mc20_neon, export=1
         lowpass_const   r3
         sub             r1,  r1,  #2
         mov             r3,  r2
-        mov             ip,  #8
+        mov             r12, #8
         b               \type\()_h264_qpel8_h_lowpass_neon
 endfunc
 
@@ -1232,13 +1193,13 @@ function ff_\type\()_h264_qpel8_mc30_neon, export=1
         lowpass_const   r3
         add             r3,  r1,  #1
         sub             r1,  r1,  #2
-        mov             ip,  #8
+        mov             r12, #8
         b               \type\()_h264_qpel8_h_lowpass_l2_neon
 endfunc
 
 function ff_\type\()_h264_qpel8_mc01_neon, export=1
         push            {lr}
-        mov             ip,  r1
+        mov             r12, r1
 \type\()_h264_qpel8_mc01:
         lowpass_const   r3
         mov             r3,  r2
@@ -1261,12 +1222,12 @@ T       mov             sp,  r0
         mov             r0,  sp
         sub             r1,  r1,  #2
         mov             r3,  #8
-        mov             ip,  #8
+        mov             r12, #8
         vpush           {d8-d15}
         bl              put_h264_qpel8_h_lowpass_neon
         ldrd            r0,  [r11], #8
         mov             r3,  r2
-        add             ip,  sp,  #64
+        add             r12, sp,  #64
         sub             r1,  r1,  r2, lsl #1
         mov             r2,  #8
         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
@@ -1287,7 +1248,7 @@ T       mov             sp,  r0
         sub             r1,  r1,  #2
         mov             r3,  #8
         mov             r0,  sp
-        mov             ip,  #8
+        mov             r12, #8
         vpush           {d8-d15}
         bl              put_h264_qpel8_h_lowpass_neon
         mov             r4,  r0
@@ -1372,7 +1333,7 @@ endfunc
 
 function ff_\type\()_h264_qpel8_mc03_neon, export=1
         push            {lr}
-        add             ip,  r1,  r2
+        add             r12, r1,  r2
         b               \type\()_h264_qpel8_mc01
 endfunc
 
@@ -1395,12 +1356,12 @@ function ff_\type\()_h264_qpel8_mc33_neon, export=1
         sub             r1,  r1,  #1
         b               \type\()_h264_qpel8_mc11
 endfunc
-        .endm
+.endm
 
         h264_qpel8 put
         h264_qpel8 avg
 
-        .macro h264_qpel16 type
+.macro  h264_qpel16     type
 function ff_\type\()_h264_qpel16_mc10_neon, export=1
         lowpass_const   r3
         mov             r3,  r1
@@ -1424,7 +1385,7 @@ endfunc
 
 function ff_\type\()_h264_qpel16_mc01_neon, export=1
         push            {r4, lr}
-        mov             ip,  r1
+        mov             r12, r1
 \type\()_h264_qpel16_mc01:
         lowpass_const   r3
         mov             r3,  r2
@@ -1451,7 +1412,7 @@ T       mov             sp,  r0
         bl              put_h264_qpel16_h_lowpass_neon
         ldrd            r0,  [r11], #8
         mov             r3,  r2
-        add             ip,  sp,  #64
+        add             r12, sp,  #64
         sub             r1,  r1,  r2, lsl #1
         mov             r2,  #16
         bl              \type\()_h264_qpel16_v_lowpass_l2_neon
@@ -1554,7 +1515,7 @@ endfunc
 
 function ff_\type\()_h264_qpel16_mc03_neon, export=1
         push            {r4, lr}
-        add             ip,  r1,  r2
+        add             r12, r1,  r2
         b               \type\()_h264_qpel16_mc01
 endfunc
 
@@ -1577,14 +1538,14 @@ function ff_\type\()_h264_qpel16_mc33_neon, export=1
         sub             r1,  r1,  #1
         b               \type\()_h264_qpel16_mc11
 endfunc
-        .endm
+.endm
 
         h264_qpel16 put
         h264_qpel16 avg
 
 @ Biweighted prediction
 
-        .macro  biweight_16 macs, macd
+.macro  biweight_16     macs, macd
         vdup.8          d0,  r4
         vdup.8          d1,  r5
         vmov            q2,  q8
@@ -1622,9 +1583,9 @@ endfunc
         vst1.8          {d24-d25},[r6,:128], r2
         bne             1b
         pop             {r4-r6, pc}
-        .endm
+.endm
 
-        .macro  biweight_8 macs, macd
+.macro  biweight_8      macs, macd
         vdup.8          d0,  r4
         vdup.8          d1,  r5
         vmov            q1,  q8
@@ -1652,9 +1613,9 @@ endfunc
         vst1.8          {d4},[r6,:64], r2
         bne             1b
         pop             {r4-r6, pc}
-        .endm
+.endm
 
-        .macro  biweight_4 macs, macd
+.macro  biweight_4      macs, macd
         vdup.8          d0,  r4
         vdup.8          d1,  r5
         vmov            q1,  q8
@@ -1694,9 +1655,9 @@ endfunc
         vst1.32         {d2[0]},[r6,:32], r2
         vst1.32         {d2[1]},[r6,:32], r2
         pop             {r4-r6, pc}
-        .endm
+.endm
 
-        .macro  biweight_func w
+.macro  biweight_func   w
 function ff_biweight_h264_pixels_\w\()_neon, export=1
         push            {r4-r6, lr}
         ldr             r12, [sp, #16]
@@ -1726,7 +1687,7 @@ function ff_biweight_h264_pixels_\w\()_neon, export=1
 40:     rsb             r5,  r5,  #0
         biweight_\w     vmlsl.u8, vmlal.u8
 endfunc
-        .endm
+.endm
 
         biweight_func   16
         biweight_func   8
@@ -1734,7 +1695,7 @@ endfunc
 
 @ Weighted prediction
 
-        .macro  weight_16 add
+.macro  weight_16       add
         vdup.8          d0,  r12
 1:      subs            r2,  r2,  #2
         vld1.8          {d20-d21},[r0,:128], r1
@@ -1761,9 +1722,9 @@ endfunc
         vst1.8          {d24-d25},[r4,:128], r1
         bne             1b
         pop             {r4, pc}
-        .endm
+.endm
 
-        .macro  weight_8 add
+.macro  weight_8        add
         vdup.8          d0,  r12
 1:      subs            r2,  r2,  #2
         vld1.8          {d4},[r0,:64], r1
@@ -1782,9 +1743,9 @@ endfunc
         vst1.8          {d4},[r4,:64], r1
         bne             1b
         pop             {r4, pc}
-        .endm
+.endm
 
-        .macro  weight_4 add
+.macro  weight_4        add
         vdup.8          d0,  r12
         vmov            q1,  q8
         vmov            q10, q8
@@ -1818,9 +1779,9 @@ endfunc
         vst1.32         {d2[0]},[r4,:32], r1
         vst1.32         {d2[1]},[r4,:32], r1
         pop             {r4, pc}
-        .endm
+.endm
 
-        .macro  weight_func w
+.macro  weight_func     w
 function ff_weight_h264_pixels_\w\()_neon, export=1
         push            {r4, lr}
         ldr             r12, [sp, #8]
@@ -1845,7 +1806,7 @@ function ff_weight_h264_pixels_\w\()_neon, export=1
 10:     rsb             r12, r12, #0
         weight_\w       vsub.s16
 endfunc
-        .endm
+.endm
 
         weight_func     16
         weight_func     8
diff --git a/libavcodec/arm/neon.S b/libavcodec/arm/neon.S
new file mode 100644
index 0000000000000000000000000000000000000000..716a607af77b4492494e83df332442b7ad4cbc0c
--- /dev/null
+++ b/libavcodec/arm/neon.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.macro  transpose_8x8   r0, r1, r2, r3, r4, r5, r6, r7
+        vtrn.32         \r0, \r4
+        vtrn.32         \r1, \r5
+        vtrn.32         \r2, \r6
+        vtrn.32         \r3, \r7
+        vtrn.16         \r0, \r2
+        vtrn.16         \r1, \r3
+        vtrn.16         \r4, \r6
+        vtrn.16         \r5, \r7
+        vtrn.8          \r0, \r1
+        vtrn.8          \r2, \r3
+        vtrn.8          \r4, \r5
+        vtrn.8          \r6, \r7
+.endm
+
+.macro  transpose_4x4   r0, r1, r2, r3
+        vtrn.16         \r0, \r2
+        vtrn.16         \r1, \r3
+        vtrn.8          \r0, \r1
+        vtrn.8          \r2, \r3
+.endm
+
+.macro  swap4           r0, r1, r2, r3, r4, r5, r6, r7
+        vswp            \r0, \r4
+        vswp            \r1, \r5
+        vswp            \r2, \r6
+        vswp            \r3, \r7
+.endm
+
+.macro  transpose16_4x4 r0, r1, r2, r3, r4, r5, r6, r7
+        vtrn.32         \r0, \r2
+        vtrn.32         \r1, \r3
+        vtrn.32         \r4, \r6
+        vtrn.32         \r5, \r7
+        vtrn.16         \r0, \r1
+        vtrn.16         \r2, \r3
+        vtrn.16         \r4, \r5
+        vtrn.16         \r6, \r7
+.endm
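
For reference only (not part of the patch): with eight 64-bit d registers holding one row of bytes each, the new transpose_8x8 macro performs the same operation as the plain C routine below, just expressed as three rounds of vtrn at 32-, 16- and finally 8-bit granularity. The function name is invented for illustration.

    #include <stdint.h>

    /* Scalar equivalent of an 8x8 byte transpose, illustration only. */
    static void transpose_8x8_bytes(uint8_t m[8][8])
    {
        int i, j;

        for (i = 0; i < 8; i++) {
            for (j = i + 1; j < 8; j++) {
                uint8_t tmp = m[i][j];
                m[i][j] = m[j][i];
                m[j][i] = tmp;
            }
        }
    }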
diff --git a/libavcodec/arm/vp8dsp_neon.S b/libavcodec/arm/vp8dsp_neon.S
index 1fb3753aabf1b09b933cef105d56a68b8ade7e79..b4ab1c70328d67856655fbedaf27e8e5629b07a7 100644
--- a/libavcodec/arm/vp8dsp_neon.S
+++ b/libavcodec/arm/vp8dsp_neon.S
@@ -22,6 +22,7 @@
  */
 
 #include "asm.S"
+#include "neon.S"
 
 function ff_vp8_luma_dc_wht_neon, export=1
         vld1.16         {q0-q1},  [r1,:128]
@@ -442,23 +443,6 @@ endfunc
     .endif
 .endm
 
-.macro transpose8x16matrix
-        vtrn.32         q0,   q4
-        vtrn.32         q1,   q5
-        vtrn.32         q2,   q6
-        vtrn.32         q3,   q7
-
-        vtrn.16         q0,   q2
-        vtrn.16         q1,   q3
-        vtrn.16         q4,   q6
-        vtrn.16         q5,   q7
-
-        vtrn.8          q0,   q1
-        vtrn.8          q2,   q3
-        vtrn.8          q4,   q5
-        vtrn.8          q6,   q7
-.endm
-
 .macro  vp8_v_loop_filter16 name, inner=0, simple=0
 function ff_vp8_v_loop_filter16\name\()_neon, export=1
         vpush           {q4-q7}
@@ -593,7 +577,7 @@ function ff_vp8_h_loop_filter16\name\()_neon, export=1
         vld1.8          {d13},    [r0], r1
         vld1.8          {d15},    [r0], r1
 
-        transpose8x16matrix
+        transpose_8x8   q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
 
         vdup.8          q14, r2                 @ flim_E
     .if !\simple
@@ -604,7 +588,7 @@ function ff_vp8_h_loop_filter16\name\()_neon, export=1
 
         sub             r0,  r0,  r1, lsl #4    @ backup 16 rows
 
-        transpose8x16matrix
+        transpose_8x8   q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
 
         @ Store pixels:
         vst1.8          {d0},     [r0],     r1
@@ -658,7 +642,7 @@ function ff_vp8_h_loop_filter8uv\name\()_neon, export=1
         vld1.8          {d14},    [r0], r2
         vld1.8          {d15},    [r1], r2
 
-        transpose8x16matrix
+        transpose_8x8   q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
 
         vdup.8          q14, r3                 @ flim_E
         vdup.8          q15, r12                @ flim_I
@@ -669,7 +653,7 @@ function ff_vp8_h_loop_filter8uv\name\()_neon, export=1
         sub             r0,  r0,  r2, lsl #3    @ backup u 8 rows
         sub             r1,  r1,  r2, lsl #3    @ backup v 8 rows
 
-        transpose8x16matrix
+        transpose_8x8   q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
 
         @ Store pixels:
         vst1.8          {d0},     [r0], r2
diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c
index f341b1c554fc8cdfccae2dc47970314c09ea8f81..c796235c803c4cacb226382cb9e4a9c11a457d87 100644
--- a/libavcodec/atrac1.c
+++ b/libavcodec/atrac1.c
@@ -72,6 +72,7 @@ typedef struct {
  * The atrac1 context, holds all needed parameters for decoding
  */
 typedef struct {
+    AVFrame frame;
     AT1SUCtx            SUs[AT1_MAX_CHANNELS];              ///< channel sound unit
     DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES];      ///< the mdct spectrum buffer
 
@@ -273,14 +274,14 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
 
 
 static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
-                               int *data_size, AVPacket *avpkt)
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     AT1Ctx *q          = avctx->priv_data;
-    int ch, ret, out_size;
+    int ch, ret;
     GetBitContext gb;
-    float* samples = data;
+    float *samples;
 
 
     if (buf_size < 212 * q->channels) {
@@ -288,12 +289,13 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = q->channels * AT1_SU_SAMPLES *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    q->frame.nb_samples = AT1_SU_SAMPLES;
+    if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (float *)q->frame.data[0];
 
     for (ch = 0; ch < q->channels; ch++) {
         AT1SUCtx* su = &q->SUs[ch];
@@ -321,7 +323,9 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
                                      AT1_SU_SAMPLES, 2);
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = q->frame;
+
     return avctx->block_align;
 }
 
@@ -389,6 +393,9 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)
     q->SUs[1].spectrum[0] = q->SUs[1].spec1;
     q->SUs[1].spectrum[1] = q->SUs[1].spec2;
 
+    avcodec_get_frame_defaults(&q->frame);
+    avctx->coded_frame = &q->frame;
+
     return 0;
 }
 
@@ -401,5 +408,6 @@ AVCodec ff_atrac1_decoder = {
     .init = atrac1_decode_init,
     .close = atrac1_decode_end,
     .decode = atrac1_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Atrac 1 (Adaptive TRansform Acoustic Coding)"),
 };
diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
index 25beeeeb6c269f7f942f963cd2291679ebeb2cf7..ccbb718e002cf7f5f821d5afe751675c09faddfb 100644
--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -86,6 +86,7 @@ typedef struct {
 } channel_unit;
 
 typedef struct {
+    AVFrame             frame;
     GetBitContext       gb;
     //@{
     /** stream data */
@@ -823,16 +824,16 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
  * @param avctx     pointer to the AVCodecContext
  */
 
-static int atrac3_decode_frame(AVCodecContext *avctx,
-            void *data, int *data_size,
-            AVPacket *avpkt) {
+static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
+                               int *got_frame_ptr, AVPacket *avpkt)
+{
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ATRAC3Context *q = avctx->priv_data;
-    int result = 0, out_size;
+    int result;
     const uint8_t* databuf;
-    float   *samples_flt = data;
-    int16_t *samples_s16 = data;
+    float   *samples_flt;
+    int16_t *samples_s16;
 
     if (buf_size < avctx->block_align) {
         av_log(avctx, AV_LOG_ERROR,
@@ -840,12 +841,14 @@ static int atrac3_decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = SAMPLES_PER_FRAME * q->channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    q->frame.nb_samples = SAMPLES_PER_FRAME;
+    if ((result = avctx->get_buffer(avctx, &q->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return result;
     }
+    samples_flt = (float   *)q->frame.data[0];
+    samples_s16 = (int16_t *)q->frame.data[0];
 
     /* Check if we need to descramble and what buffer to pass on. */
     if (q->scrambled_stream) {
@@ -875,7 +878,9 @@ static int atrac3_decode_frame(AVCodecContext *avctx,
                                               (const float **)q->outSamples,
                                               SAMPLES_PER_FRAME, q->channels);
     }
-    *data_size = out_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = q->frame;
 
     return avctx->block_align;
 }
@@ -1047,6 +1052,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
         }
     }
 
+    avcodec_get_frame_defaults(&q->frame);
+    avctx->coded_frame = &q->frame;
+
     return 0;
 }
 
@@ -1060,6 +1068,6 @@ AVCodec ff_atrac3_decoder =
     .init = atrac3_decode_init,
     .close = atrac3_decode_close,
     .decode = atrac3_decode_frame,
-    .capabilities = CODEC_CAP_SUBFRAMES,
+    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"),
 };
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 1381a3e0181e429bc46c617648fce0fff509feff..dfd15e568337fca94880b769be4471a4735fdf77 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -491,8 +491,10 @@ enum CodecID {
 #define CH_LAYOUT_STEREO_DOWNMIX AV_CH_LAYOUT_STEREO_DOWNMIX
 #endif
 
+#if FF_API_OLD_DECODE_AUDIO
 /* in bytes */
 #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+#endif
 
 /**
  * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
@@ -947,21 +949,37 @@ typedef struct AVPacket {
  * sizeof(AVFrame) must not be used outside libav*.
  */
 typedef struct AVFrame {
+#if FF_API_DATA_POINTERS
+#define AV_NUM_DATA_POINTERS 4
+#else
+#define AV_NUM_DATA_POINTERS 8
+#endif
     /**
-     * pointer to the picture planes.
+     * pointer to the picture/channel planes.
      * This might be different from the first allocated byte
-     * - encoding:
-     * - decoding:
+     * - encoding: Set by user
+     * - decoding: set by AVCodecContext.get_buffer()
+     */
+    uint8_t *data[AV_NUM_DATA_POINTERS];
+
+    /**
+     * Size, in bytes, of the data for each picture/channel plane.
+     *
+     * For audio, only linesize[0] may be set. For planar audio, each channel
+     * plane must be the same size.
+     *
+     * - encoding: Set by user (video only)
+     * - decoding: set by AVCodecContext.get_buffer()
      */
-    uint8_t *data[4];
-    int linesize[4];
+    int linesize[AV_NUM_DATA_POINTERS];
+
     /**
      * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
      * This isn't used by libavcodec unless the default get/release_buffer() is used.
      * - encoding:
      * - decoding:
      */
-    uint8_t *base[4];
+    uint8_t *base[AV_NUM_DATA_POINTERS];
     /**
      * 1 -> keyframe, 0-> not
      * - encoding: Set by libavcodec.
@@ -1008,7 +1026,7 @@ typedef struct AVFrame {
      * buffer age (1->was last buffer and dint change, 2->..., ...).
      * Set to INT_MAX if the buffer has not been used yet.
      * - encoding: unused
-     * - decoding: MUST be set by get_buffer().
+     * - decoding: MUST be set by get_buffer() for video.
      */
     int age;
 
@@ -1085,7 +1103,7 @@ typedef struct AVFrame {
      * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
      * - decoding: unused
      */
-    uint64_t error[4];
+    uint64_t error[AV_NUM_DATA_POINTERS];
 
     /**
      * type of the buffer (to keep track of who has to deallocate data[*])
@@ -1206,6 +1224,33 @@ typedef struct AVFrame {
      */
     void *thread_opaque;
 
+    /**
+     * number of audio samples (per channel) described by this frame
+     * - encoding: unused
+     * - decoding: Set by libavcodec
+     */
+    int nb_samples;
+
+    /**
+     * pointers to the data planes/channels.
+     *
+     * For video, this should simply point to data[].
+     *
+     * For planar audio, each channel has a separate data pointer, and
+     * linesize[0] contains the size of each channel buffer.
+     * For packed audio, there is just one data pointer, and linesize[0]
+     * contains the total size of the buffer for all channels.
+     *
+     * Note: Both data and extended_data will always be set by get_buffer(),
+     * but for planar audio with more channels that can fit in data,
+     * extended_data must be used by the decoder in order to access all
+     * channels.
+     *
+     * - encoding: unused
+     * - decoding: set by AVCodecContext.get_buffer()
+     */
+    uint8_t **extended_data;
+
     /**
      * frame timestamp estimated using various heuristics, in stream time base
      * - encoding: unused
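
An aside, not part of the patch: under the extended_data/linesize[0] semantics documented in this hunk, a caller that wants the total payload of a decoded planar frame could walk the planes as in the sketch below. The function name is invented for illustration; for packed formats only extended_data[0] is valid and linesize[0] already covers all channels.

    #include <stdint.h>
    #include <libavcodec/avcodec.h>

    /* Illustration only: sum the bytes referenced by a decoded planar
     * audio frame. Each channel plane is linesize[0] bytes long and is
     * reached through extended_data[], never through data[] alone. */
    static int64_t planar_frame_size(AVCodecContext *avctx, AVFrame *frame)
    {
        int64_t total = 0;
        int ch;

        for (ch = 0; ch < avctx->channels; ch++)
            if (frame->extended_data[ch])
                total += frame->linesize[0];

        return total;
    }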
@@ -1379,7 +1424,7 @@ typedef struct AVCodecContext {
      * @param offset offset into the AVFrame.data from which the slice should be read
      */
     void (*draw_horiz_band)(struct AVCodecContext *s,
-                            const AVFrame *src, int offset[4],
+                            const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
                             int y, int type, int height);
 
     /* audio only */
@@ -1602,15 +1647,56 @@ typedef struct AVCodecContext {
 
     /**
      * Called at the beginning of each frame to get a buffer for it.
-     * If pic.reference is set then the frame will be read later by libavcodec.
-     * avcodec_align_dimensions2() should be used to find the required width and
-     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
+     * The function will set AVFrame.data[] and AVFrame.linesize[].
+     * AVFrame.extended_data[] must also be set, but it should be the same as
+     * AVFrame.data[] except for planar audio with more channels than can fit
+     * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+     * many data pointers as it can hold.
+     *
      * if CODEC_CAP_DR1 is not set then get_buffer() must call
      * avcodec_default_get_buffer() instead of providing buffers allocated by
      * some other means.
+     *
+     * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+     * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+     * but if get_buffer() is overridden then alignment considerations should
+     * be taken into account.
+     *
+     * @see avcodec_default_get_buffer()
+     *
+     * Video:
+     *
+     * If pic.reference is set then the frame will be read later by libavcodec.
+     * avcodec_align_dimensions2() should be used to find the required width and
+     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
      * If frame multithreading is used and thread_safe_callbacks is set,
-     * it may be called from a different thread, but not from more than one at once.
-     * Does not need to be reentrant.
+     * it may be called from a different thread, but not from more than one at
+     * once. Does not need to be reentrant.
+     *
+     * @see release_buffer(), reget_buffer()
+     * @see avcodec_align_dimensions2()
+     *
+     * Audio:
+     *
+     * Decoders request a buffer of a particular size by setting
+     * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+     * however, utilize only part of the buffer by setting AVFrame.nb_samples
+     * to a smaller value in the output frame.
+     *
+     * Decoders cannot use the buffer after returning from
+     * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+     * is assumed to be released immediately upon return.
+     *
+     * As a convenience, av_samples_get_buffer_size() and
+     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+     * functions to find the required data size and to fill data pointers and
+     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+     * since all planes must be the same size.
+     *
+     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+     *
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
      */
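
A minimal sketch (not part of the patch) of what an audio get_buffer() override could look like under the rules above. It assumes a packed sample format and no more channels than AV_NUM_DATA_POINTERS, uses av_samples_get_buffer_size() from libavutil as suggested by the documentation, and leaves freeing of the buffer out; the function name is invented.

    #include <libavcodec/avcodec.h>
    #include <libavutil/mem.h>
    #include <libavutil/samplefmt.h>

    /* Illustration only: custom audio get_buffer() for packed formats.
     * The decoder has already set frame->nb_samples. */
    static int my_audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
    {
        int size = av_samples_get_buffer_size(&frame->linesize[0],
                                              avctx->channels,
                                              frame->nb_samples,
                                              avctx->sample_fmt, 0);
        if (size < 0)
            return size;

        /* av_malloc() returns memory suitably aligned for SIMD access. */
        frame->data[0] = av_malloc(size);
        if (!frame->data[0])
            return AVERROR(ENOMEM);

        /* Packed audio: a single plane, so extended_data aliases data. */
        frame->extended_data = frame->data;
        return 0;
    }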
@@ -1929,7 +2015,7 @@ typedef struct AVCodecContext {
      * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
      * - decoding: unused
      */
-    uint64_t error[4];
+    uint64_t error[AV_NUM_DATA_POINTERS];
 
     /**
      * motion estimation comparison function
@@ -3253,8 +3339,8 @@ typedef struct AVHWAccel {
  * the last component is alpha
  */
 typedef struct AVPicture {
-    uint8_t *data[4];
-    int linesize[4];       ///< number of bytes per line
+    uint8_t *data[AV_NUM_DATA_POINTERS];
+    int linesize[AV_NUM_DATA_POINTERS];     ///< number of bytes per line
 } AVPicture;
 
 #define AVPALETTE_SIZE 1024
@@ -3922,7 +4008,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
  * according to avcodec_get_edge_width() before.
  */
 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
-                               int linesize_align[4]);
+                               int linesize_align[AV_NUM_DATA_POINTERS]);
 
 enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
 
@@ -4005,7 +4091,12 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
  */
 int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
 
+#if FF_API_OLD_DECODE_AUDIO
 /**
+ * Wrapper function which calls avcodec_decode_audio4.
+ *
+ * @deprecated Use avcodec_decode_audio4 instead.
+ *
  * Decode the audio frame of size avpkt->size from avpkt->data into samples.
  * Some decoders may support multiple frames in a single AVPacket, such
  * decoders would then just decode the first frame. In this case,
@@ -4040,6 +4131,8 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
  *
  * @param avctx the codec context
  * @param[out] samples the output buffer, sample type in avctx->sample_fmt
+ *                     If the sample format is planar, each channel plane will
+ *                     be the same size, with no padding between channels.
  * @param[in,out] frame_size_ptr the output buffer size in bytes
  * @param[in] avpkt The input AVPacket containing the input buffer.
  *            You can create such packet with av_init_packet() and by then setting
@@ -4048,9 +4141,46 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
  * @return On error a negative value is returned, otherwise the number of bytes
  * used or zero if no frame data was decompressed (used) from the input AVPacket.
  */
-int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
+attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
                          int *frame_size_ptr,
                          AVPacket *avpkt);
+#endif
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio4 has to be called again with an AVPacket containing
+ * the remaining data in order to decode the second frame, etc...
+ * Even if no frames are returned, the packet needs to be fed to the decoder
+ * with remaining data until it is completely consumed or an error occurs.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ *          larger than the actual read bytes because some optimized bitstream
+ *          readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note You might have to align the input buffer. The alignment requirements
+ *       depend on the CPU and the decoder.
+ *
+ * @param      avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ *                   Decoders request a buffer of a particular size by setting
+ *                   AVFrame.nb_samples prior to calling get_buffer(). The
+ *                   decoder may, however, only utilize part of the buffer by
+ *                   setting AVFrame.nb_samples to a smaller value in the
+ *                   output frame.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ *                           non-zero.
+ * @param[in]  avpkt The input AVPacket containing the input buffer.
+ *                   At least avpkt->data and avpkt->size should be set. Some
+ *                   decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ *         decoding, otherwise the number of bytes consumed from the input
+ *         AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+                          int *got_frame_ptr, AVPacket *avpkt);
 
 /**
  * Decode the video frame of size avpkt->size from avpkt->data into picture.
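
Purely as an illustration of the new calling convention (not part of the patch), decoding one audio packet with avcodec_decode_audio4() and dumping the raw samples might look like the sketch below. It assumes a packed sample format and a single frame per packet; decoders that pack several frames into one AVPacket need the call repeated on the remaining data, as described above. The function name is invented.

    #include <stdio.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    /* Illustration only: decode one audio packet with the new API. */
    static int decode_and_dump(AVCodecContext *avctx, AVPacket *pkt, FILE *out)
    {
        AVFrame frame;
        int got_frame = 0;
        int len;

        avcodec_get_frame_defaults(&frame);

        len = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (len < 0)
            return len;                              /* decoding error */

        if (got_frame) {
            /* Exact payload size of the decoded samples, no padding. */
            int size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  frame.nb_samples,
                                                  avctx->sample_fmt, 1);
            if (size > 0)
                fwrite(frame.data[0], 1, size, out);
        }

        return len;                                  /* bytes consumed */
    }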
diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c
index 4f716824c7ca55de9d970cc4507e84306d45deab..94be26ed49e1ddc0db6f63605dca171c56387fcc 100644
--- a/libavcodec/binkaudio.c
+++ b/libavcodec/binkaudio.c
@@ -45,6 +45,7 @@ static float quant_table[96];
 #define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11)
 
 typedef struct {
+    AVFrame frame;
     GetBitContext gb;
     DSPContext dsp;
     FmtConvertContext fmt_conv;
@@ -147,6 +148,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         return -1;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -293,6 +297,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
         ff_rdft_end(&s->trans.rdft);
     else if (CONFIG_BINKAUDIO_DCT_DECODER)
         ff_dct_end(&s->trans.dct);
+
     return 0;
 }
 
@@ -302,20 +307,19 @@ static void get_bits_align32(GetBitContext *s)
     if (n) skip_bits(s, n);
 }
 
-static int decode_frame(AVCodecContext *avctx,
-                        void *data, int *data_size,
-                        AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, void *data,
+                        int *got_frame_ptr, AVPacket *avpkt)
 {
     BinkAudioContext *s = avctx->priv_data;
-    int16_t *samples      = data;
+    int16_t *samples;
     GetBitContext *gb = &s->gb;
-    int out_size, consumed = 0;
+    int ret, consumed = 0;
 
     if (!get_bits_left(gb)) {
         uint8_t *buf;
         /* handle end-of-stream */
         if (!avpkt->size) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return 0;
         }
         if (avpkt->size < 4) {
@@ -334,11 +338,13 @@ static int decode_frame(AVCodecContext *avctx,
         skip_bits_long(gb, 32);
     }
 
-    out_size = s->block_size * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = s->block_size / avctx->channels;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)s->frame.data[0];
 
     if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT)) {
         av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
@@ -346,7 +352,9 @@ static int decode_frame(AVCodecContext *avctx,
     }
     get_bits_align32(gb);
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return consumed;
 }
 
@@ -358,7 +366,7 @@ AVCodec ff_binkaudio_rdft_decoder = {
     .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_frame,
-    .capabilities   = CODEC_CAP_DELAY,
+    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)")
 };
 
@@ -370,6 +378,6 @@ AVCodec ff_binkaudio_dct_decoder = {
     .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_frame,
-    .capabilities   = CODEC_CAP_DELAY,
+    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)")
 };
diff --git a/libavcodec/cook.c b/libavcodec/cook.c
index 6a076d931039fd942059f37bcc9d378d4a53d11d..b285f88a152cf6bcc56a7048155c0080e9e36f4f 100644
--- a/libavcodec/cook.c
+++ b/libavcodec/cook.c
@@ -122,6 +122,7 @@ typedef struct cook {
     void (* saturate_output) (struct cook *q, int chan, float *out);
 
     AVCodecContext*     avctx;
+    AVFrame             frame;
     GetBitContext       gb;
     /* stream data */
     int                 nb_channels;
@@ -131,6 +132,7 @@ typedef struct cook {
     int                 samples_per_channel;
     /* states */
     AVLFG               random_state;
+    int                 discarded_packets;
 
     /* transform data */
     FFTContext          mdct_ctx;
@@ -896,7 +898,8 @@ mlt_compensate_output(COOKContext *q, float *decode_buffer,
                       float *out, int chan)
 {
     imlt_gain(q, decode_buffer, gains_ptr, previous_buffer);
-    q->saturate_output (q, chan, out);
+    if (out)
+        q->saturate_output(q, chan, out);
 }
 
 
@@ -953,24 +956,28 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
  * @param avctx     pointer to the AVCodecContext
  */
 
-static int cook_decode_frame(AVCodecContext *avctx,
-            void *data, int *data_size,
-            AVPacket *avpkt) {
+static int cook_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
+{
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     COOKContext *q = avctx->priv_data;
-    int i, out_size;
+    float *samples = NULL;
+    int i, ret;
     int offset = 0;
     int chidx = 0;
 
     if (buf_size < avctx->block_align)
         return buf_size;
 
-    out_size = q->nb_channels * q->samples_per_channel *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    if (q->discarded_packets >= 2) {
+        q->frame.nb_samples = q->samples_per_channel;
+        if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return ret;
+        }
+        samples = (float *)q->frame.data[0];
     }
 
     /* estimate subpacket sizes */
@@ -990,15 +997,21 @@ static int cook_decode_frame(AVCodecContext *avctx,
         q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv;
         q->subpacket[i].ch_idx = chidx;
         av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align);
-        decode_subpacket(q, &q->subpacket[i], buf + offset, data);
+        decode_subpacket(q, &q->subpacket[i], buf + offset, samples);
         offset += q->subpacket[i].size;
         chidx += q->subpacket[i].num_channels;
         av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb));
     }
-    *data_size = out_size;
 
     /* Discard the first two frames: no valid audio. */
-    if (avctx->frame_number < 2) *data_size = 0;
+    if (q->discarded_packets < 2) {
+        q->discarded_packets++;
+        *got_frame_ptr = 0;
+        return avctx->block_align;
+    }
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = q->frame;
 
     return avctx->block_align;
 }
@@ -1246,6 +1259,9 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
     else
         avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
 
+    avcodec_get_frame_defaults(&q->frame);
+    avctx->coded_frame = &q->frame;
+
 #ifdef DEBUG
     dump_cook_context(q);
 #endif
@@ -1262,5 +1278,6 @@ AVCodec ff_cook_decoder =
     .init = cook_decode_init,
     .close = cook_decode_close,
     .decode = cook_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("COOK"),
 };
diff --git a/libavcodec/dca.c b/libavcodec/dca.c
index edafb967b401c2d10c229f422418e215c77ff4ef..7c43b922b14a24275b68505f5b6c51b53938281c 100644
--- a/libavcodec/dca.c
+++ b/libavcodec/dca.c
@@ -261,6 +261,7 @@ static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int id
 
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
     /* Frame header */
     int frame_type;             ///< type of the current frame
     int samples_deficit;        ///< deficit sample count
@@ -1634,9 +1635,8 @@ static void dca_exss_parse_header(DCAContext *s)
  * Main frame decoding function
  * FIXME add arguments
  */
-static int dca_decode_frame(AVCodecContext * avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int dca_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -1644,9 +1644,8 @@ static int dca_decode_frame(AVCodecContext * avctx,
     int lfe_samples;
     int num_core_channels = 0;
     int i, ret;
-    float   *samples_flt = data;
-    int16_t *samples_s16 = data;
-    int out_size;
+    float   *samples_flt;
+    int16_t *samples_s16;
     DCAContext *s = avctx->priv_data;
     int channels;
     int core_ss_end;
@@ -1832,11 +1831,14 @@ static int dca_decode_frame(AVCodecContext * avctx,
         avctx->channels = channels;
     }
 
-    out_size = 256 / 8 * s->sample_blocks * channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size)
-        return AVERROR(EINVAL);
-    *data_size = out_size;
+    /* get output buffer */
+    s->frame.nb_samples = 256 * (s->sample_blocks / 8);
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples_flt = (float   *)s->frame.data[0];
+    samples_s16 = (int16_t *)s->frame.data[0];
 
     /* filter to get final output */
     for (i = 0; i < (s->sample_blocks / 8); i++) {
@@ -1870,6 +1872,9 @@ static int dca_decode_frame(AVCodecContext * avctx,
         s->lfe_data[i] = s->lfe_data[i + lfe_samples];
     }
 
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return buf_size;
 }
 
@@ -1912,6 +1917,9 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
         avctx->channels = avctx->request_channels;
     }
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -1940,7 +1948,7 @@ AVCodec ff_dca_decoder = {
     .decode = dca_decode_frame,
     .close = dca_decode_end,
     .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
-    .capabilities = CODEC_CAP_CHANNEL_CONF,
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
diff --git a/libavcodec/dpcm.c b/libavcodec/dpcm.c
index 2f87f51774dbd6ab19d3671443d6797fc2d99056..b96461984b9a99e5975279d5924cd4d58269ce6b 100644
--- a/libavcodec/dpcm.c
+++ b/libavcodec/dpcm.c
@@ -42,6 +42,7 @@
 #include "bytestream.h"
 
 typedef struct DPCMContext {
+    AVFrame frame;
     int channels;
     int16_t roq_square_array[256];
     int sample[2];                  ///< previous sample (for SOL_DPCM)
@@ -162,22 +163,25 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
     else
         avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
 
-static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                             AVPacket *avpkt)
+static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     const uint8_t *buf_end = buf + buf_size;
     DPCMContext *s = avctx->priv_data;
-    int out = 0;
+    int out = 0, ret;
     int predictor[2];
     int ch = 0;
     int stereo = s->channels - 1;
-    int16_t *output_samples = data;
+    int16_t *output_samples;
 
     /* calculate output size */
     switch(avctx->codec->id) {
@@ -197,15 +201,18 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
             out = buf_size;
         break;
     }
-    out *= av_get_bytes_per_sample(avctx->sample_fmt);
     if (out <= 0) {
         av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
         return AVERROR(EINVAL);
     }
-    if (*data_size < out) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+
+    /* get output buffer */
+    s->frame.nb_samples = out / s->channels;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    output_samples = (int16_t *)s->frame.data[0];
 
     switch(avctx->codec->id) {
 
@@ -307,7 +314,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         break;
     }
 
-    *data_size = out;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return buf_size;
 }
 
@@ -319,6 +328,7 @@ AVCodec ff_ ## name_ ## _decoder = {                        \
     .priv_data_size = sizeof(DPCMContext),                  \
     .init           = dpcm_decode_init,                     \
     .decode         = dpcm_decode_frame,                    \
+    .capabilities   = CODEC_CAP_DR1,                        \
     .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
 }
 
diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c
index 3cf32ca32b86420a2a354a9ea10dc188e10edb6b..4c6c41c2feb650526c6575cb31e1ba17c4031de9 100644
--- a/libavcodec/dsicinav.c
+++ b/libavcodec/dsicinav.c
@@ -44,6 +44,7 @@ typedef struct CinVideoContext {
 } CinVideoContext;
 
 typedef struct CinAudioContext {
+    AVFrame frame;
     int initial_decode_frame;
     int delta;
 } CinAudioContext;
@@ -318,25 +319,28 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
     cin->delta = 0;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
+    avcodec_get_frame_defaults(&cin->frame);
+    avctx->coded_frame = &cin->frame;
+
     return 0;
 }
 
-static int cinaudio_decode_frame(AVCodecContext *avctx,
-                                 void *data, int *data_size,
-                                 AVPacket *avpkt)
+static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
+                                 int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     CinAudioContext *cin = avctx->priv_data;
     const uint8_t *buf_end = buf + avpkt->size;
-    int16_t *samples = data;
-    int delta, out_size;
-
-    out_size = (avpkt->size - cin->initial_decode_frame) *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    int16_t *samples;
+    int delta, ret;
+
+    /* get output buffer */
+    cin->frame.nb_samples = avpkt->size - cin->initial_decode_frame;
+    if ((ret = avctx->get_buffer(avctx, &cin->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)cin->frame.data[0];
 
     delta = cin->delta;
     if (cin->initial_decode_frame) {
@@ -352,7 +356,8 @@ static int cinaudio_decode_frame(AVCodecContext *avctx,
     }
     cin->delta = delta;
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = cin->frame;
 
     return avpkt->size;
 }
@@ -377,5 +382,6 @@ AVCodec ff_dsicinaudio_decoder = {
     .priv_data_size = sizeof(CinAudioContext),
     .init           = cinaudio_decode_init,
     .decode         = cinaudio_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"),
 };
diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c
index c1404404366b9a30c5cab954bea5b2c14ae5194a..ca0863107aee1297544ca86f5742294b68171674 100644
--- a/libavcodec/flacdec.c
+++ b/libavcodec/flacdec.c
@@ -49,6 +49,7 @@ typedef struct FLACContext {
     FLACSTREAMINFO
 
     AVCodecContext *avctx;                  ///< parent AVCodecContext
+    AVFrame frame;
     GetBitContext gb;                       ///< GetBitContext initialized to start at the current frame
 
     int blocksize;                          ///< number of samples in the current frame
@@ -116,6 +117,9 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
     allocate_buffers(s);
     s->got_streaminfo = 1;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -542,20 +546,18 @@ static int decode_frame(FLACContext *s)
     return 0;
 }
 
-static int flac_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int flac_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     FLACContext *s = avctx->priv_data;
     int i, j = 0, bytes_read = 0;
-    int16_t *samples_16 = data;
-    int32_t *samples_32 = data;
-    int alloc_data_size= *data_size;
-    int output_size;
+    int16_t *samples_16;
+    int32_t *samples_32;
+    int ret;
 
-    *data_size=0;
+    *got_frame_ptr = 0;
 
     if (s->max_framesize == 0) {
         s->max_framesize =
@@ -586,15 +588,14 @@ static int flac_decode_frame(AVCodecContext *avctx,
     }
     bytes_read = (get_bits_count(&s->gb)+7)/8;
 
-    /* check if allocated data size is large enough for output */
-    output_size = s->blocksize * s->channels *
-                  av_get_bytes_per_sample(avctx->sample_fmt);
-    if (output_size > alloc_data_size) {
-        av_log(s->avctx, AV_LOG_ERROR, "output data size is larger than "
-                                       "allocated data size\n");
-        return -1;
+    /* get output buffer */
+    s->frame.nb_samples = s->blocksize;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
-    *data_size = output_size;
+    samples_16 = (int16_t *)s->frame.data[0];
+    samples_32 = (int32_t *)s->frame.data[0];
 
 #define DECORRELATE(left, right)\
             assert(s->channels == 2);\
@@ -639,6 +640,9 @@ static int flac_decode_frame(AVCodecContext *avctx,
                buf_size - bytes_read, buf_size);
     }
 
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return bytes_read;
 }
 
@@ -662,5 +666,6 @@ AVCodec ff_flac_decoder = {
     .init           = flac_decode_init,
     .close          = flac_decode_close,
     .decode         = flac_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
 };
diff --git a/libavcodec/g722.h b/libavcodec/g722.h
index 5edb6c811901aa59bc1277a7a820b9606c49c7c2..69e7a86e25598bc2852fff76be8a186500cdf2ca 100644
--- a/libavcodec/g722.h
+++ b/libavcodec/g722.h
@@ -26,10 +26,12 @@
 #define AVCODEC_G722_H
 
 #include <stdint.h>
+#include "avcodec.h"
 
 #define PREV_SAMPLES_BUF_SIZE 1024
 
 typedef struct {
+    AVFrame frame;
     int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
     int     prev_samples_pos;        ///< the number of values in prev_samples
 
diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c
index 2be47159a4f57867891d665c6d1639471c6e5960..652a1aa4ae6c2c89464edf5047f9a16223aaf01c 100644
--- a/libavcodec/g722dec.c
+++ b/libavcodec/g722dec.c
@@ -66,6 +66,9 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
     c->band[1].scale_factor = 2;
     c->prev_samples_pos = 22;
 
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
@@ -81,20 +84,22 @@ static const int16_t *low_inv_quants[3] = { ff_g722_low_inv_quant6,
                                             ff_g722_low_inv_quant4 };
 
 static int g722_decode_frame(AVCodecContext *avctx, void *data,
-                             int *data_size, AVPacket *avpkt)
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     G722Context *c = avctx->priv_data;
-    int16_t *out_buf = data;
-    int j, out_len;
+    int16_t *out_buf;
+    int j, ret;
     const int skip = 8 - avctx->bits_per_coded_sample;
     const int16_t *quantizer_table = low_inv_quants[skip];
     GetBitContext gb;
 
-    out_len = avpkt->size * 2 * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_len) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    c->frame.nb_samples = avpkt->size * 2;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    out_buf = (int16_t *)c->frame.data[0];
 
     init_get_bits(&gb, avpkt->data, avpkt->size * 8);
 
@@ -128,7 +133,10 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
             c->prev_samples_pos = 22;
         }
     }
-    *data_size = out_len;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
+
     return avpkt->size;
 }
 
@@ -139,5 +147,6 @@ AVCodec ff_adpcm_g722_decoder = {
     .priv_data_size = sizeof(G722Context),
     .init           = g722_decode_init,
     .decode         = g722_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
 };
diff --git a/libavcodec/g726.c b/libavcodec/g726.c
index ae1b5a300165d22e43c5b4faa362e9b9e2b97ec9..8c02a392ccc96224068d32b5207f92417a4ba939 100644
--- a/libavcodec/g726.c
+++ b/libavcodec/g726.c
@@ -75,6 +75,7 @@ typedef struct G726Tables {
 
 typedef struct G726Context {
     AVClass *class;
+    AVFrame frame;
     G726Tables tbls;    /**< static tables needed for computation */
 
     Float11 sr[2];      /**< prev. reconstructed samples */
@@ -427,26 +428,31 @@ static av_cold int g726_decode_init(AVCodecContext *avctx)
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
-static int g726_decode_frame(AVCodecContext *avctx,
-                             void *data, int *data_size,
-                             AVPacket *avpkt)
+static int g726_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     G726Context *c = avctx->priv_data;
-    int16_t *samples = data;
+    int16_t *samples;
     GetBitContext gb;
-    int out_samples, out_size;
+    int out_samples, ret;
 
     out_samples = buf_size * 8 / c->code_size;
-    out_size    = out_samples * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+
+    /* get output buffer */
+    c->frame.nb_samples = out_samples;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)c->frame.data[0];
 
     init_get_bits(&gb, buf, buf_size * 8);
 
@@ -456,7 +462,9 @@ static int g726_decode_frame(AVCodecContext *avctx,
     if (get_bits_left(&gb) > 0)
         av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n");
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
+
     return buf_size;
 }
 
@@ -474,6 +482,7 @@ AVCodec ff_adpcm_g726_decoder = {
     .init           = g726_decode_init,
     .decode         = g726_decode_frame,
     .flush          = g726_decode_flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
 };
 #endif
diff --git a/libavcodec/gsmdec.c b/libavcodec/gsmdec.c
index 2d1dee8384c8fee1712e737157defa0885a426ec..eec515b41fc92df6d3599ae8e0afc421917614ea 100644
--- a/libavcodec/gsmdec.c
+++ b/libavcodec/gsmdec.c
@@ -32,6 +32,8 @@
 
 static av_cold int gsm_init(AVCodecContext *avctx)
 {
+    GSMContext *s = avctx->priv_data;
+
     avctx->channels = 1;
     if (!avctx->sample_rate)
         avctx->sample_rate = 8000;
@@ -47,30 +49,35 @@ static av_cold int gsm_init(AVCodecContext *avctx)
         avctx->block_align = GSM_MS_BLOCK_SIZE;
     }
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
 static int gsm_decode_frame(AVCodecContext *avctx, void *data,
-                            int *data_size, AVPacket *avpkt)
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
+    GSMContext *s = avctx->priv_data;
     int res;
     GetBitContext gb;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    int16_t *samples = data;
-    int frame_bytes = avctx->frame_size *
-                      av_get_bytes_per_sample(avctx->sample_fmt);
-
-    if (*data_size < frame_bytes) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
-    }
+    int16_t *samples;
 
     if (buf_size < avctx->block_align) {
         av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
         return AVERROR_INVALIDDATA;
     }
 
+    /* get output buffer */
+    s->frame.nb_samples = avctx->frame_size;
+    if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return res;
+    }
+    samples = (int16_t *)s->frame.data[0];
+
     switch (avctx->codec_id) {
     case CODEC_ID_GSM:
         init_get_bits(&gb, buf, buf_size * 8);
@@ -85,7 +92,10 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
         if (res < 0)
             return res;
     }
-    *data_size = frame_bytes;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return avctx->block_align;
 }
 
@@ -103,6 +113,7 @@ AVCodec ff_gsm_decoder = {
     .init           = gsm_init,
     .decode         = gsm_decode_frame,
     .flush          = gsm_flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("GSM"),
 };
 
@@ -114,5 +125,6 @@ AVCodec ff_gsm_ms_decoder = {
     .init           = gsm_init,
     .decode         = gsm_decode_frame,
     .flush          = gsm_flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"),
 };
diff --git a/libavcodec/gsmdec_data.h b/libavcodec/gsmdec_data.h
index bb4f159c60ef74f129c56604619aaf90786e0031..4e2c7e6846f722e3dce1e8d12ef88072a01d1a0c 100644
--- a/libavcodec/gsmdec_data.h
+++ b/libavcodec/gsmdec_data.h
@@ -23,6 +23,7 @@
 #define AVCODEC_GSMDEC_DATA
 
 #include <stdint.h>
+#include "avcodec.h"
 
 // input and output sizes in byte
 #define GSM_BLOCK_SIZE    33
@@ -30,6 +31,7 @@
 #define GSM_FRAME_SIZE   160
 
 typedef struct {
+    AVFrame frame;
     // Contains first 120 elements from the previous frame
     // (used by long_term_synth according to the "lag"),
     // then in the following 160 elements the current
diff --git a/libavcodec/huffyuv.c b/libavcodec/huffyuv.c
index 1ac191374511c96f3809f855416705c2cfc8c9dd..574daacc0b39b2bbda242f7c8c9ac79523e73843 100644
--- a/libavcodec/huffyuv.c
+++ b/libavcodec/huffyuv.c
@@ -956,8 +956,8 @@ static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
 
 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
 static void draw_slice(HYuvContext *s, int y){
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
 
     if(s->avctx->draw_horiz_band==NULL)
         return;
@@ -974,7 +974,8 @@ static void draw_slice(HYuvContext *s, int y){
     offset[0] = s->picture.linesize[0]*y;
     offset[1] = s->picture.linesize[1]*cy;
     offset[2] = s->picture.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
     emms_c();
 
     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
diff --git a/libavcodec/imc.c b/libavcodec/imc.c
index bb6f0958757188a415f54ae72ae33a8e3ff78c0d..3919797038c9eeed0f7817b55fab833cce287a4e 100644
--- a/libavcodec/imc.c
+++ b/libavcodec/imc.c
@@ -51,6 +51,8 @@
 #define COEFFS 256
 
 typedef struct {
+    AVFrame frame;
+
     float old_floor[BANDS];
     float flcoeffs1[BANDS];
     float flcoeffs2[BANDS];
@@ -168,6 +170,10 @@ static av_cold int imc_decode_init(AVCodecContext * avctx)
     dsputil_init(&q->dsp, avctx);
     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
+
+    avcodec_get_frame_defaults(&q->frame);
+    avctx->coded_frame = &q->frame;
+
     return 0;
 }
 
@@ -649,9 +655,8 @@ static int imc_get_coeffs (IMCContext* q) {
     return 0;
 }
 
-static int imc_decode_frame(AVCodecContext * avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int imc_decode_frame(AVCodecContext * avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -659,7 +664,7 @@ static int imc_decode_frame(AVCodecContext * avctx,
     IMCContext *q = avctx->priv_data;
 
     int stream_format_code;
-    int imc_hdr, i, j, out_size, ret;
+    int imc_hdr, i, j, ret;
     int flag;
     int bits, summer;
     int counter, bitscount;
@@ -670,15 +675,16 @@ static int imc_decode_frame(AVCodecContext * avctx,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = COEFFS * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    q->frame.nb_samples = COEFFS;
+    if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    q->out_samples = (float *)q->frame.data[0];
 
     q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
 
-    q->out_samples = data;
     init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8);
 
     /* Check the frame header */
@@ -823,7 +829,8 @@ static int imc_decode_frame(AVCodecContext * avctx,
 
     imc_imdct256(q);
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = q->frame;
 
     return IMC_BLOCK_SIZE;
 }
@@ -834,6 +841,7 @@ static av_cold int imc_decode_close(AVCodecContext * avctx)
     IMCContext *q = avctx->priv_data;
 
     ff_fft_end(&q->fft);
+
     return 0;
 }
 
@@ -846,5 +854,6 @@ AVCodec ff_imc_decoder = {
     .init = imc_decode_init,
     .close = imc_decode_close,
     .decode = imc_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
 };
diff --git a/libavcodec/internal.h b/libavcodec/internal.h
index 694d344da1c9c4aac464a6be008d692282ae5637..352be678541484a93cf6713b827f1e1bbfa0ed53 100644
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@ -31,12 +31,15 @@
 
 typedef struct InternalBuffer {
     int last_pic_num;
-    uint8_t *base[4];
-    uint8_t *data[4];
-    int linesize[4];
+    uint8_t *base[AV_NUM_DATA_POINTERS];
+    uint8_t *data[AV_NUM_DATA_POINTERS];
+    int linesize[AV_NUM_DATA_POINTERS];
     int width;
     int height;
     enum PixelFormat pix_fmt;
+    uint8_t **extended_data;
+    int audio_data_size;
+    int nb_channels;
 } InternalBuffer;
 
 typedef struct AVCodecInternal {
diff --git a/libavcodec/libgsm.c b/libavcodec/libgsm.c
index 95919a4c43feddd2ba55254696cfffbc1f478d91..9ba9da9e3f0b46f7a1edfdbfa9f0caaea3c8ba67 100644
--- a/libavcodec/libgsm.c
+++ b/libavcodec/libgsm.c
@@ -124,7 +124,14 @@ AVCodec ff_libgsm_ms_encoder = {
     .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
 };
 
+typedef struct LibGSMDecodeContext {
+    AVFrame frame;
+    struct gsm_state *state;
+} LibGSMDecodeContext;
+
 static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
+    LibGSMDecodeContext *s = avctx->priv_data;
+
     if (avctx->channels > 1) {
         av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n",
                avctx->channels);
@@ -139,7 +146,7 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avctx->priv_data = gsm_create();
+    s->state = gsm_create();
 
     switch(avctx->codec_id) {
     case CODEC_ID_GSM:
@@ -154,59 +161,72 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
         }
     }
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
 static av_cold int libgsm_decode_close(AVCodecContext *avctx) {
-    gsm_destroy(avctx->priv_data);
-    avctx->priv_data = NULL;
+    LibGSMDecodeContext *s = avctx->priv_data;
+
+    gsm_destroy(s->state);
+    s->state = NULL;
     return 0;
 }
 
-static int libgsm_decode_frame(AVCodecContext *avctx,
-                               void *data, int *data_size,
-                               AVPacket *avpkt) {
+static int libgsm_decode_frame(AVCodecContext *avctx, void *data,
+                               int *got_frame_ptr, AVPacket *avpkt)
+{
     int i, ret;
-    struct gsm_state *s = avctx->priv_data;
+    LibGSMDecodeContext *s = avctx->priv_data;
     uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    int16_t *samples = data;
-    int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt);
-
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
-    }
+    int16_t *samples;
 
     if (buf_size < avctx->block_align) {
         av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
         return AVERROR_INVALIDDATA;
     }
 
+    /* get output buffer */
+    s->frame.nb_samples = avctx->frame_size;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples = (int16_t *)s->frame.data[0];
+
     for (i = 0; i < avctx->frame_size / GSM_FRAME_SIZE; i++) {
-        if ((ret = gsm_decode(s, buf, samples)) < 0)
+        if ((ret = gsm_decode(s->state, buf, samples)) < 0)
             return -1;
         buf     += GSM_BLOCK_SIZE;
         samples += GSM_FRAME_SIZE;
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return avctx->block_align;
 }
 
 static void libgsm_flush(AVCodecContext *avctx) {
-    gsm_destroy(avctx->priv_data);
-    avctx->priv_data = gsm_create();
+    LibGSMDecodeContext *s = avctx->priv_data;
+
+    gsm_destroy(s->state);
+    s->state = gsm_create();
 }
 
 AVCodec ff_libgsm_decoder = {
     .name           = "libgsm",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = CODEC_ID_GSM,
+    .priv_data_size = sizeof(LibGSMDecodeContext),
     .init           = libgsm_decode_init,
     .close          = libgsm_decode_close,
     .decode         = libgsm_decode_frame,
     .flush          = libgsm_flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"),
 };
 
@@ -214,9 +234,11 @@ AVCodec ff_libgsm_ms_decoder = {
     .name           = "libgsm_ms",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = CODEC_ID_GSM_MS,
+    .priv_data_size = sizeof(LibGSMDecodeContext),
     .init           = libgsm_decode_init,
     .close          = libgsm_decode_close,
     .decode         = libgsm_decode_frame,
     .flush          = libgsm_flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
 };
diff --git a/libavcodec/libopencore-amr.c b/libavcodec/libopencore-amr.c
index e456c407a40fe00d405c58869a358f5a534811d0..0de7130f197b6e01bfe21bbfc8748b246ed36448 100644
--- a/libavcodec/libopencore-amr.c
+++ b/libavcodec/libopencore-amr.c
@@ -79,6 +79,7 @@ static int get_bitrate_mode(int bitrate, void *log_ctx)
 
 typedef struct AMRContext {
     AVClass *av_class;
+    AVFrame frame;
     void *dec_state;
     void *enc_state;
     int   enc_bitrate;
@@ -112,6 +113,9 @@ static av_cold int amr_nb_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOSYS);
     }
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -120,26 +124,28 @@ static av_cold int amr_nb_decode_close(AVCodecContext *avctx)
     AMRContext *s = avctx->priv_data;
 
     Decoder_Interface_exit(s->dec_state);
+
     return 0;
 }
 
 static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
-                               int *data_size, AVPacket *avpkt)
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     AMRContext *s      = avctx->priv_data;
     static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 };
     enum Mode dec_mode;
-    int packet_size, out_size;
+    int packet_size, ret;
 
     av_dlog(avctx, "amr_decode_frame buf=%p buf_size=%d frame_count=%d!!\n",
             buf, buf_size, avctx->frame_number);
 
-    out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = 160;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
     dec_mode    = (buf[0] >> 3) & 0x000F;
@@ -154,8 +160,10 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
     av_dlog(avctx, "packet_size=%d buf= 0x%X %X %X %X\n",
               packet_size, buf[0], buf[1], buf[2], buf[3]);
     /* call decoder */
-    Decoder_Interface_Decode(s->dec_state, buf, data, 0);
-    *data_size = out_size;
+    Decoder_Interface_Decode(s->dec_state, buf, (short *)s->frame.data[0], 0);
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
 
     return packet_size;
 }
@@ -168,6 +176,7 @@ AVCodec ff_libopencore_amrnb_decoder = {
     .init           = amr_nb_decode_init,
     .close          = amr_nb_decode_close,
     .decode         = amr_nb_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"),
 };
 
@@ -251,6 +260,7 @@ AVCodec ff_libopencore_amrnb_encoder = {
 #include <opencore-amrwb/if_rom.h>
 
 typedef struct AMRWBContext {
+    AVFrame frame;
     void  *state;
 } AMRWBContext;
 
@@ -267,23 +277,27 @@ static av_cold int amr_wb_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOSYS);
     }
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
 static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
-                               int *data_size, AVPacket *avpkt)
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     AMRWBContext *s    = avctx->priv_data;
-    int mode;
-    int packet_size, out_size;
+    int mode, ret;
+    int packet_size;
     static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};
 
-    out_size = 320 * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = 320;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
     mode        = (buf[0] >> 3) & 0x000F;
@@ -295,8 +309,11 @@ static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    D_IF_decode(s->state, buf, data, _good_frame);
-    *data_size = out_size;
+    D_IF_decode(s->state, buf, (short *)s->frame.data[0], _good_frame);
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return packet_size;
 }
 
@@ -316,6 +333,7 @@ AVCodec ff_libopencore_amrwb_decoder = {
     .init           = amr_wb_decode_init,
     .close          = amr_wb_decode_close,
     .decode         = amr_wb_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"),
 };
 
diff --git a/libavcodec/libspeexdec.c b/libavcodec/libspeexdec.c
index 91f190525df0ff07cd20854102a910a69899e296..fdc39532b77a7cdcdfb0398c9e330bbe7e24d073 100644
--- a/libavcodec/libspeexdec.c
+++ b/libavcodec/libspeexdec.c
@@ -25,6 +25,7 @@
 #include "avcodec.h"
 
 typedef struct {
+    AVFrame frame;
     SpeexBits bits;
     SpeexStereoState stereo;
     void *dec_state;
@@ -89,26 +90,29 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
         s->stereo = (SpeexStereoState)SPEEX_STEREO_STATE_INIT;
         speex_decoder_ctl(s->dec_state, SPEEX_SET_HANDLER, &callback);
     }
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
-static int libspeex_decode_frame(AVCodecContext *avctx,
-                                 void *data, int *data_size,
-                                 AVPacket *avpkt)
+static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
+                                 int *got_frame_ptr, AVPacket *avpkt)
 {
     uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     LibSpeexContext *s = avctx->priv_data;
-    int16_t *output = data;
-    int out_size, ret, consumed = 0;
-
-    /* check output buffer size */
-    out_size = s->frame_size * avctx->channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    int16_t *output;
+    int ret, consumed = 0;
+
+    /* get output buffer */
+    s->frame.nb_samples = s->frame_size;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    output = (int16_t *)s->frame.data[0];
 
     /* if there is not enough data left for the smallest possible frame,
        reset the libspeex buffer using the current packet, otherwise ignore
@@ -116,7 +120,7 @@ static int libspeex_decode_frame(AVCodecContext *avctx,
     if (speex_bits_remaining(&s->bits) < 43) {
         /* check for flush packet */
         if (!buf || !buf_size) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return buf_size;
         }
         /* set new buffer */
@@ -133,7 +137,9 @@ static int libspeex_decode_frame(AVCodecContext *avctx,
     if (avctx->channels == 2)
         speex_decode_stereo_int(output, s->frame_size, &s->stereo);
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return consumed;
 }
 
@@ -163,6 +169,6 @@ AVCodec ff_libspeex_decoder = {
     .close          = libspeex_decode_close,
     .decode         = libspeex_decode_frame,
     .flush          = libspeex_decode_flush,
-    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY,
+    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"),
 };
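One detail worth calling out from the libspeex hunk above (and from the mpc7, mlp and ra144 hunks further down): a decoder that consumes a packet but produces no audio now reports that through the output flag instead of a zero byte count. Illustrative only, with nothing_decoded standing in for whatever condition the real decoders test (flush packet, frames to skip, missing stream parameters):

    if (nothing_decoded) {
        *got_frame_ptr = 0;     /* no AVFrame for this packet */
        return buf_size;        /* the packet is still consumed */
    }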
diff --git a/libavcodec/mace.c b/libavcodec/mace.c
index 980ebb90277d1ee5251afd74255ec9aae61631cf..ffa11ad80de7b9136c2b078dac287f611fb66b6a 100644
--- a/libavcodec/mace.c
+++ b/libavcodec/mace.c
@@ -153,6 +153,7 @@ typedef struct ChannelData {
 } ChannelData;
 
 typedef struct MACEContext {
+    AVFrame frame;
     ChannelData chd[2];
 } MACEContext;
 
@@ -228,30 +229,35 @@ static void chomp6(ChannelData *chd, int16_t *output, uint8_t val,
 
 static av_cold int mace_decode_init(AVCodecContext * avctx)
 {
+    MACEContext *ctx = avctx->priv_data;
+
     if (avctx->channels > 2)
         return -1;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&ctx->frame);
+    avctx->coded_frame = &ctx->frame;
+
     return 0;
 }
 
-static int mace_decode_frame(AVCodecContext *avctx,
-                              void *data, int *data_size,
-                              AVPacket *avpkt)
+static int mace_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    int16_t *samples = data;
+    int16_t *samples;
     MACEContext *ctx = avctx->priv_data;
-    int i, j, k, l;
-    int out_size;
+    int i, j, k, l, ret;
     int is_mace3 = (avctx->codec_id == CODEC_ID_MACE3);
 
-    out_size = 3 * (buf_size << (1 - is_mace3)) *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    ctx->frame.nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels;
+    if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)ctx->frame.data[0];
 
     for(i = 0; i < avctx->channels; i++) {
         int16_t *output = samples + i;
@@ -277,7 +283,8 @@ static int mace_decode_frame(AVCodecContext *avctx,
             }
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = ctx->frame;
 
     return buf_size;
 }
@@ -289,6 +296,7 @@ AVCodec ff_mace3_decoder = {
     .priv_data_size = sizeof(MACEContext),
     .init           = mace_decode_init,
     .decode         = mace_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"),
 };
 
@@ -299,6 +307,7 @@ AVCodec ff_mace6_decoder = {
     .priv_data_size = sizeof(MACEContext),
     .init           = mace_decode_init,
     .decode         = mace_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"),
 };
 
diff --git a/libavcodec/mlpdec.c b/libavcodec/mlpdec.c
index 0396423b92bd8d8a53c09b1ba9e3242690c1f1ef..4b439ddb67f821b2b21725c70da86adc53cf78bd 100644
--- a/libavcodec/mlpdec.c
+++ b/libavcodec/mlpdec.c
@@ -120,6 +120,7 @@ typedef struct SubStream {
 
 typedef struct MLPDecodeContext {
     AVCodecContext *avctx;
+    AVFrame     frame;
 
     //! Current access unit being read has a major sync.
     int         is_major_sync_unit;
@@ -242,6 +243,9 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
         m->substream[substr].lossless_check_data = 0xffffffff;
     dsputil_init(&m->dsp, avctx);
 
+    avcodec_get_frame_defaults(&m->frame);
+    avctx->coded_frame = &m->frame;
+
     return 0;
 }
 
@@ -946,13 +950,14 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
 /** Write the audio data into the output buffer. */
 
 static int output_data(MLPDecodeContext *m, unsigned int substr,
-                       uint8_t *data, unsigned int *data_size)
+                       void *data, int *got_frame_ptr)
 {
+    AVCodecContext *avctx = m->avctx;
     SubStream *s = &m->substream[substr];
     unsigned int i, out_ch = 0;
-    int out_size;
-    int32_t *data_32 = (int32_t*) data;
-    int16_t *data_16 = (int16_t*) data;
+    int32_t *data_32;
+    int16_t *data_16;
+    int ret;
     int is32 = (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32);
 
     if (m->avctx->channels != s->max_matrix_channel + 1) {
@@ -960,11 +965,14 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = s->blockpos * m->avctx->channels *
-               av_get_bytes_per_sample(m->avctx->sample_fmt);
-
-    if (*data_size < out_size)
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    m->frame.nb_samples = s->blockpos;
+    if ((ret = avctx->get_buffer(avctx, &m->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    data_32 = (int32_t *)m->frame.data[0];
+    data_16 = (int16_t *)m->frame.data[0];
 
     for (i = 0; i < s->blockpos; i++) {
         for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {
@@ -977,7 +985,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
         }
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = m->frame;
 
     return 0;
 }
@@ -986,8 +995,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
  *  @return negative on error, 0 if not enough data is present in the input stream,
  *  otherwise the number of bytes consumed. */
 
-static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
-                            AVPacket *avpkt)
+static int read_access_unit(AVCodecContext *avctx, void* data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -1023,7 +1032,7 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
     if (!m->params_valid) {
         av_log(m->avctx, AV_LOG_WARNING,
                "Stream parameters not seen; skipping frame.\n");
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return length;
     }
 
@@ -1168,7 +1177,7 @@ next_substr:
 
     rematrix_channels(m, m->max_decoded_substream);
 
-    if ((ret = output_data(m, m->max_decoded_substream, data, data_size)) < 0)
+    if ((ret = output_data(m, m->max_decoded_substream, data, got_frame_ptr)) < 0)
         return ret;
 
     return length;
@@ -1189,6 +1198,7 @@ AVCodec ff_mlp_decoder = {
     .priv_data_size = sizeof(MLPDecodeContext),
     .init           = mlp_decode_init,
     .decode         = read_access_unit,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
 };
 
@@ -1200,6 +1210,7 @@ AVCodec ff_truehd_decoder = {
     .priv_data_size = sizeof(MLPDecodeContext),
     .init           = mlp_decode_init,
     .decode         = read_access_unit,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
 };
 #endif /* CONFIG_TRUEHD_DECODER */
diff --git a/libavcodec/mpc.h b/libavcodec/mpc.h
index cd5769234cb78fb519e30775bec6a949619c054d..8b4deef689b8d62ba5ed6fdb23eb295cec32e8c1 100644
--- a/libavcodec/mpc.h
+++ b/libavcodec/mpc.h
@@ -50,6 +50,7 @@ typedef struct {
 }Band;
 
 typedef struct {
+    AVFrame frame;
     DSPContext dsp;
     MPADSPContext mpadsp;
     GetBitContext gb;
diff --git a/libavcodec/mpc7.c b/libavcodec/mpc7.c
index 1e76ddf56ab43e4ae040642e45fc0f9085fb9ed6..739325097142b78d6c91e6be954575f92de86e07 100644
--- a/libavcodec/mpc7.c
+++ b/libavcodec/mpc7.c
@@ -136,6 +136,10 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
         }
     }
     vlc_initialized = 1;
+
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
@@ -192,9 +196,8 @@ static int get_scale_idx(GetBitContext *gb, int ref)
     return ref + t;
 }
 
-static int mpc7_decode_frame(AVCodecContext * avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -204,7 +207,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
     int i, ch;
     int mb = -1;
     Band *bands = c->bands;
-    int off, out_size;
+    int off, ret;
     int bits_used, bits_avail;
 
     memset(bands, 0, sizeof(*bands) * (c->maxbands + 1));
@@ -213,10 +216,11 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
         return AVERROR(EINVAL);
     }
 
-    out_size = (buf[1] ? c->lastframelen : MPC_FRAME_SIZE) * 4;
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    c->frame.nb_samples = buf[1] ? c->lastframelen : MPC_FRAME_SIZE;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
     bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -276,7 +280,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
         for(ch = 0; ch < 2; ch++)
             idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off);
 
-    ff_mpc_dequantize_and_synth(c, mb, data, 2);
+    ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2);
 
     av_free(bits);
 
@@ -288,10 +292,12 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
     }
     if(c->frames_to_skip){
         c->frames_to_skip--;
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
-    *data_size = out_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
 
     return buf_size;
 }
@@ -312,5 +318,6 @@ AVCodec ff_mpc7_decoder = {
     .init           = mpc7_decode_init,
     .decode         = mpc7_decode_frame,
     .flush = mpc7_decode_flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"),
 };
diff --git a/libavcodec/mpc8.c b/libavcodec/mpc8.c
index 2f6bde3231f74aa8686dce14097bfc16a97b6293..a4750ad9619ff4293a7208ff4709a926ade813be 100644
--- a/libavcodec/mpc8.c
+++ b/libavcodec/mpc8.c
@@ -230,12 +230,15 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
                  &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
     }
     vlc_initialized = 1;
+
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
-static int mpc8_decode_frame(AVCodecContext * avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -243,14 +246,15 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
     GetBitContext gb2, *gb = &gb2;
     int i, j, k, ch, cnt, res, t;
     Band *bands = c->bands;
-    int off, out_size;
+    int off;
     int maxband, keyframe;
     int last[2];
 
-    out_size = MPC_FRAME_SIZE * 2 * avctx->channels;
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    c->frame.nb_samples = MPC_FRAME_SIZE;
+    if ((res = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return res;
     }
 
     keyframe = c->cur_frame == 0;
@@ -403,14 +407,16 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
         }
     }
 
-    ff_mpc_dequantize_and_synth(c, maxband, data, avctx->channels);
+    ff_mpc_dequantize_and_synth(c, maxband, c->frame.data[0], avctx->channels);
 
     c->cur_frame++;
 
     c->last_bits_used = get_bits_count(gb);
     if(c->cur_frame >= c->frames)
         c->cur_frame = 0;
-    *data_size =  out_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
 
     return c->cur_frame ? c->last_bits_used >> 3 : buf_size;
 }
@@ -422,5 +428,6 @@ AVCodec ff_mpc8_decoder = {
     .priv_data_size = sizeof(MPCContext),
     .init           = mpc8_decode_init,
     .decode         = mpc8_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"),
 };
diff --git a/libavcodec/mpeg4audio.c b/libavcodec/mpeg4audio.c
index ac546ba13762cf244cee8ab9d21e740018498063..67445935c4aa3e849c56ffb0cc1328e38ca29ae1 100644
--- a/libavcodec/mpeg4audio.c
+++ b/libavcodec/mpeg4audio.c
@@ -76,12 +76,13 @@ static inline int get_sample_rate(GetBitContext *gb, int *index)
         avpriv_mpeg4audio_sample_rates[*index];
 }
 
-int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size)
+int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf,
+                                 int bit_size, int sync_extension)
 {
     GetBitContext gb;
     int specific_config_bitindex;
 
-    init_get_bits(&gb, buf, buf_size*8);
+    init_get_bits(&gb, buf, bit_size);
     c->object_type = get_object_type(&gb);
     c->sample_rate = get_sample_rate(&gb, &c->sampling_index);
     c->chan_config = get_bits(&gb, 4);
@@ -117,7 +118,7 @@ int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int bu
             return -1;
     }
 
-    if (c->ext_object_type != AOT_SBR) {
+    if (c->ext_object_type != AOT_SBR && sync_extension) {
         while (get_bits_left(&gb) > 15) {
             if (show_bits(&gb, 11) == 0x2b7) { // sync extension
                 get_bits(&gb, 11);
diff --git a/libavcodec/mpeg4audio.h b/libavcodec/mpeg4audio.h
index 3d1f7e0c4c50d81d1fe954f361f7d152da6e7ee6..21000a9acc55cd3b235a96abd296c25a6a46cc03 100644
--- a/libavcodec/mpeg4audio.h
+++ b/libavcodec/mpeg4audio.h
@@ -42,14 +42,17 @@ typedef struct {
 
 extern const int avpriv_mpeg4audio_sample_rates[16];
 extern const uint8_t ff_mpeg4audio_channels[8];
+
 /**
  * Parse MPEG-4 systems extradata to retrieve audio configuration.
  * @param[in] c        MPEG4AudioConfig structure to fill.
  * @param[in] buf      Extradata from container.
- * @param[in] buf_size Extradata size.
+ * @param[in] bit_size Extradata size in bits.
+ * @param[in] sync_extension Look for a sync extension after config if true.
  * @return On error -1 is returned, on success AudioSpecificConfig bit index in extradata.
  */
-int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size);
+int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf,
+                                 int bit_size, int sync_extension);
 
 enum AudioObjectType {
     AOT_NULL,
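Callers of avpriv_mpeg4audio_get_config() now pass the extradata size in bits and explicitly ask for sync-extension parsing; the mp3on4 init below is updated accordingly. For reference, a call preserving the old behaviour looks like this (cfg and avctx stand for the usual local config struct and codec context):

    MPEG4AudioConfig cfg;
    /* the size is in bits now; the trailing 1 keeps scanning for a sync extension */
    avpriv_mpeg4audio_get_config(&cfg, avctx->extradata,
                                 avctx->extradata_size * 8, 1);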
diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c
index 7dded97cf448dcd7644824afd9d272b6c32003d7..5cc3c74339d7d4537612bd907f15f9900da5f75e 100644
--- a/libavcodec/mpegaudiodec.c
+++ b/libavcodec/mpegaudiodec.c
@@ -79,6 +79,7 @@ typedef struct MPADecodeContext {
     int err_recognition;
     AVCodecContext* avctx;
     MPADSPContext mpadsp;
+    AVFrame frame;
 } MPADecodeContext;
 
 #if CONFIG_FLOAT
@@ -479,6 +480,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
 
     if (avctx->codec_id == CODEC_ID_MP3ADU)
         s->adu_mode = 1;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -1581,7 +1586,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
 static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
                            const uint8_t *buf, int buf_size)
 {
-    int i, nb_frames, ch;
+    int i, nb_frames, ch, ret;
     OUT_INT *samples_ptr;
 
     init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
@@ -1629,8 +1634,16 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
         assert(i <= buf_size - HEADER_SIZE && i >= 0);
         memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
         s->last_buf_size += i;
+    }
 
-        break;
+    /* get output buffer */
+    if (!samples) {
+        s->frame.nb_samples = s->avctx->frame_size;
+        if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0) {
+            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return ret;
+        }
+        samples = (OUT_INT *)s->frame.data[0];
     }
 
     /* apply the synthesis filter */
@@ -1650,7 +1663,7 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
     return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
 }
 
-static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
+static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
                         AVPacket *avpkt)
 {
     const uint8_t *buf  = avpkt->data;
@@ -1658,7 +1671,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
     MPADecodeContext *s = avctx->priv_data;
     uint32_t header;
     int out_size;
-    OUT_INT *out_samples = data;
 
     if (buf_size < HEADER_SIZE)
         return AVERROR_INVALIDDATA;
@@ -1681,10 +1693,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
         avctx->bit_rate = s->bit_rate;
     avctx->sub_id = s->layer;
 
-    if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
-        return AVERROR(EINVAL);
-    *data_size = 0;
-
     if (s->frame_size <= 0 || s->frame_size > buf_size) {
         av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
         return AVERROR_INVALIDDATA;
@@ -1693,9 +1701,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
         buf_size= s->frame_size;
     }
 
-    out_size = mp_decode_frame(s, out_samples, buf, buf_size);
+    out_size = mp_decode_frame(s, NULL, buf, buf_size);
     if (out_size >= 0) {
-        *data_size         = out_size;
+        *got_frame_ptr   = 1;
+        *(AVFrame *)data = s->frame;
         avctx->sample_rate = s->sample_rate;
         //FIXME maybe move the other codec info stuff from above here too
     } else {
@@ -1704,6 +1713,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
            If there is more data in the packet, just consume the bad frame
            instead of returning an error, which would discard the whole
            packet. */
+        *got_frame_ptr = 0;
         if (buf_size == avpkt->size)
             return out_size;
     }
@@ -1719,15 +1729,14 @@ static void flush(AVCodecContext *avctx)
 }
 
 #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
-static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
-                            AVPacket *avpkt)
+static int decode_frame_adu(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf  = avpkt->data;
     int buf_size        = avpkt->size;
     MPADecodeContext *s = avctx->priv_data;
     uint32_t header;
     int len, out_size;
-    OUT_INT *out_samples = data;
 
     len = buf_size;
 
@@ -1757,9 +1766,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
         avctx->bit_rate = s->bit_rate;
     avctx->sub_id = s->layer;
 
-    if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
-        return AVERROR(EINVAL);
-
     s->frame_size = len;
 
 #if FF_API_PARSE_FRAME
@@ -1767,9 +1773,11 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
         out_size = buf_size;
     else
 #endif
-    out_size = mp_decode_frame(s, out_samples, buf, buf_size);
+    out_size = mp_decode_frame(s, NULL, buf, buf_size);
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
 
-    *data_size = out_size;
     return buf_size;
 }
 #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
@@ -1780,6 +1788,7 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
  * Context for MP3On4 decoder
  */
 typedef struct MP3On4DecodeContext {
+    AVFrame *frame;
     int frames;                     ///< number of mp3 frames per block (number of mp3 decoder instances)
     int syncword;                   ///< syncword patch
     const uint8_t *coff;            ///< channel offsets in output buffer
@@ -1843,7 +1852,8 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
         return AVERROR_INVALIDDATA;
     }
 
-    avpriv_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size);
+    avpriv_mpeg4audio_get_config(&cfg, avctx->extradata,
+                                 avctx->extradata_size * 8, 1);
     if (!cfg.chan_config || cfg.chan_config > 7) {
         av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
         return AVERROR_INVALIDDATA;
@@ -1870,6 +1880,7 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
     // Put decoder context in place to make init_decode() happy
     avctx->priv_data = s->mp3decctx[0];
     decode_init(avctx);
+    s->frame = avctx->coded_frame;
     // Restore mp3on4 context pointer
     avctx->priv_data = s;
     s->mp3decctx[0]->adu_mode = 1; // Set adu mode
@@ -1914,9 +1925,8 @@ static void flush_mp3on4(AVCodecContext *avctx)
 }
 
 
-static int decode_frame_mp3on4(AVCodecContext * avctx,
-                        void *data, int *data_size,
-                        AVPacket *avpkt)
+static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf     = avpkt->data;
     int buf_size           = avpkt->size;
@@ -1924,14 +1934,17 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
     MPADecodeContext *m;
     int fsize, len = buf_size, out_size = 0;
     uint32_t header;
-    OUT_INT *out_samples = data;
+    OUT_INT *out_samples;
     OUT_INT *outptr, *bp;
-    int fr, j, n, ch;
+    int fr, j, n, ch, ret;
 
-    if (*data_size < MPA_FRAME_SIZE * avctx->channels * sizeof(OUT_INT)) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame->nb_samples = MPA_FRAME_SIZE;
+    if ((ret = avctx->get_buffer(avctx, s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    out_samples = (OUT_INT *)s->frame->data[0];
 
     // Discard too short frames
     if (buf_size < HEADER_SIZE)
@@ -1990,7 +2003,10 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
     /* update codec info */
     avctx->sample_rate = s->mp3decctx[0]->sample_rate;
 
-    *data_size = out_size;
+    s->frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = *s->frame;
+
     return buf_size;
 }
 #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */
@@ -2005,7 +2021,9 @@ AVCodec ff_mp1_decoder = {
     .init           = decode_init,
     .decode         = decode_frame,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
@@ -2020,7 +2038,9 @@ AVCodec ff_mp2_decoder = {
     .init           = decode_init,
     .decode         = decode_frame,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
@@ -2035,7 +2055,9 @@ AVCodec ff_mp3_decoder = {
     .init           = decode_init,
     .decode         = decode_frame,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
@@ -2050,7 +2072,9 @@ AVCodec ff_mp3adu_decoder = {
     .init           = decode_init,
     .decode         = decode_frame_adu,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
@@ -2065,6 +2089,7 @@ AVCodec ff_mp3on4_decoder = {
     .init           = decode_init_mp3on4,
     .close          = decode_close_mp3on4,
     .decode         = decode_frame_mp3on4,
+    .capabilities   = CODEC_CAP_DR1,
     .flush          = flush_mp3on4,
     .long_name      = NULL_IF_CONFIG_SMALL("MP3onMP4"),
 };
diff --git a/libavcodec/mpegaudiodec_float.c b/libavcodec/mpegaudiodec_float.c
index 4482168a3e12737199f90b114a5de57fe60531c0..7b6dcd96a1848aacac8f9ccbe4f7a7891829cb71 100644
--- a/libavcodec/mpegaudiodec_float.c
+++ b/libavcodec/mpegaudiodec_float.c
@@ -31,7 +31,9 @@ AVCodec ff_mp1float_decoder = {
     .init           = decode_init,
     .decode         = decode_frame,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
@@ -46,7 +48,9 @@ AVCodec ff_mp2float_decoder = {
     .init           = decode_init,
     .decode         = decode_frame,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
@@ -61,7 +65,9 @@ AVCodec ff_mp3float_decoder = {
     .init           = decode_init,
     .decode         = decode_frame,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
@@ -76,7 +82,9 @@ AVCodec ff_mp3adufloat_decoder = {
     .init           = decode_init,
     .decode         = decode_frame_adu,
 #if FF_API_PARSE_FRAME
-    .capabilities   = CODEC_CAP_PARSE_ONLY,
+    .capabilities   = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+    .capabilities   = CODEC_CAP_DR1,
 #endif
     .flush          = flush,
     .long_name      = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
@@ -91,6 +99,7 @@ AVCodec ff_mp3on4float_decoder = {
     .init           = decode_init_mp3on4,
     .close          = decode_close_mp3on4,
     .decode         = decode_frame_mp3on4,
+    .capabilities   = CODEC_CAP_DR1,
     .flush          = flush_mp3on4,
     .long_name      = NULL_IF_CONFIG_SMALL("MP3onMP4"),
 };
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 6893a3a8cd08d1507507ce5e309a2ebe515d97ef..1d3bb0cdf28384da1d84794f2e0a7946bbc3e1e3 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -2351,7 +2351,8 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
 
     if (s->avctx->draw_horiz_band) {
         AVFrame *src;
-        int offset[4];
+        int offset[AV_NUM_DATA_POINTERS];
+        int i;
 
         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
             src= (AVFrame*)s->current_picture_ptr;
@@ -2361,15 +2362,14 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
             return;
 
         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
-            offset[0]=
-            offset[1]=
-            offset[2]=
-            offset[3]= 0;
+            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+                offset[i] = 0;
         }else{
             offset[0]= y * s->linesize;
             offset[1]=
             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
-            offset[3]= 0;
+            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+                offset[i] = 0;
         }
 
         emms_c();
diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c
index cf73121ef880395b075a37f68eb5dcb2fd8831ff..3386b8241e094495f465e1b4481d9213b648bfa9 100644
--- a/libavcodec/nellymoserdec.c
+++ b/libavcodec/nellymoserdec.c
@@ -47,6 +47,7 @@
 
 typedef struct NellyMoserDecodeContext {
     AVCodecContext* avctx;
+    AVFrame         frame;
     float          *float_buf;
     DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN];
     AVLFG           random_state;
@@ -142,33 +143,31 @@ static av_cold int decode_init(AVCodecContext * avctx) {
         ff_init_ff_sine_windows(7);
 
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
-static int decode_tag(AVCodecContext * avctx,
-                      void *data, int *data_size,
-                      AVPacket *avpkt) {
+static int decode_tag(AVCodecContext *avctx, void *data,
+                      int *got_frame_ptr, AVPacket *avpkt)
+{
     const uint8_t *buf = avpkt->data;
     const uint8_t *side=av_packet_get_side_data(avpkt, 'F', NULL);
     int buf_size = avpkt->size;
     NellyMoserDecodeContext *s = avctx->priv_data;
-    int data_max = *data_size;
-    int blocks, i, block_size;
-    int16_t *samples_s16 = data;
-    float   *samples_flt = data;
-    *data_size = 0;
+    int blocks, i, ret;
+    int16_t *samples_s16;
+    float   *samples_flt;
 
-    block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt);
     blocks     = buf_size / NELLY_BLOCK_LEN;
 
     if (blocks <= 0) {
         av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
         return AVERROR_INVALIDDATA;
     }
-    if (data_max < blocks * block_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
-    }
+
     if (buf_size % NELLY_BLOCK_LEN) {
         av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n",
                buf_size % NELLY_BLOCK_LEN);
@@ -183,6 +182,15 @@ static int decode_tag(AVCodecContext * avctx,
     if(side && blocks>1 && avctx->sample_rate%11025==0 && (1<<((side[0]>>2)&3)) == blocks)
         avctx->sample_rate= 11025*(blocks/2);
 
+    /* get output buffer */
+    s->frame.nb_samples = NELLY_SAMPLES * blocks;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples_s16 = (int16_t *)s->frame.data[0];
+    samples_flt = (float   *)s->frame.data[0];
+
     for (i=0 ; i<blocks ; i++) {
         if (avctx->sample_fmt == SAMPLE_FMT_FLT) {
             nelly_decode_block(s, buf, samples_flt);
@@ -194,7 +202,9 @@ static int decode_tag(AVCodecContext * avctx,
         }
         buf += NELLY_BLOCK_LEN;
     }
-    *data_size = blocks * block_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }
@@ -204,6 +214,7 @@ static av_cold int decode_end(AVCodecContext * avctx) {
 
     av_freep(&s->float_buf);
     ff_mdct_end(&s->imdct_ctx);
+
     return 0;
 }
 
@@ -215,6 +226,7 @@ AVCodec ff_nellymoser_decoder = {
     .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_tag,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
     .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
                                                       AV_SAMPLE_FMT_S16,
diff --git a/libavcodec/pcm.c b/libavcodec/pcm.c
index 92519c0534d5c6dc56cdca59acca8d7d59ab1bfc..66b095370c27ad92fd24c34435161eacedf2f14b 100644
--- a/libavcodec/pcm.c
+++ b/libavcodec/pcm.c
@@ -192,6 +192,7 @@ static int pcm_encode_frame(AVCodecContext *avctx,
 }
 
 typedef struct PCMDecode {
+    AVFrame frame;
     short table[256];
 } PCMDecode;
 
@@ -223,6 +224,9 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
     if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
         avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec->id);
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -243,22 +247,20 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
         dst += size / 8; \
     }
 
-static int pcm_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int pcm_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *src = avpkt->data;
     int buf_size = avpkt->size;
     PCMDecode *s = avctx->priv_data;
-    int sample_size, c, n, out_size;
+    int sample_size, c, n, ret, samples_per_block;
     uint8_t *samples;
     int32_t *dst_int32_t;
 
-    samples = data;
-
     sample_size = av_get_bits_per_sample(avctx->codec_id)/8;
 
     /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */
+    samples_per_block = 1;
     if (CODEC_ID_PCM_DVD == avctx->codec_id) {
         if (avctx->bits_per_coded_sample != 20 &&
             avctx->bits_per_coded_sample != 24) {
@@ -268,10 +270,13 @@ static int pcm_decode_frame(AVCodecContext *avctx,
             return AVERROR(EINVAL);
         }
         /* 2 samples are interleaved per block in PCM_DVD */
+        samples_per_block = 2;
         sample_size = avctx->bits_per_coded_sample * 2 / 8;
-    } else if (avctx->codec_id == CODEC_ID_PCM_LXF)
+    } else if (avctx->codec_id == CODEC_ID_PCM_LXF) {
         /* we process 40-bit blocks per channel for LXF */
+        samples_per_block = 2;
         sample_size = 5;
+    }
 
     if (sample_size == 0) {
         av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n");
@@ -290,14 +295,13 @@ static int pcm_decode_frame(AVCodecContext *avctx,
 
     n = buf_size/sample_size;
 
-    out_size = n * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (avctx->codec_id == CODEC_ID_PCM_DVD ||
-        avctx->codec_id == CODEC_ID_PCM_LXF)
-        out_size *= 2;
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = n * samples_per_block / avctx->channels;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = s->frame.data[0];
 
     switch(avctx->codec->id) {
     case CODEC_ID_PCM_U32LE:
@@ -403,7 +407,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     case CODEC_ID_PCM_DVD:
     {
         const uint8_t *src8;
-        dst_int32_t = data;
+        dst_int32_t = (int32_t *)s->frame.data[0];
         n /= avctx->channels;
         switch (avctx->bits_per_coded_sample) {
         case 20:
@@ -435,7 +439,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     {
         int i;
         const uint8_t *src8;
-        dst_int32_t = data;
+        dst_int32_t = (int32_t *)s->frame.data[0];
         n /= avctx->channels;
         //unpack and de-planerize
         for (i = 0; i < n; i++) {
@@ -456,7 +460,10 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     default:
         return -1;
     }
-    *data_size = out_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return buf_size;
 }
 
@@ -485,6 +492,7 @@ AVCodec ff_ ## name_ ## _decoder = {            \
     .priv_data_size = sizeof(PCMDecode),        \
     .init           = pcm_decode_init,          \
     .decode         = pcm_decode_frame,         \
+    .capabilities   = CODEC_CAP_DR1,            \
     .sample_fmts = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \
     .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
 }
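Since the PCM decoder now sizes its own output frame, the DVD and LXF special cases are folded into samples_per_block: nb_samples = (buf_size / sample_size) * samples_per_block / channels. As a worked example with assumed numbers, 20-bit PCM-DVD packs two samples into 40 bits, so sample_size = 20 * 2 / 8 = 5 bytes and samples_per_block = 2; a 1000-byte stereo payload then yields (1000 / 5) * 2 / 2 = 200 samples per channel.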
diff --git a/libavcodec/qcelpdec.c b/libavcodec/qcelpdec.c
index 76480f0d858b2e88fe0e542d5dee182140b1bf45..47b1f68fe57dcbd662b9e15c510cc13268af85f2 100644
--- a/libavcodec/qcelpdec.c
+++ b/libavcodec/qcelpdec.c
@@ -56,6 +56,7 @@ typedef enum
 
 typedef struct
 {
+    AVFrame           avframe;
     GetBitContext     gb;
     qcelp_packet_rate bitrate;
     QCELPFrame        frame;    /**< unpacked data frame */
@@ -97,6 +98,9 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx)
     for(i=0; i<10; i++)
         q->prev_lspf[i] = (i+1)/11.;
 
+    avcodec_get_frame_defaults(&q->avframe);
+    avctx->coded_frame = &q->avframe;
+
     return 0;
 }
 
@@ -682,23 +686,25 @@ static void postfilter(QCELPContext *q, float *samples, float *lpc)
         160, 0.9375, &q->postfilter_agc_mem);
 }
 
-static int qcelp_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                              AVPacket *avpkt)
+static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QCELPContext *q = avctx->priv_data;
-    float *outbuffer = data;
-    int   i, out_size;
+    float *outbuffer;
+    int   i, ret;
     float quantized_lspf[10], lpc[10];
     float gain[16];
     float *formant_mem;
 
-    out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    q->avframe.nb_samples = 160;
+    if ((ret = avctx->get_buffer(avctx, &q->avframe)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    outbuffer = (float *)q->avframe.data[0];
 
     if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) {
         warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
@@ -783,7 +789,8 @@ erasure:
     memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf));
     q->prev_bitrate = q->bitrate;
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = q->avframe;
 
     return buf_size;
 }
@@ -795,6 +802,7 @@ AVCodec ff_qcelp_decoder =
     .id     = CODEC_ID_QCELP,
     .init   = qcelp_decode_init,
     .decode = qcelp_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .priv_data_size = sizeof(QCELPContext),
     .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
 };
diff --git a/libavcodec/qdm2.c b/libavcodec/qdm2.c
index e2eb0eacb85d3c1616aa1586a2fc45613fdf8b15..cc71825838c1649d18231fb54f1f81fae2650ea7 100644
--- a/libavcodec/qdm2.c
+++ b/libavcodec/qdm2.c
@@ -130,6 +130,8 @@ typedef struct {
  * QDM2 decoder context
  */
 typedef struct {
+    AVFrame frame;
+
     /// Parameters from codec header, do not change during playback
     int nb_channels;         ///< number of channels
     int channels;            ///< number of channels
@@ -1876,6 +1878,9 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
 //    dump_context(s);
     return 0;
 }
@@ -1956,30 +1961,27 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
 }
 
 
-static int qdm2_decode_frame(AVCodecContext *avctx,
-            void *data, int *data_size,
-            AVPacket *avpkt)
+static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QDM2Context *s = avctx->priv_data;
-    int16_t *out = data;
-    int i, out_size;
+    int16_t *out;
+    int i, ret;
 
     if(!buf)
         return 0;
     if(buf_size < s->checksum_size)
         return -1;
 
-    out_size = 16 * s->channels * s->frame_size *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = 16 * s->frame_size;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
-
-    av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n",
-       buf_size, buf, s->checksum_size, data, *data_size);
+    out = (int16_t *)s->frame.data[0];
 
     for (i = 0; i < 16; i++) {
         if (qdm2_decode(s, buf, out) < 0)
@@ -1987,7 +1989,8 @@ static int qdm2_decode_frame(AVCodecContext *avctx,
         out += s->channels * s->frame_size;
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
 
     return s->checksum_size;
 }
@@ -2001,5 +2004,6 @@ AVCodec ff_qdm2_decoder =
     .init = qdm2_decode_init,
     .close = qdm2_decode_close,
     .decode = qdm2_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"),
 };
diff --git a/libavcodec/ra144.h b/libavcodec/ra144.h
index 722b42e68aa7c9a4e07ea55e7a912bb94efe392e..9665534f7b8a3cb4153960ffa815cbda05b0cf13 100644
--- a/libavcodec/ra144.h
+++ b/libavcodec/ra144.h
@@ -34,6 +34,7 @@
 
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
     LPCContext lpc_ctx;
 
     unsigned int     old_energy;        ///< previous frame energy
diff --git a/libavcodec/ra144dec.c b/libavcodec/ra144dec.c
index 2e57e5054c668b6a625180c6618d4681036b3700..428f78873353cf7ccaf4e7005000f4957ce6d49d 100644
--- a/libavcodec/ra144dec.c
+++ b/libavcodec/ra144dec.c
@@ -38,6 +38,10 @@ static av_cold int ra144_decode_init(AVCodecContext * avctx)
     ractx->lpc_coef[1] = ractx->lpc_tables[1];
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&ractx->frame);
+    avctx->coded_frame = &ractx->frame;
+
     return 0;
 }
 
@@ -54,8 +58,8 @@ static void do_output_subblock(RA144Context *ractx, const uint16_t  *lpc_coefs,
 }
 
 /** Uncompress one block (20 bytes -> 160*2 bytes). */
-static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
-                              int *data_size, AVPacket *avpkt)
+static int ra144_decode_frame(AVCodecContext * avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -64,23 +68,25 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
     uint16_t block_coefs[NBLOCKS][LPC_ORDER]; // LPC coefficients of each sub-block
     unsigned int lpc_refl[LPC_ORDER];         // LPC reflection coefficients of the frame
     int i, j;
-    int out_size;
-    int16_t *data = vdata;
+    int ret;
+    int16_t *samples;
     unsigned int energy;
 
     RA144Context *ractx = avctx->priv_data;
     GetBitContext gb;
 
-    out_size = NBLOCKS * BLOCKSIZE * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    ractx->frame.nb_samples = NBLOCKS * BLOCKSIZE;
+    if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)ractx->frame.data[0];
 
     if(buf_size < FRAMESIZE) {
         av_log(avctx, AV_LOG_ERROR,
                "Frame too small (%d bytes). Truncated file?\n", buf_size);
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
     init_get_bits(&gb, buf, FRAMESIZE * 8);
@@ -106,7 +112,7 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
         do_output_subblock(ractx, block_coefs[i], refl_rms[i], &gb);
 
         for (j=0; j < BLOCKSIZE; j++)
-            *data++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2);
+            *samples++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2);
     }
 
     ractx->old_energy = energy;
@@ -114,7 +120,9 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
 
     FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = ractx->frame;
+
     return FRAMESIZE;
 }
 
@@ -125,5 +133,6 @@ AVCodec ff_ra_144_decoder = {
     .priv_data_size = sizeof(RA144Context),
     .init           = ra144_decode_init,
     .decode         = ra144_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"),
 };
diff --git a/libavcodec/ra288.c b/libavcodec/ra288.c
index a91a06cfa1ce40e89d4396b269739d0f0855e492..3a9f409f4aa8bc2eefd3d544459120f10319366b 100644
--- a/libavcodec/ra288.c
+++ b/libavcodec/ra288.c
@@ -36,6 +36,7 @@
 #define RA288_BLOCKS_PER_FRAME 32
 
 typedef struct {
+    AVFrame frame;
     DSPContext dsp;
     DECLARE_ALIGNED(16, float,   sp_lpc)[FFALIGN(36, 8)];   ///< LPC coefficients for speech data (spec: A)
     DECLARE_ALIGNED(16, float, gain_lpc)[FFALIGN(10, 8)];   ///< LPC coefficients for gain        (spec: GB)
@@ -62,6 +63,10 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
     RA288Context *ractx = avctx->priv_data;
     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
     dsputil_init(&ractx->dsp, avctx);
+
+    avcodec_get_frame_defaults(&ractx->frame);
+    avctx->coded_frame = &ractx->frame;
+
     return 0;
 }
 
@@ -165,12 +170,12 @@ static void backward_filter(RA288Context *ractx,
 }
 
 static int ra288_decode_frame(AVCodecContext * avctx, void *data,
-                              int *data_size, AVPacket *avpkt)
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    float *out = data;
-    int i, out_size;
+    float *out;
+    int i, ret;
     RA288Context *ractx = avctx->priv_data;
     GetBitContext gb;
 
@@ -181,12 +186,13 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    ractx->frame.nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
+    if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    out = (float *)ractx->frame.data[0];
 
     init_get_bits(&gb, buf, avctx->block_align * 8);
 
@@ -208,7 +214,9 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
         }
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = ractx->frame;
+
     return avctx->block_align;
 }
 
@@ -219,5 +227,6 @@ AVCodec ff_ra_288_decoder = {
     .priv_data_size = sizeof(RA288Context),
     .init           = ra288_decode_init,
     .decode         = ra288_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"),
 };
diff --git a/libavcodec/s302m.c b/libavcodec/s302m.c
index 713d80f5d9af3034ad324438cf0f02d301874b7f..4a17fa102e6f0f8def990c7f4a0423d61124607c 100644
--- a/libavcodec/s302m.c
+++ b/libavcodec/s302m.c
@@ -25,6 +25,10 @@
 
 #define AES3_HEADER_LEN 4
 
+typedef struct S302MDecodeContext {
+    AVFrame frame;
+} S302MDecodeContext;
+
 static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
                                     int buf_size)
 {
@@ -83,10 +87,12 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
 }
 
 static int s302m_decode_frame(AVCodecContext *avctx, void *data,
-                              int *data_size, AVPacket *avpkt)
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    S302MDecodeContext *s = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
+    int block_size, ret;
 
     int frame_size = s302m_parse_frame_header(avctx, buf, buf_size);
     if (frame_size < 0)
@@ -95,11 +101,18 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
     buf_size -= AES3_HEADER_LEN;
     buf      += AES3_HEADER_LEN;
 
-    if (*data_size < 4 * buf_size * 8 / (avctx->bits_per_coded_sample + 4))
-        return -1;
+    /* get output buffer */
+    block_size = (avctx->bits_per_coded_sample + 4) / 4;
+    s->frame.nb_samples = 2 * (buf_size / block_size) / avctx->channels;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+
+    buf_size = (s->frame.nb_samples * avctx->channels / 2) * block_size;
 
     if (avctx->bits_per_coded_sample == 24) {
-        uint32_t *o = data;
+        uint32_t *o = (uint32_t *)s->frame.data[0];
         for (; buf_size > 6; buf_size -= 7) {
             *o++ = (av_reverse[buf[2]]        << 24) |
                    (av_reverse[buf[1]]        << 16) |
@@ -110,9 +123,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
                    (av_reverse[buf[3] & 0x0f] <<  4);
             buf += 7;
         }
-        *data_size = (uint8_t*) o - (uint8_t*) data;
     } else if (avctx->bits_per_coded_sample == 20) {
-        uint32_t *o = data;
+        uint32_t *o = (uint32_t *)s->frame.data[0];
         for (; buf_size > 5; buf_size -= 6) {
             *o++ = (av_reverse[buf[2] & 0xf0] << 28) |
                    (av_reverse[buf[1]]        << 20) |
@@ -122,9 +134,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
                    (av_reverse[buf[3]]        << 12);
             buf += 6;
         }
-        *data_size = (uint8_t*) o - (uint8_t*) data;
     } else {
-        uint16_t *o = data;
+        uint16_t *o = (uint16_t *)s->frame.data[0];
         for (; buf_size > 4; buf_size -= 5) {
             *o++ = (av_reverse[buf[1]]        <<  8) |
                     av_reverse[buf[0]];
@@ -133,10 +144,22 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
                    (av_reverse[buf[2]]        >>  4);
             buf += 5;
         }
-        *data_size = (uint8_t*) o - (uint8_t*) data;
     }
 
-    return buf - avpkt->data;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
+    return avpkt->size;
+}
+
+static av_cold int s302m_decode_init(AVCodecContext *avctx)
+{
+    S302MDecodeContext *s = avctx->priv_data;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
+    return 0;
 }
 
 
@@ -144,6 +167,9 @@ AVCodec ff_s302m_decoder = {
     .name           = "s302m",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = CODEC_ID_S302M,
+    .priv_data_size = sizeof(S302MDecodeContext),
+    .init           = s302m_decode_init,
     .decode         = s302m_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
 };
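
The buffer sizing above encodes a bit of AES3 framing knowledge: each subframe carries one sample plus 4 VUCP status bits, and subframes travel in pairs, so a pair of B-bit samples occupies 2*(B+4) bits, i.e. block_size = (B+4)/4 bytes, which is why nb_samples per channel is 2*(buf_size/block_size)/channels. A small self-contained sketch of that arithmetic (the main() and the 1440-byte figure are purely illustrative):

    #include <stdio.h>

    /* bytes occupied by one pair of AES3 subframes (two samples + VUCP bits) */
    static int s302m_block_size(int bits_per_coded_sample)
    {
        return (bits_per_coded_sample + 4) / 4;
    }

    static int s302m_nb_samples(int buf_size, int bits_per_coded_sample, int channels)
    {
        return 2 * (buf_size / s302m_block_size(bits_per_coded_sample)) / channels;
    }

    int main(void)
    {
        /* a hypothetical 1440-byte payload of 24-bit stereo:
           block_size = 7, 205 complete pairs -> 205 samples per channel */
        printf("%d\n", s302m_nb_samples(1440, 24, 2));
        return 0;
    }
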
diff --git a/libavcodec/shorten.c b/libavcodec/shorten.c
index ee01e886a979ffecb3e31e6069653b6e9ff729d2..26ce6fe885b8e5b38435304d9d03954939477739 100644
--- a/libavcodec/shorten.c
+++ b/libavcodec/shorten.c
@@ -79,6 +79,7 @@ static const uint8_t is_audio_command[10] = { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0 };
 
 typedef struct ShortenContext {
     AVCodecContext *avctx;
+    AVFrame frame;
     GetBitContext gb;
 
     int min_framesize, max_framesize;
@@ -112,6 +113,9 @@ static av_cold int shorten_decode_init(AVCodecContext * avctx)
     s->avctx = avctx;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -394,15 +398,13 @@ static int read_header(ShortenContext *s)
     return 0;
 }
 
-static int shorten_decode_frame(AVCodecContext *avctx,
-        void *data, int *data_size,
-        AVPacket *avpkt)
+static int shorten_decode_frame(AVCodecContext *avctx, void *data,
+                                int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ShortenContext *s = avctx->priv_data;
     int i, input_buf_size = 0;
-    int16_t *samples = data;
     int ret;
 
     /* allocate internal bitstream buffer */
@@ -436,7 +438,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
         /* do not decode until buffer has at least max_framesize bytes or
            the end of the file has been reached */
         if (buf_size < s->max_framesize && avpkt->data) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return input_buf_size;
         }
     }
@@ -448,13 +450,13 @@ static int shorten_decode_frame(AVCodecContext *avctx,
     if (!s->got_header) {
         if ((ret = read_header(s)) < 0)
             return ret;
-        *data_size = 0;
+        *got_frame_ptr = 0;
         goto finish_frame;
     }
 
     /* if quit command was read previously, don't decode anything */
     if (s->got_quit_command) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return avpkt->size;
     }
 
@@ -464,7 +466,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
         int len;
 
         if (get_bits_left(&s->gb) < 3+FNSIZE) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
             break;
         }
 
@@ -472,7 +474,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
 
         if (cmd > FN_VERBATIM) {
             av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd);
-            *data_size = 0;
+            *got_frame_ptr = 0;
             break;
         }
 
@@ -507,7 +509,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
                     break;
             }
             if (cmd == FN_BLOCKSIZE || cmd == FN_QUIT) {
-                *data_size = 0;
+                *got_frame_ptr = 0;
                 break;
             }
         } else {
@@ -571,19 +573,23 @@ static int shorten_decode_frame(AVCodecContext *avctx,
             /* if this is the last channel in the block, output the samples */
             s->cur_chan++;
             if (s->cur_chan == s->channels) {
-                int out_size = s->blocksize * s->channels *
-                               av_get_bytes_per_sample(avctx->sample_fmt);
-                if (*data_size < out_size) {
-                    av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-                    return AVERROR(EINVAL);
+                /* get output buffer */
+                s->frame.nb_samples = s->blocksize;
+                if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+                    av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+                    return ret;
                 }
-                interleave_buffer(samples, s->channels, s->blocksize, s->decoded);
-                *data_size = out_size;
+                /* interleave output */
+                interleave_buffer((int16_t *)s->frame.data[0], s->channels,
+                                  s->blocksize, s->decoded);
+
+                *got_frame_ptr   = 1;
+                *(AVFrame *)data = s->frame;
             }
         }
     }
     if (s->cur_chan < s->channels)
-        *data_size = 0;
+        *got_frame_ptr = 0;
 
 finish_frame:
     s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8);
@@ -614,6 +620,7 @@ static av_cold int shorten_decode_close(AVCodecContext *avctx)
     }
     av_freep(&s->bitstream);
     av_freep(&s->coeffs);
+
     return 0;
 }
 
@@ -625,6 +632,6 @@ AVCodec ff_shorten_decoder = {
     .init           = shorten_decode_init,
     .close          = shorten_decode_close,
     .decode         = shorten_decode_frame,
-    .capabilities   = CODEC_CAP_DELAY,
+    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name= NULL_IF_CONFIG_SMALL("Shorten"),
 };
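
Shorten keeps CODEC_CAP_DELAY: it buffers input until max_framesize bytes are available and returns header-only or partial packets with *got_frame_ptr = 0 rather than an error, so callers of the new API have to treat "no frame yet" as a normal outcome. A reduced sketch of that caller-side handling (decode_one_packet() is a hypothetical helper, not lavc API):

    #include <libavcodec/avcodec.h>

    static int decode_one_packet(AVCodecContext *avctx, AVPacket *pkt,
                                 AVFrame *frame, int *got_frame)
    {
        int ret = avcodec_decode_audio4(avctx, frame, got_frame, pkt);
        if (ret < 0)
            return ret;           /* hard decode error */
        if (!*got_frame)
            return ret;           /* input consumed, nothing to output yet */

        /* frame->nb_samples samples per channel are now available in
           frame->data[0] (interleaved) or frame->extended_data[] (planar) */
        return ret;               /* bytes of pkt consumed */
    }
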
diff --git a/libavcodec/sipr.c b/libavcodec/sipr.c
index 20d9da15e6cd7326e97ac0ab46b7bcd3ebc56c8b..d66c14fb128213cddba23077571bb6b8d205a713 100644
--- a/libavcodec/sipr.c
+++ b/libavcodec/sipr.c
@@ -507,20 +507,23 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
 
     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
 
+    avcodec_get_frame_defaults(&ctx->frame);
+    avctx->coded_frame = &ctx->frame;
+
     return 0;
 }
 
-static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
-                             int *data_size, AVPacket *avpkt)
+static int sipr_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     SiprContext *ctx = avctx->priv_data;
     const uint8_t *buf=avpkt->data;
     SiprParameters parm;
     const SiprModeParam *mode_par = &modes[ctx->mode];
     GetBitContext gb;
-    float *data = datap;
+    float *samples;
     int subframe_size = ctx->mode == MODE_16k ? L_SUBFR_16k : SUBFR_SIZE;
-    int i, out_size;
+    int i, ret;
 
     ctx->avctx = avctx;
     if (avpkt->size < (mode_par->bits_per_frame >> 3)) {
@@ -530,27 +533,27 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
         return -1;
     }
 
-    out_size = mode_par->frames_per_packet * subframe_size *
-               mode_par->subframe_count *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR,
-               "Error processing packet: output buffer (%d) too small\n",
-               *data_size);
-        return -1;
+    /* get output buffer */
+    ctx->frame.nb_samples = mode_par->frames_per_packet * subframe_size *
+                            mode_par->subframe_count;
+    if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (float *)ctx->frame.data[0];
 
     init_get_bits(&gb, buf, mode_par->bits_per_frame);
 
     for (i = 0; i < mode_par->frames_per_packet; i++) {
         decode_parameters(&parm, &gb, mode_par);
 
-        ctx->decode_frame(ctx, &parm, data);
+        ctx->decode_frame(ctx, &parm, samples);
 
-        data += subframe_size * mode_par->subframe_count;
+        samples += subframe_size * mode_par->subframe_count;
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = ctx->frame;
 
     return mode_par->bits_per_frame >> 3;
 }
@@ -562,5 +565,6 @@ AVCodec ff_sipr_decoder = {
     .priv_data_size = sizeof(SiprContext),
     .init           = sipr_decoder_init,
     .decode         = sipr_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"),
 };
diff --git a/libavcodec/smacker.c b/libavcodec/smacker.c
index 19bfab4ffd7fee677c3f9a45a350dc6f1c27e5f4..a983922fc75a10314e92b3565bafd1f9baa4fb92 100644
--- a/libavcodec/smacker.c
+++ b/libavcodec/smacker.c
@@ -559,31 +559,43 @@ static av_cold int decode_end(AVCodecContext *avctx)
 }
 
 
+typedef struct SmackerAudioContext {
+    AVFrame frame;
+} SmackerAudioContext;
+
 static av_cold int smka_decode_init(AVCodecContext *avctx)
 {
+    SmackerAudioContext *s = avctx->priv_data;
+
     if (avctx->channels < 1 || avctx->channels > 2) {
         av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
         return AVERROR(EINVAL);
     }
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
 /**
  * Decode Smacker audio data
  */
-static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
+static int smka_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    SmackerAudioContext *s = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     GetBitContext gb;
     HuffContext h[4];
     VLC vlc[4];
-    int16_t *samples = data;
-    uint8_t *samples8 = data;
+    int16_t *samples;
+    uint8_t *samples8;
     int val;
-    int i, res;
+    int i, res, ret;
     int unp_size;
     int bits, stereo;
     int pred[2] = {0, 0};
@@ -599,15 +611,11 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
 
     if(!get_bits1(&gb)){
         av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return 1;
     }
     stereo = get_bits1(&gb);
     bits = get_bits1(&gb);
-    if (unp_size & 0xC0000000 || unp_size > *data_size) {
-        av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
-        return -1;
-    }
     if (stereo ^ (avctx->channels != 1)) {
         av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
         return AVERROR(EINVAL);
@@ -617,6 +625,15 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         return AVERROR(EINVAL);
     }
 
+    /* get output buffer */
+    s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1));
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples  = (int16_t *)s->frame.data[0];
+    samples8 =            s->frame.data[0];
+
     memset(vlc, 0, sizeof(VLC) * 4);
     memset(h, 0, sizeof(HuffContext) * 4);
     // Initialize
@@ -706,7 +723,9 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         av_free(h[i].values);
     }
 
-    *data_size = unp_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return buf_size;
 }
 
@@ -726,8 +745,10 @@ AVCodec ff_smackaud_decoder = {
     .name           = "smackaud",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = CODEC_ID_SMACKAUDIO,
+    .priv_data_size = sizeof(SmackerAudioContext),
     .init           = smka_decode_init,
     .decode         = smka_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Smacker audio"),
 };
 
diff --git a/libavcodec/svq1dec.c b/libavcodec/svq1dec.c
index 431913c57df86243c65e7a88e61c8952f88397f8..83fe07e4c7c463ac871675108e30fca1979b575a 100644
--- a/libavcodec/svq1dec.c
+++ b/libavcodec/svq1dec.c
@@ -195,7 +195,8 @@ static const uint8_t string_table[256] = {
 
 #define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)\
       codebook = (const uint32_t *) cbook[level];\
-      bit_cache = get_bits (bitbuf, 4*stages);\
+      if (stages > 0)\
+        bit_cache = get_bits (bitbuf, 4*stages);\
       /* calculate codebook entries for this vector */\
       for (j=0; j < stages; j++) {\
         entries[j] = (((bit_cache >> (4*(stages - j - 1))) & 0xF) + 16*j) << (level + 1);\
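
The added check matters because the bit reader does not support zero-length reads (get_bits() is specified for 1..25 bits), and at the top of the codebook hierarchy stages can legitimately be 0, in which case the loop that follows never executes and bit_cache is not needed at all. A hypothetical wrapper expressing the same guard:

    #include "get_bits.h"

    /* hypothetical helper: read n bits, treating n == 0 as "nothing to read"
       instead of handing an invalid length to get_bits() */
    static inline unsigned read_bits_or_zero(GetBitContext *gb, int n)
    {
        return n > 0 ? get_bits(gb, n) : 0;
    }
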
diff --git a/libavcodec/truespeech.c b/libavcodec/truespeech.c
index b0a8e441508685e2cfe5a8cccfb3579079839086..bb4ce6f219753d6e22e708dd2a62f89d9ca49a17 100644
--- a/libavcodec/truespeech.c
+++ b/libavcodec/truespeech.c
@@ -34,6 +34,7 @@
  * TrueSpeech decoder context
  */
 typedef struct {
+    AVFrame frame;
     DSPContext dsp;
     /* input data */
     uint8_t buffer[32];
@@ -69,6 +70,9 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
 
     dsputil_init(&c->dsp, avctx);
 
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
 
@@ -299,17 +303,16 @@ static void truespeech_save_prevvec(TSContext *c)
         c->prevfilt[i] = c->cvector[i];
 }
 
-static int truespeech_decode_frame(AVCodecContext *avctx,
-                void *data, int *data_size,
-                AVPacket *avpkt)
+static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
+                                   int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TSContext *c = avctx->priv_data;
 
     int i, j;
-    short *samples = data;
-    int iterations, out_size;
+    int16_t *samples;
+    int iterations, ret;
 
     iterations = buf_size / 32;
 
@@ -319,13 +322,15 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    out_size = iterations * 240 * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    c->frame.nb_samples = iterations * 240;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)c->frame.data[0];
 
-    memset(samples, 0, out_size);
+    memset(samples, 0, iterations * 240 * sizeof(*samples));
 
     for(j = 0; j < iterations; j++) {
         truespeech_read_frame(c, buf);
@@ -345,7 +350,8 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
         truespeech_save_prevvec(c);
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = c->frame;
 
     return buf_size;
 }
@@ -357,5 +363,6 @@ AVCodec ff_truespeech_decoder = {
     .priv_data_size = sizeof(TSContext),
     .init           = truespeech_decode_init,
     .decode         = truespeech_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"),
 };
diff --git a/libavcodec/tta.c b/libavcodec/tta.c
index 7ff5116ffbbb10efac3df7338b296ca3c4e84859..3c4c152213ca914afadf9c4b5988d7fa11b6645d 100644
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -56,6 +56,7 @@ typedef struct TTAChannel {
 
 typedef struct TTAContext {
     AVCodecContext *avctx;
+    AVFrame frame;
     GetBitContext gb;
 
     int format, channels, bps, data_length;
@@ -288,17 +289,19 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
         return -1;
     }
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
-static int tta_decode_frame(AVCodecContext *avctx,
-        void *data, int *data_size,
-        AVPacket *avpkt)
+static int tta_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TTAContext *s = avctx->priv_data;
-    int i, out_size;
+    int i, ret;
     int cur_chan = 0, framelen = s->frame_length;
     int32_t *p;
 
@@ -309,10 +312,11 @@ static int tta_decode_frame(AVCodecContext *avctx,
     if (!s->total_frames && s->last_frame_length)
         framelen = s->last_frame_length;
 
-    out_size = framelen * s->channels * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n");
-        return -1;
+    /* get output buffer */
+    s->frame.nb_samples = framelen;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
     // decode directly to output buffer for 24-bit sample format
@@ -409,20 +413,20 @@ static int tta_decode_frame(AVCodecContext *avctx,
         // convert to output buffer
         switch(s->bps) {
             case 1: {
-                uint8_t *samples = data;
+                uint8_t *samples = s->frame.data[0];
                 for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
                     *samples++ = *p + 0x80;
                 break;
             }
             case 2: {
-                uint16_t *samples = data;
+                uint16_t *samples = (uint16_t *)s->frame.data[0];
                 for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
                     *samples++ = *p;
                 break;
             }
             case 3: {
                 // shift samples for 24-bit sample format
-                int32_t *samples = data;
+                int32_t *samples = (int32_t *)s->frame.data[0];
                 for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
                     *samples++ <<= 8;
                 // reset decode buffer
@@ -433,7 +437,8 @@ static int tta_decode_frame(AVCodecContext *avctx,
                 av_log(s->avctx, AV_LOG_ERROR, "Error, only 16bit samples supported!\n");
         }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }
@@ -455,5 +460,6 @@ AVCodec ff_tta_decoder = {
     .init           = tta_decode_init,
     .close          = tta_decode_close,
     .decode         = tta_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"),
 };
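
The per-bps copy loops above do two small conversions worth spelling out: 8-bit output is offset-binary, so the signed decoded value gets +0x80, and the 24-bit path decodes in place and then shifts each value left by 8 so the 24 significant bits occupy the top of the 32-bit output range. A trivial standalone illustration of both:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t p   = -3;                   /* decoder works on signed int32 */
        uint8_t u8  = p + 0x80;             /* 8-bit offset-binary output: 0x7d */
        int32_t s32 = 0x123456 << 8;        /* 24-bit value scaled to S32: 0x12345600 */
        printf("%02x %08x\n", u8, (unsigned)s32);
        return 0;
    }
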
diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c
index bf8b48b15ebb5725b4b4488944e602447f2b8cb0..1810ce8c88c7be29f047dfec8177f49050b3c9f1 100644
--- a/libavcodec/twinvq.c
+++ b/libavcodec/twinvq.c
@@ -174,6 +174,7 @@ static const ModeTab mode_44_48 = {
 
 typedef struct TwinContext {
     AVCodecContext *avctx;
+    AVFrame frame;
     DSPContext      dsp;
     FFTContext mdct_ctx[3];
 
@@ -195,6 +196,7 @@ typedef struct TwinContext {
     float *curr_frame;               ///< non-interleaved output
     float *prev_frame;               ///< non-interleaved previous frame
     int last_block_pos[2];
+    int discarded_packets;
 
     float *cos_tabs[3];
 
@@ -676,6 +678,9 @@ static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype,
                          i);
     }
 
+    if (!out)
+        return;
+
     size2 = tctx->last_block_pos[0];
     size1 = mtab->size - size2;
     if (tctx->avctx->channels == 2) {
@@ -811,16 +816,16 @@ static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb,
 }
 
 static int twin_decode_frame(AVCodecContext * avctx, void *data,
-                             int *data_size, AVPacket *avpkt)
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TwinContext *tctx = avctx->priv_data;
     GetBitContext gb;
     const ModeTab *mtab = tctx->mtab;
-    float *out = data;
+    float *out = NULL;
     enum FrameType ftype;
-    int window_type, out_size;
+    int window_type, ret;
     static const enum FrameType wtype_to_ftype_table[] = {
         FT_LONG,   FT_LONG, FT_SHORT, FT_LONG,
         FT_MEDIUM, FT_LONG, FT_LONG,  FT_MEDIUM, FT_MEDIUM
@@ -832,11 +837,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
         return AVERROR(EINVAL);
     }
 
-    out_size = mtab->size * avctx->channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    if (tctx->discarded_packets >= 2) {
+        tctx->frame.nb_samples = mtab->size;
+        if ((ret = avctx->get_buffer(avctx, &tctx->frame)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return ret;
+        }
+        out = (float *)tctx->frame.data[0];
     }
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -856,12 +864,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
 
     FFSWAP(float*, tctx->curr_frame, tctx->prev_frame);
 
-    if (tctx->avctx->frame_number < 2) {
-        *data_size=0;
+    if (tctx->discarded_packets < 2) {
+        tctx->discarded_packets++;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = tctx->frame;
 
     return buf_size;
 }
@@ -1153,6 +1163,9 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
 
     memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist));
 
+    avcodec_get_frame_defaults(&tctx->frame);
+    avctx->coded_frame = &tctx->frame;
+
     return 0;
 }
 
@@ -1164,5 +1177,6 @@ AVCodec ff_twinvq_decoder = {
     .init           = twin_decode_init,
     .close          = twin_decode_close,
     .decode         = twin_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
 };
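
The new discarded_packets counter replaces the old test on avctx->frame_number: with the AVFrame-based decode call the frame counter only advances once a frame is actually returned, so a check like frame_number < 2 could never get past the two priming packets the overlap-add synthesis needs. Keeping the count in the decoder context makes the two-packet delay self-contained. A reduced sketch of the pattern (the helper name is illustrative, not decoder code):

    /* skip the first two packets, then emit one frame per packet */
    static int apply_two_packet_delay(int *discarded_packets, int *got_frame_ptr)
    {
        if (*discarded_packets < 2) {
            (*discarded_packets)++;
            *got_frame_ptr = 0;      /* packet consumed, no output yet */
            return 0;
        }
        *got_frame_ptr = 1;
        return 1;
    }
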
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index f023a8967c8b127b24911b33d8c1462a993a4de9..fa94b7c124e84c35fc0a17b4b61bcf02e5e6f7fa 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -127,7 +127,10 @@ void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
 
 #define INTERNAL_BUFFER_SIZE (32+1)
 
-void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+                               int linesize_align[AV_NUM_DATA_POINTERS])
+{
+    int i;
     int w_align= 1;
     int h_align= 1;
 
@@ -213,10 +216,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
         *height+=2; // some of the optimized chroma MC reads one line too much
                     // which is also done in mpeg decoders with lowres > 0
 
-    linesize_align[0] =
-    linesize_align[1] =
-    linesize_align[2] =
-    linesize_align[3] = STRIDE_ALIGN;
+    for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+        linesize_align[i] = STRIDE_ALIGN;
 //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes
 //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the
 //picture size unneccessarily in some cases. The solution here is not
@@ -225,16 +226,15 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
     if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
        s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
        s->codec_id == CODEC_ID_VP6A || s->codec_id == CODEC_ID_DIRAC) {
-        linesize_align[0] =
-        linesize_align[1] =
-        linesize_align[2] = 16;
+        for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+            linesize_align[i] = 16;
     }
 #endif
 }
 
 void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
     int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w;
-    int linesize_align[4];
+    int linesize_align[AV_NUM_DATA_POINTERS];
     int align;
     avcodec_align_dimensions2(s, width, height, linesize_align);
     align = FFMAX(linesize_align[0], linesize_align[3]);
@@ -260,7 +260,108 @@ void ff_init_buffer_info(AVCodecContext *s, AVFrame *pic)
     pic->format              = s->pix_fmt;
 }
 
-int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
+static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    AVCodecInternal *avci = avctx->internal;
+    InternalBuffer *buf;
+    int buf_size, ret, i, needs_extended_data;
+
+    buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
+                                          frame->nb_samples, avctx->sample_fmt,
+                                          32);
+    if (buf_size < 0)
+        return AVERROR(EINVAL);
+
+    needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) &&
+                          avctx->channels > AV_NUM_DATA_POINTERS;
+
+    /* allocate InternalBuffer if needed */
+    if (!avci->buffer) {
+        avci->buffer = av_mallocz(sizeof(InternalBuffer));
+        if (!avci->buffer)
+            return AVERROR(ENOMEM);
+    }
+    buf = avci->buffer;
+
+    /* if there is a previously-used internal buffer, check its size and
+       channel count to see if we can reuse it */
+    if (buf->extended_data) {
+        /* if current buffer is too small, free it */
+        if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
+            av_free(buf->extended_data[0]);
+            if (buf->extended_data != buf->data)
+                av_free(buf->extended_data);
+            buf->extended_data = NULL;
+            buf->data[0] = NULL;
+        }
+        /* if number of channels has changed, reset and/or free extended data
+           pointers but leave data buffer in buf->data[0] for reuse */
+        if (buf->nb_channels != avctx->channels) {
+            if (buf->extended_data != buf->data)
+                av_free(buf->extended_data);
+            buf->extended_data = NULL;
+        }
+    }
+
+    /* if there is no previous buffer or the previous buffer cannot be used
+       as-is, allocate a new buffer and/or rearrange the channel pointers */
+    if (!buf->extended_data) {
+        /* if the channel pointers will fit, just set extended_data to data,
+           otherwise allocate the extended_data channel pointers */
+        if (needs_extended_data) {
+            buf->extended_data = av_mallocz(avctx->channels *
+                                            sizeof(*buf->extended_data));
+            if (!buf->extended_data)
+                return AVERROR(ENOMEM);
+        } else {
+            buf->extended_data = buf->data;
+        }
+
+        /* if there is a previous buffer and it is large enough, reuse it and
+           just fill-in new channel pointers and linesize, otherwise allocate
+           a new buffer */
+        if (buf->extended_data[0]) {
+            ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
+                                         buf->extended_data[0], avctx->channels,
+                                         frame->nb_samples, avctx->sample_fmt,
+                                         32);
+        } else {
+            ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
+                                   avctx->channels, frame->nb_samples,
+                                   avctx->sample_fmt, 32);
+        }
+        if (ret < 0)
+            return ret;
+
+        /* if data was not used for extended_data, we need to copy as many of
+           the extended_data channel pointers as will fit */
+        if (needs_extended_data) {
+            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+                buf->data[i] = buf->extended_data[i];
+        }
+        buf->audio_data_size = buf_size;
+        buf->nb_channels     = avctx->channels;
+    }
+
+    /* copy InternalBuffer info to the AVFrame */
+    frame->type          = FF_BUFFER_TYPE_INTERNAL;
+    frame->extended_data = buf->extended_data;
+    frame->linesize[0]   = buf->linesize[0];
+    memcpy(frame->data, buf->data, sizeof(frame->data));
+
+    if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
+    else            frame->pkt_pts = AV_NOPTS_VALUE;
+    frame->reordered_opaque = avctx->reordered_opaque;
+
+    if (avctx->debug & FF_DEBUG_BUFFERS)
+        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
+               "internal audio buffer used\n", frame);
+
+    return 0;
+}
+
+static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+{
     int i;
     int w= s->width;
     int h= s->height;
@@ -295,7 +396,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
             return -1;
         }
 
-        for(i=0; i<4; i++){
+        for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
             av_freep(&buf->base[i]);
             buf->data[i]= NULL;
         }
@@ -310,7 +411,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
         int tmpsize;
         int unaligned;
         AVPicture picture;
-        int stride_align[4];
+        int stride_align[AV_NUM_DATA_POINTERS];
         const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
 
         avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
@@ -363,6 +464,10 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
             else
                 buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (pixel_size*EDGE_WIDTH>>h_shift), stride_align[i]);
         }
+        for (; i < AV_NUM_DATA_POINTERS; i++) {
+            buf->base[i] = buf->data[i] = NULL;
+            buf->linesize[i] = 0;
+        }
         if(size[1] && !size[2])
             ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt);
         buf->width  = s->width;
@@ -372,11 +477,12 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
     }
     pic->type= FF_BUFFER_TYPE_INTERNAL;
 
-    for(i=0; i<4; i++){
+    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
         pic->base[i]= buf->base[i];
         pic->data[i]= buf->data[i];
         pic->linesize[i]= buf->linesize[i];
     }
+    pic->extended_data = pic->data;
     avci->buffer_count++;
 
     if (s->pkt) {
@@ -399,11 +505,25 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
     return 0;
 }
 
+int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    switch (avctx->codec_type) {
+    case AVMEDIA_TYPE_VIDEO:
+        return video_get_buffer(avctx, frame);
+    case AVMEDIA_TYPE_AUDIO:
+        return audio_get_buffer(avctx, frame);
+    default:
+        return -1;
+    }
+}
+
 void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
     int i;
     InternalBuffer *buf, *last;
     AVCodecInternal *avci = s->internal;
 
+    assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+
     assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
     assert(avci->buffer_count);
 
@@ -421,7 +541,7 @@ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
         FFSWAP(InternalBuffer, *buf, *last);
     }
 
-    for(i=0; i<4; i++){
+    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
         pic->data[i]=NULL;
 //        pic->base[i]=NULL;
     }
@@ -436,6 +556,8 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
     AVFrame temp_pic;
     int i;
 
+    assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+
     /* If no picture return a new buffer */
     if(pic->data[0] == NULL) {
         /* We will copy from buffer, so must be readable */
@@ -455,7 +577,7 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
      * Not internal type and reget_buffer not overridden, emulate cr buffer
      */
     temp_pic = *pic;
-    for(i = 0; i < 4; i++)
+    for(i = 0; i < AV_NUM_DATA_POINTERS; i++)
         pic->data[i] = pic->base[i] = NULL;
     pic->opaque = NULL;
     /* Allocate new frame */
@@ -862,36 +984,73 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
     return ret;
 }
 
+#if FF_API_OLD_DECODE_AUDIO
 int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
                          int *frame_size_ptr,
                          AVPacket *avpkt)
 {
-    int ret;
+    AVFrame frame;
+    int ret, got_frame = 0;
+
+    if (avctx->get_buffer != avcodec_default_get_buffer) {
+        av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
+               "avcodec_decode_audio3()\n");
+        return AVERROR(EINVAL);
+    }
+
+    ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
+
+    if (ret >= 0 && got_frame) {
+        int ch, plane_size;
+        int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
+        int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
+                                                   frame.nb_samples,
+                                                   avctx->sample_fmt, 1);
+        if (*frame_size_ptr < data_size) {
+            av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
+                   "the current frame (%d < %d)\n", *frame_size_ptr, data_size);
+            return AVERROR(EINVAL);
+        }
+
+        memcpy(samples, frame.extended_data[0], plane_size);
+
+        if (planar && avctx->channels > 1) {
+            uint8_t *out = ((uint8_t *)samples) + plane_size;
+            for (ch = 1; ch < avctx->channels; ch++) {
+                memcpy(out, frame.extended_data[ch], plane_size);
+                out += plane_size;
+            }
+        }
+        *frame_size_ptr = data_size;
+    } else {
+        *frame_size_ptr = 0;
+    }
+    return ret;
+}
+#endif
+
+int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
+                                              AVFrame *frame,
+                                              int *got_frame_ptr,
+                                              AVPacket *avpkt)
+{
+    int ret = 0;
+
+    *got_frame_ptr = 0;
 
     if (!avpkt->data && avpkt->size) {
         av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
         return AVERROR(EINVAL);
     }
 
-    if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){
+    if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
         av_packet_split_side_data(avpkt);
         avctx->pkt = avpkt;
-        //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough
-        if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
-            av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
-            return -1;
-        }
-        if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
-        *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){
-            av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
-            return -1;
+        ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
+        if (ret >= 0 && *got_frame_ptr) {
+            avctx->frame_number++;
+            frame->pkt_dts = avpkt->dts;
         }
-
-        ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt);
-        avctx->frame_number++;
-    }else{
-        ret= 0;
-        *frame_size_ptr=0;
     }
     return ret;
 }
@@ -1230,7 +1389,8 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
         avctx->codec->flush(avctx);
 }
 
-void avcodec_default_free_buffers(AVCodecContext *s){
+static void video_free_buffers(AVCodecContext *s)
+{
     AVCodecInternal *avci = s->internal;
     int i, j;
 
@@ -1252,6 +1412,37 @@ void avcodec_default_free_buffers(AVCodecContext *s){
     avci->buffer_count=0;
 }
 
+static void audio_free_buffers(AVCodecContext *avctx)
+{
+    AVCodecInternal *avci = avctx->internal;
+    InternalBuffer *buf;
+
+    if (!avci->buffer)
+        return;
+    buf = avci->buffer;
+
+    if (buf->extended_data) {
+        av_free(buf->extended_data[0]);
+        if (buf->extended_data != buf->data)
+            av_free(buf->extended_data);
+    }
+    av_freep(&avci->buffer);
+}
+
+void avcodec_default_free_buffers(AVCodecContext *avctx)
+{
+    switch (avctx->codec_type) {
+    case AVMEDIA_TYPE_VIDEO:
+        video_free_buffers(avctx);
+        break;
+    case AVMEDIA_TYPE_AUDIO:
+        audio_free_buffers(avctx);
+        break;
+    default:
+        break;
+    }
+}
+
 #if FF_API_OLD_FF_PICT_TYPES
 char av_get_pict_type_char(int pict_type){
     return av_get_picture_type_char(pict_type);
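
From the application side the new entry point mirrors what the compatibility wrapper above does internally: pass in a caller-owned AVFrame, check got_frame, and read the decoded samples from frame.data[0] or frame.extended_data[]. A reduced usage sketch against the API as it stands in this patch; avctx is assumed to be an opened audio decoder and error handling is trimmed:

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    static int consume_audio_packet(AVCodecContext *avctx, AVPacket *pkt)
    {
        AVFrame frame;
        int got_frame = 0;
        int ret;

        avcodec_get_frame_defaults(&frame);
        ret = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (ret < 0)
            return ret;                                /* decode error */

        if (got_frame) {
            int plane_size;
            int data_size = av_samples_get_buffer_size(&plane_size,
                                                       avctx->channels,
                                                       frame.nb_samples,
                                                       avctx->sample_fmt, 1);
            /* interleaved formats: data_size bytes starting at frame.data[0];
               planar formats: plane_size bytes per channel in
               frame.extended_data[0..channels-1] */
            (void)data_size;
        }
        return ret;                                    /* bytes of pkt consumed */
    }
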
diff --git a/libavcodec/version.h b/libavcodec/version.h
index b955116c6a2704b2a24e2de898feadf4a04528d9..70dbd0001e6d0b5a1d3930439b3619781b2b2acf 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,8 +21,8 @@
 #define AVCODEC_VERSION_H
 
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 39
-#define LIBAVCODEC_VERSION_MICRO  1
+#define LIBAVCODEC_VERSION_MINOR 40
+#define LIBAVCODEC_VERSION_MICRO  0
 
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                                LIBAVCODEC_VERSION_MINOR, \
@@ -110,6 +110,11 @@
 #ifndef FF_API_TIFFENC_COMPLEVEL
 #define FF_API_TIFFENC_COMPLEVEL (LIBAVCODEC_VERSION_MAJOR < 54)
 #endif
-
+#ifndef FF_API_DATA_POINTERS
+#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OLD_DECODE_AUDIO
+#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
 
 #endif /* AVCODEC_VERSION_H */
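
The two new FF_API_* switches follow the existing pattern in this header: code kept only for backwards compatibility is wrapped in the matching #if block so it drops out automatically once LIBAVCODEC_VERSION_MAJOR reaches 54. A hypothetical example of guarding a deprecated declaration this way (old_audio_entry_point() is illustrative only):

    #include "libavutil/attributes.h"
    #include "version.h"

    #if FF_API_OLD_DECODE_AUDIO
    /* kept only until the next major bump; new code should not call this */
    attribute_deprecated int old_audio_entry_point(void);
    #endif
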
diff --git a/libavcodec/vmdav.c b/libavcodec/vmdav.c
index 06c6255cc5cdcbd2136f57119d488b524ea0c8c1..ec7c967f743b0fd5a69024d95bd938998dc50a9c 100644
--- a/libavcodec/vmdav.c
+++ b/libavcodec/vmdav.c
@@ -466,6 +466,7 @@ static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
 #define BLOCK_TYPE_SILENCE  3
 
 typedef struct VmdAudioContext {
+    AVFrame frame;
     int out_bps;
     int chunk_size;
 } VmdAudioContext;
@@ -507,6 +508,9 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
 
     s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
            "block align = %d, sample rate = %d\n",
            avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
@@ -544,22 +548,21 @@ static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
     }
 }
 
-static int vmdaudio_decode_frame(AVCodecContext *avctx,
-                                 void *data, int *data_size,
-                                 AVPacket *avpkt)
+static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
+                                 int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     const uint8_t *buf_end;
     int buf_size = avpkt->size;
     VmdAudioContext *s = avctx->priv_data;
     int block_type, silent_chunks, audio_chunks;
-    int nb_samples, out_size;
-    uint8_t *output_samples_u8  = data;
-    int16_t *output_samples_s16 = data;
+    int ret;
+    uint8_t *output_samples_u8;
+    int16_t *output_samples_s16;
 
     if (buf_size < 16) {
         av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
@@ -590,10 +593,15 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
 
     /* ensure output buffer is large enough */
     audio_chunks = buf_size / s->chunk_size;
-    nb_samples   = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
-    out_size     = nb_samples * avctx->channels * s->out_bps;
-    if (*data_size < out_size)
-        return -1;
+
+    /* get output buffer */
+    s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    output_samples_u8  = s->frame.data[0];
+    output_samples_s16 = (int16_t *)s->frame.data[0];
 
     /* decode silent chunks */
     if (silent_chunks > 0) {
@@ -623,7 +631,9 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
         }
     }
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return avpkt->size;
 }
 
@@ -651,5 +661,6 @@ AVCodec ff_vmdaudio_decoder = {
     .priv_data_size = sizeof(VmdAudioContext),
     .init           = vmdaudio_decode_init,
     .decode         = vmdaudio_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
 };
diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c
index 0457d8b45447ad7b7029ea1200be8a573948686a..03ecc38ed4df8918d0abffa048978932fbe15042 100644
--- a/libavcodec/vorbisdec.c
+++ b/libavcodec/vorbisdec.c
@@ -125,6 +125,7 @@ typedef struct {
 
 typedef struct vorbis_context_s {
     AVCodecContext *avccontext;
+    AVFrame frame;
     GetBitContext gb;
     DSPContext dsp;
     FmtConvertContext fmt_conv;
@@ -1037,6 +1038,9 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
     avccontext->sample_rate = vc->audio_samplerate;
     avccontext->frame_size  = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2;
 
+    avcodec_get_frame_defaults(&vc->frame);
+    avccontext->coded_frame = &vc->frame;
+
     return 0;
 }
 
@@ -1609,16 +1613,15 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
 
 // Return the decoded audio packet through the standard api
 
-static int vorbis_decode_frame(AVCodecContext *avccontext,
-                               void *data, int *data_size,
-                               AVPacket *avpkt)
+static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     vorbis_context *vc = avccontext->priv_data;
     GetBitContext *gb = &(vc->gb);
     const float *channel_ptrs[255];
-    int i, len, out_size;
+    int i, len, ret;
 
     av_dlog(NULL, "packet length %d \n", buf_size);
 
@@ -1629,18 +1632,18 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
 
     if (!vc->first_frame) {
         vc->first_frame = 1;
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
     av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
             get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
 
-    out_size = len * vc->audio_channels *
-               av_get_bytes_per_sample(avccontext->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    vc->frame.nb_samples = len;
+    if ((ret = avccontext->get_buffer(avccontext, &vc->frame)) < 0) {
+        av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
     if (vc->audio_channels > 8) {
@@ -1653,12 +1656,15 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
     }
 
     if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT)
-        vc->fmt_conv.float_interleave(data, channel_ptrs, len, vc->audio_channels);
+        vc->fmt_conv.float_interleave((float *)vc->frame.data[0], channel_ptrs,
+                                      len, vc->audio_channels);
     else
-        vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len,
+        vc->fmt_conv.float_to_int16_interleave((int16_t *)vc->frame.data[0],
+                                               channel_ptrs, len,
                                                vc->audio_channels);
 
-    *data_size = out_size;
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = vc->frame;
 
     return buf_size;
 }
@@ -1682,6 +1688,7 @@ AVCodec ff_vorbis_decoder = {
     .init           = vorbis_decode_init,
     .close          = vorbis_decode_close,
     .decode         = vorbis_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
     .channel_layouts = ff_vorbis_channel_layouts,
     .sample_fmts = (const enum AVSampleFormat[]) {
diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index 83ec4d80f0398b5c3507115c87a0f15a23eb2b16..a6a3109dba637cbfcb219fcccd4ba1c93cbe8552 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -1335,8 +1335,8 @@ end:
  */
 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
 {
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
 
     if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
         int y_flipped = s->flipped_image ? s->avctx->height-y : y;
@@ -1362,7 +1362,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
     offset[0] = s->current_frame.linesize[0]*y;
     offset[1] = s->current_frame.linesize[1]*cy;
     offset[2] = s->current_frame.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
 
     emms_c();
     s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c
index b374fa4222ef474530c5220392fe2a46bf62c6ed..8cf67ba491441ffb81bfadc38169d65f62cf906e 100644
--- a/libavcodec/vp8.c
+++ b/libavcodec/vp8.c
@@ -51,8 +51,7 @@ static int vp8_alloc_frame(VP8Context *s, AVFrame *f)
     int ret;
     if ((ret = ff_thread_get_buffer(s->avctx, f)) < 0)
         return ret;
-    if (s->num_maps_to_be_freed) {
-        assert(!s->maps_are_invalid);
+    if (s->num_maps_to_be_freed && !s->maps_are_invalid) {
         f->ref_index[0] = s->segmentation_maps[--s->num_maps_to_be_freed];
     } else if (!(f->ref_index[0] = av_mallocz(s->mb_width * s->mb_height))) {
         ff_thread_release_buffer(s->avctx, f);
@@ -1568,13 +1567,15 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     VP8Context *s = avctx->priv_data;
     int ret, mb_x, mb_y, i, y, referenced;
     enum AVDiscard skip_thresh;
-    AVFrame *av_uninit(curframe), *prev_frame = s->framep[VP56_FRAME_CURRENT];
+    AVFrame *av_uninit(curframe), *prev_frame;
 
     release_queued_segmaps(s, 0);
 
     if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
         return ret;
 
+    prev_frame = s->framep[VP56_FRAME_CURRENT];
+
     referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
                                 || s->update_altref == VP56_FRAME_CURRENT;
 
@@ -1815,6 +1816,7 @@ static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
     if (s->macroblocks_base &&
         (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
         free_buffers(s);
+        s->maps_are_invalid = 1;
     }
 
     s->prob[0] = s_src->prob[!s_src->update_probabilities];
diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c
index 5ff9795ef831e5115251cce2934085ffa3de9a70..1ccaec665d411966885ae7ff3a42ece3b9753add 100644
--- a/libavcodec/wavpack.c
+++ b/libavcodec/wavpack.c
@@ -115,8 +115,6 @@ typedef struct WavpackFrameContext {
     int float_shift;
     int float_max_exp;
     WvChannel ch[2];
-    int samples_left;
-    int max_samples;
     int pos;
     SavedContext sc, extra_sc;
 } WavpackFrameContext;
@@ -125,6 +123,7 @@ typedef struct WavpackFrameContext {
 
 typedef struct WavpackContext {
     AVCodecContext *avctx;
+    AVFrame frame;
 
     WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS];
     int fdec_num;
@@ -133,7 +132,6 @@ typedef struct WavpackContext {
     int mkv_mode;
     int block;
     int samples;
-    int samples_left;
     int ch_offset;
 } WavpackContext;
 
@@ -485,7 +483,6 @@ static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
 static void wv_reset_saved_context(WavpackFrameContext *s)
 {
     s->pos = 0;
-    s->samples_left = 0;
     s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
 }
 
@@ -502,8 +499,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
     float   *dstfl = dst;
     const int channel_pad = s->avctx->channels - 2;
 
-    if(s->samples_left == s->samples)
-        s->one = s->zero = s->zeroes = 0;
+    s->one = s->zero = s->zeroes = 0;
     do{
         L = wv_get_value(s, gb, 0, &last);
         if(last) break;
@@ -594,13 +590,8 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
             dst16 += channel_pad;
         }
         count++;
-    }while(!last && count < s->max_samples);
+    } while (!last && count < s->samples);
 
-    if (last)
-        s->samples_left = 0;
-    else
-        s->samples_left -= count;
-    if(!s->samples_left){
         wv_reset_saved_context(s);
         if(crc != s->CRC){
             av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
@@ -610,15 +601,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
             av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
             return -1;
         }
-    }else{
-        s->pos = pos;
-        s->sc.crc = crc;
-        s->sc.bits_used = get_bits_count(&s->gb);
-        if(s->got_extra_bits){
-            s->extra_sc.crc = crc_extra_bits;
-            s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits);
-        }
-    }
+
     return count * 2;
 }
 
@@ -635,8 +618,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
     float   *dstfl = dst;
     const int channel_stride = s->avctx->channels;
 
-    if(s->samples_left == s->samples)
-        s->one = s->zero = s->zeroes = 0;
+    s->one = s->zero = s->zeroes = 0;
     do{
         T = wv_get_value(s, gb, 0, &last);
         S = 0;
@@ -675,13 +657,8 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
             dst16 += channel_stride;
         }
         count++;
-    }while(!last && count < s->max_samples);
+    } while (!last && count < s->samples);
 
-    if (last)
-        s->samples_left = 0;
-    else
-        s->samples_left -= count;
-    if(!s->samples_left){
         wv_reset_saved_context(s);
         if(crc != s->CRC){
             av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
@@ -691,15 +668,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
             av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
             return -1;
         }
-    }else{
-        s->pos = pos;
-        s->sc.crc = crc;
-        s->sc.bits_used = get_bits_count(&s->gb);
-        if(s->got_extra_bits){
-            s->extra_sc.crc = crc_extra_bits;
-            s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits);
-        }
-    }
+
     return count;
 }
 
@@ -743,6 +712,9 @@ static av_cold int wavpack_decode_init(AVCodecContext *avctx)
 
     s->fdec_num = 0;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -759,7 +731,7 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx)
 }
 
 static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
-                                void *data, int *data_size,
+                                void *data, int *got_frame_ptr,
                                 const uint8_t *buf, int buf_size)
 {
     WavpackContext *wc = avctx->priv_data;
@@ -774,7 +746,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
     int bpp, chan, chmask;
 
     if (buf_size == 0){
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return 0;
     }
 
@@ -789,18 +761,16 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
         return -1;
     }
 
-    if(!s->samples_left){
         memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
         memset(s->ch, 0, sizeof(s->ch));
         s->extra_bits = 0;
         s->and = s->or = s->shift = 0;
         s->got_extra_bits = 0;
-    }
 
     if(!wc->mkv_mode){
         s->samples = AV_RL32(buf); buf += 4;
         if(!s->samples){
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return 0;
         }
     }else{
@@ -829,13 +799,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
 
     wc->ch_offset += 1 + s->stereo;
 
-    s->max_samples = *data_size / (bpp * avctx->channels);
-    s->max_samples = FFMIN(s->max_samples, s->samples);
-    if(s->samples_left > 0){
-        s->max_samples = FFMIN(s->max_samples, s->samples_left);
-        buf = buf_end;
-    }
-
     // parse metadata blocks
     while(buf < buf_end){
         id = *buf++;
@@ -1064,7 +1027,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
         }
         if(id & WP_IDF_ODD) buf++;
     }
-    if(!s->samples_left){
+
         if(!got_terms){
             av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
             return -1;
@@ -1101,16 +1064,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
                 s->got_extra_bits = 0;
             }
         }
-        s->samples_left = s->samples;
-    }else{
-        init_get_bits(&s->gb, orig_buf + s->sc.offset, s->sc.size);
-        skip_bits_long(&s->gb, s->sc.bits_used);
-        if(s->got_extra_bits){
-            init_get_bits(&s->gb_extra_bits, orig_buf + s->extra_sc.offset,
-                          s->extra_sc.size);
-            skip_bits_long(&s->gb_extra_bits, s->extra_sc.bits_used);
-        }
-    }
 
     if(s->stereo_in){
         if(avctx->sample_fmt == AV_SAMPLE_FMT_S16)
@@ -1167,7 +1120,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
         }
     }
 
-    wc->samples_left = s->samples_left;
+    *got_frame_ptr = 1;
 
     return samplecount * bpp;
 }
@@ -1181,23 +1134,40 @@ static void wavpack_decode_flush(AVCodecContext *avctx)
         wv_reset_saved_context(s->fdec[i]);
 }
 
-static int wavpack_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
+                                int *got_frame_ptr, AVPacket *avpkt)
 {
     WavpackContext *s = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    int frame_size;
+    int frame_size, ret;
     int samplecount = 0;
 
     s->block = 0;
-    s->samples_left = 0;
     s->ch_offset = 0;
 
+    /* determine number of samples */
     if(s->mkv_mode){
         s->samples = AV_RL32(buf); buf += 4;
+    } else {
+        if (s->multichannel)
+            s->samples = AV_RL32(buf + 4);
+        else
+            s->samples = AV_RL32(buf);
+    }
+    if (s->samples <= 0) {
+        av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
+               s->samples);
+        return AVERROR(EINVAL);
+    }
+
+    /* get output buffer */
+    s->frame.nb_samples = s->samples;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+
     while(buf_size > 0){
         if(!s->multichannel){
             frame_size = buf_size;
@@ -1216,17 +1186,19 @@ static int wavpack_decode_frame(AVCodecContext *avctx,
             wavpack_decode_flush(avctx);
             return -1;
         }
-        if((samplecount = wavpack_decode_block(avctx, s->block, data,
-                                               data_size, buf, frame_size)) < 0) {
+        if((samplecount = wavpack_decode_block(avctx, s->block, s->frame.data[0],
+                                               got_frame_ptr, buf, frame_size)) < 0) {
             wavpack_decode_flush(avctx);
             return -1;
         }
         s->block++;
         buf += frame_size; buf_size -= frame_size;
     }
-    *data_size = samplecount * avctx->channels;
 
-    return s->samples_left > 0 ? 0 : avpkt->size;
+    if (*got_frame_ptr)
+        *(AVFrame *)data = s->frame;
+
+    return avpkt->size;
 }
 
 AVCodec ff_wavpack_decoder = {
@@ -1238,6 +1210,6 @@ AVCodec ff_wavpack_decoder = {
     .close          = wavpack_decode_end,
     .decode         = wavpack_decode_frame,
     .flush          = wavpack_decode_flush,
-    .capabilities = CODEC_CAP_SUBFRAMES,
+    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("WavPack"),
 };
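
Note on the decoder changes in this and the following libavcodec files: each converted decoder now keeps an AVFrame in its private context, sizes it through nb_samples, lets avctx->get_buffer() allocate the sample buffer, and reports output by setting *got_frame_ptr and copying the frame into the opaque data pointer, returning the number of bytes consumed from the packet. A condensed sketch of that pattern follows; DemoDecContext and the demo_* names are illustrative only, not part of this patch.

    #include <libavcodec/avcodec.h>

    /* Hypothetical private context mirroring the per-decoder AVFrame member
       added in the hunks above. */
    typedef struct DemoDecContext {
        AVFrame frame;
    } DemoDecContext;

    static int demo_decode_init(AVCodecContext *avctx)
    {
        DemoDecContext *s = avctx->priv_data;

        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
        avcodec_get_frame_defaults(&s->frame);
        avctx->coded_frame = &s->frame;
        return 0;
    }

    static int demo_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
    {
        DemoDecContext *s = avctx->priv_data;
        int ret;
        int nb_samples = 1024; /* a real decoder parses this from avpkt->data */

        /* get output buffer */
        s->frame.nb_samples = nb_samples;
        if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* ... decode avpkt->data into s->frame.data[0] ... */

        *got_frame_ptr   = 1;
        *(AVFrame *)data = s->frame;
        return avpkt->size; /* bytes consumed from the input packet */
    }
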
diff --git a/libavcodec/wma.h b/libavcodec/wma.h
index fd9d5d8d3baa6e73107d63288b88a22007206dd9..6c8e944b79e9efa62d0bb146dff6158a19fa95f6 100644
--- a/libavcodec/wma.h
+++ b/libavcodec/wma.h
@@ -65,6 +65,7 @@ typedef struct CoefVLCTable {
 
 typedef struct WMACodecContext {
     AVCodecContext* avctx;
+    AVFrame frame;
     GetBitContext gb;
     PutBitContext pb;
     int sample_rate;
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index 40315d429117ab86737ea181f2ba0e6f6d899227..0b2e49981d50c5d40bfd12e54acb7e6bab398dc6 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -136,6 +136,10 @@ static int wma_decode_init(AVCodecContext * avctx)
     }
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -814,14 +818,13 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
     return 0;
 }
 
-static int wma_decode_superframe(AVCodecContext *avctx,
-                                 void *data, int *data_size,
-                                 AVPacket *avpkt)
+static int wma_decode_superframe(AVCodecContext *avctx, void *data,
+                                 int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     WMACodecContext *s = avctx->priv_data;
-    int nb_frames, bit_offset, i, pos, len, out_size;
+    int nb_frames, bit_offset, i, pos, len, ret;
     uint8_t *q;
     int16_t *samples;
 
@@ -836,8 +839,6 @@ static int wma_decode_superframe(AVCodecContext *avctx,
     if(s->block_align)
         buf_size = s->block_align;
 
-    samples = data;
-
     init_get_bits(&s->gb, buf, buf_size*8);
 
     if (s->use_bit_reservoir) {
@@ -848,12 +849,13 @@ static int wma_decode_superframe(AVCodecContext *avctx,
         nb_frames = 1;
     }
 
-    out_size = nb_frames * s->frame_len * s->nb_channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
-        goto fail;
+    /* get output buffer */
+    s->frame.nb_samples = nb_frames * s->frame_len;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)s->frame.data[0];
 
     if (s->use_bit_reservoir) {
         bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3);
@@ -920,7 +922,10 @@ static int wma_decode_superframe(AVCodecContext *avctx,
     }
 
 //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len,        (int8_t *)samples - (int8_t *)data, s->block_align);
-    *data_size = out_size;
+
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
+
     return buf_size;
  fail:
     /* when error, we reset the bit reservoir */
@@ -945,6 +950,7 @@ AVCodec ff_wmav1_decoder = {
     .close          = ff_wma_end,
     .decode         = wma_decode_superframe,
     .flush          = flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
 };
 
@@ -957,5 +963,6 @@ AVCodec ff_wmav2_decoder = {
     .close          = ff_wma_end,
     .decode         = wma_decode_superframe,
     .flush          = flush,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
 };
diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index 868a28393d22d68ec4970d6a7f6c10d5040d6be1..3f022c7208937e8acc3f81bb46e27c29ac5cacc1 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -167,6 +167,7 @@ typedef struct {
 typedef struct WMAProDecodeCtx {
     /* generic decoder variables */
     AVCodecContext*  avctx;                         ///< codec context for av_log
+    AVFrame          frame;                         ///< AVFrame for decoded output
     DSPContext       dsp;                           ///< accelerated DSP functions
     FmtConvertContext fmt_conv;
     uint8_t          frame_data[MAX_FRAMESIZE +
@@ -209,8 +210,6 @@ typedef struct WMAProDecodeCtx {
     uint32_t         frame_num;                     ///< current frame number (not used for decoding)
     GetBitContext    gb;                            ///< bitstream reader context
     int              buf_bit_size;                  ///< buffer size in bits
-    float*           samples;                       ///< current samplebuffer pointer
-    float*           samples_end;                   ///< maximum samplebuffer pointer
     uint8_t          drc_gain;                      ///< gain for the DRC tool
     int8_t           skip_frame;                    ///< skip output step
     int8_t           parsed_all_subframes;          ///< all subframes decoded?
@@ -453,6 +452,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
         dump_context(s);
 
     avctx->channel_layout = channel_mask;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -1279,22 +1282,15 @@ static int decode_subframe(WMAProDecodeCtx *s)
  *@return 0 if the trailer bit indicates that this is the last frame,
  *        1 if there are additional frames
  */
-static int decode_frame(WMAProDecodeCtx *s)
+static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
 {
+    AVCodecContext *avctx = s->avctx;
     GetBitContext* gb = &s->gb;
     int more_frames = 0;
     int len = 0;
-    int i;
+    int i, ret;
     const float *out_ptr[WMAPRO_MAX_CHANNELS];
-
-    /** check for potential output buffer overflow */
-    if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
-        /** return an error if no frame could be decoded at all */
-        av_log(s->avctx, AV_LOG_ERROR,
-               "not enough space for the output samples\n");
-        s->packet_loss = 1;
-        return 0;
-    }
+    float *samples;
 
     /** get frame length */
     if (s->len_prefix)
@@ -1360,10 +1356,19 @@ static int decode_frame(WMAProDecodeCtx *s)
         }
     }
 
+    /* get output buffer */
+    s->frame.nb_samples = s->samples_per_frame;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        s->packet_loss = 1;
+        return 0;
+    }
+    samples = (float *)s->frame.data[0];
+
     /** interleave samples and write them to the output buffer */
     for (i = 0; i < s->num_channels; i++)
         out_ptr[i] = s->channel[i].out;
-    s->fmt_conv.float_interleave(s->samples, out_ptr, s->samples_per_frame,
+    s->fmt_conv.float_interleave(samples, out_ptr, s->samples_per_frame,
                                  s->num_channels);
 
     for (i = 0; i < s->num_channels; i++) {
@@ -1375,8 +1380,10 @@ static int decode_frame(WMAProDecodeCtx *s)
 
     if (s->skip_frame) {
         s->skip_frame = 0;
-    } else
-        s->samples += s->num_channels * s->samples_per_frame;
+        *got_frame_ptr = 0;
+    } else {
+        *got_frame_ptr = 1;
+    }
 
     if (s->len_prefix) {
         if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
@@ -1473,8 +1480,8 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
  *@param avpkt input packet
  *@return number of bytes that were read from the input buffer
  */
-static int decode_packet(AVCodecContext *avctx,
-                         void *data, int *data_size, AVPacket* avpkt)
+static int decode_packet(AVCodecContext *avctx, void *data,
+                         int *got_frame_ptr, AVPacket* avpkt)
 {
     WMAProDecodeCtx *s = avctx->priv_data;
     GetBitContext* gb  = &s->pgb;
@@ -1483,9 +1490,7 @@ static int decode_packet(AVCodecContext *avctx,
     int num_bits_prev_frame;
     int packet_sequence_number;
 
-    s->samples       = data;
-    s->samples_end   = (float*)((int8_t*)data + *data_size);
-    *data_size = 0;
+    *got_frame_ptr = 0;
 
     if (s->packet_done || s->packet_loss) {
         s->packet_done = 0;
@@ -1532,7 +1537,7 @@ static int decode_packet(AVCodecContext *avctx,
 
             /** decode the cross packet frame if it is valid */
             if (!s->packet_loss)
-                decode_frame(s);
+                decode_frame(s, got_frame_ptr);
         } else if (s->num_saved_bits - s->frame_offset) {
             av_dlog(avctx, "ignoring %x previously saved bits\n",
                     s->num_saved_bits - s->frame_offset);
@@ -1555,7 +1560,7 @@ static int decode_packet(AVCodecContext *avctx,
             (frame_size = show_bits(gb, s->log2_frame_size)) &&
             frame_size <= remaining_bits(s, gb)) {
             save_bits(s, gb, frame_size, 0);
-            s->packet_done = !decode_frame(s);
+            s->packet_done = !decode_frame(s, got_frame_ptr);
         } else if (!s->len_prefix
                    && s->num_saved_bits > get_bits_count(&s->gb)) {
             /** when the frames do not have a length prefix, we don't know
@@ -1565,7 +1570,7 @@ static int decode_packet(AVCodecContext *avctx,
                 therefore we save the incoming packet first, then we append
                 the "previous frame" data from the next packet so that
                 we get a buffer that only contains full frames */
-            s->packet_done = !decode_frame(s);
+            s->packet_done = !decode_frame(s, got_frame_ptr);
         } else
             s->packet_done = 1;
     }
@@ -1577,10 +1582,14 @@ static int decode_packet(AVCodecContext *avctx,
         save_bits(s, gb, remaining_bits(s, gb), 0);
     }
 
-    *data_size = (int8_t *)s->samples - (int8_t *)data;
     s->packet_offset = get_bits_count(gb) & 7;
+    if (s->packet_loss)
+        return AVERROR_INVALIDDATA;
+
+    if (*got_frame_ptr)
+        *(AVFrame *)data = s->frame;
 
-    return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3;
+    return get_bits_count(gb) >> 3;
 }
 
 /**
@@ -1611,7 +1620,7 @@ AVCodec ff_wmapro_decoder = {
     .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_packet,
-    .capabilities = CODEC_CAP_SUBFRAMES,
+    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .flush= flush,
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
 };
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index 244b63092241b7e23ba4484d176c5ef0906689ac..45383b033bcc26304d6e710ae2611a4e76c4135f 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -131,6 +131,7 @@ typedef struct {
      * @name Global values specified in the stream header / extradata or used all over.
      * @{
      */
+    AVFrame frame;
     GetBitContext gb;             ///< packet bitreader. During decoder init,
                                   ///< it contains the extradata from the
                                   ///< demuxer. During decoding, it contains
@@ -438,6 +439,9 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
 
     ctx->sample_fmt             = AV_SAMPLE_FMT_FLT;
 
+    avcodec_get_frame_defaults(&s->frame);
+    ctx->coded_frame = &s->frame;
+
     return 0;
 }
 
@@ -1725,17 +1729,17 @@ static int check_bits_for_superframe(GetBitContext *orig_gb,
  * @return 0 on success, <0 on error or 1 if there was not enough data to
  *         fully parse the superframe
  */
-static int synth_superframe(AVCodecContext *ctx,
-                            float *samples, int *data_size)
+static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr)
 {
     WMAVoiceContext *s = ctx->priv_data;
     GetBitContext *gb = &s->gb, s_gb;
-    int n, res, out_size, n_samples = 480;
+    int n, res, n_samples = 480;
     double lsps[MAX_FRAMES][MAX_LSPS];
     const double *mean_lsf = s->lsps == 16 ?
         wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
     float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
     float synth[MAX_LSPS + MAX_SFRAMESIZE];
+    float *samples;
 
     memcpy(synth,      s->synth_history,
            s->lsps             * sizeof(*synth));
@@ -1749,7 +1753,7 @@ static int synth_superframe(AVCodecContext *ctx,
     }
 
     if ((res = check_bits_for_superframe(gb, s)) == 1) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return 1;
     }
 
@@ -1792,13 +1796,14 @@ static int synth_superframe(AVCodecContext *ctx,
             stabilize_lsps(lsps[n], s->lsps);
     }
 
-    out_size = n_samples * av_get_bytes_per_sample(ctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(ctx, AV_LOG_ERROR,
-               "Output buffer too small (%d given - %d needed)\n",
-               *data_size, out_size);
-        return -1;
+    /* get output buffer */
+    s->frame.nb_samples = 480;
+    if ((res = ctx->get_buffer(ctx, &s->frame)) < 0) {
+        av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return res;
     }
+    s->frame.nb_samples = n_samples;
+    samples = (float *)s->frame.data[0];
 
     /* Parse frames, optionally preceeded by per-frame (independent) LSPs. */
     for (n = 0; n < 3; n++) {
@@ -1820,7 +1825,7 @@ static int synth_superframe(AVCodecContext *ctx,
                                lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
                                &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
                                &synth[s->lsps + n * MAX_FRAMESIZE]))) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
             return res;
         }
     }
@@ -1833,8 +1838,7 @@ static int synth_superframe(AVCodecContext *ctx,
         skip_bits(gb, 10 * (res + 1));
     }
 
-    /* Specify nr. of output samples */
-    *data_size = out_size;
+    *got_frame_ptr = 1;
 
     /* Update history */
     memcpy(s->prev_lsps,           lsps[2],
@@ -1922,7 +1926,7 @@ static void copy_bits(PutBitContext *pb,
  * For more information about frames, see #synth_superframe().
  */
 static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
-                                  int *data_size, AVPacket *avpkt)
+                                  int *got_frame_ptr, AVPacket *avpkt)
 {
     WMAVoiceContext *s = ctx->priv_data;
     GetBitContext *gb = &s->gb;
@@ -1935,7 +1939,7 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
      * capping the packet size at ctx->block_align. */
     for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
     if (!size) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return 0;
     }
     init_get_bits(&s->gb, avpkt->data, size << 3);
@@ -1956,10 +1960,11 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
                 copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
                 flush_put_bits(&s->pb);
                 s->sframe_cache_size += s->spillover_nbits;
-                if ((res = synth_superframe(ctx, data, data_size)) == 0 &&
-                    *data_size > 0) {
+                if ((res = synth_superframe(ctx, got_frame_ptr)) == 0 &&
+                    *got_frame_ptr) {
                     cnt += s->spillover_nbits;
                     s->skip_bits_next = cnt & 7;
+                    *(AVFrame *)data = s->frame;
                     return cnt >> 3;
                 } else
                     skip_bits_long (gb, s->spillover_nbits - cnt +
@@ -1974,11 +1979,12 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
     s->sframe_cache_size = 0;
     s->skip_bits_next = 0;
     pos = get_bits_left(gb);
-    if ((res = synth_superframe(ctx, data, data_size)) < 0) {
+    if ((res = synth_superframe(ctx, got_frame_ptr)) < 0) {
         return res;
-    } else if (*data_size > 0) {
+    } else if (*got_frame_ptr) {
         int cnt = get_bits_count(gb);
         s->skip_bits_next = cnt & 7;
+        *(AVFrame *)data = s->frame;
         return cnt >> 3;
     } else if ((s->sframe_cache_size = pos) > 0) {
         /* rewind bit reader to start of last (incomplete) superframe... */
@@ -2046,7 +2052,7 @@ AVCodec ff_wmavoice_decoder = {
     .init           = wmavoice_decode_init,
     .close          = wmavoice_decode_end,
     .decode         = wmavoice_decode_packet,
-    .capabilities   = CODEC_CAP_SUBFRAMES,
+    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .flush     = wmavoice_flush,
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
 };
diff --git a/libavcodec/ws-snd1.c b/libavcodec/ws-snd1.c
index 17b8cbfa3a8d85ea8545d0bf5d512990b4f5517a..b7e28f5e56ed7762b15c9783c7daaee8fa13289e 100644
--- a/libavcodec/ws-snd1.c
+++ b/libavcodec/ws-snd1.c
@@ -37,26 +37,37 @@ static const int8_t ws_adpcm_4bit[] = {
      0,  1,  2,  3,  4,  5,  6,  8
 };
 
+typedef struct WSSndContext {
+    AVFrame frame;
+} WSSndContext;
+
 static av_cold int ws_snd_decode_init(AVCodecContext *avctx)
 {
+    WSSndContext *s = avctx->priv_data;
+
     if (avctx->channels != 1) {
         av_log_ask_for_sample(avctx, "unsupported number of channels\n");
         return AVERROR(EINVAL);
     }
 
     avctx->sample_fmt = AV_SAMPLE_FMT_U8;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
 
 static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
-                               int *data_size, AVPacket *avpkt)
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
+    WSSndContext *s = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
 
-    int in_size, out_size;
+    int in_size, out_size, ret;
     int sample = 128;
-    uint8_t *samples = data;
+    uint8_t *samples;
     uint8_t *samples_end;
 
     if (!buf_size)
@@ -71,19 +82,24 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
     in_size  = AV_RL16(&buf[2]);
     buf += 4;
 
-    if (out_size > *data_size) {
-        av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
-        return -1;
-    }
     if (in_size > buf_size) {
         av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
         return -1;
     }
+
+    /* get output buffer */
+    s->frame.nb_samples = out_size;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples     = s->frame.data[0];
     samples_end = samples + out_size;
 
     if (in_size == out_size) {
         memcpy(samples, buf, out_size);
-        *data_size = out_size;
+        *got_frame_ptr   = 1;
+        *(AVFrame *)data = s->frame;
         return buf_size;
     }
 
@@ -159,7 +175,9 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    *data_size = samples - (uint8_t *)data;
+    s->frame.nb_samples = samples - s->frame.data[0];
+    *got_frame_ptr   = 1;
+    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }
@@ -168,7 +186,9 @@ AVCodec ff_ws_snd1_decoder = {
     .name           = "ws_snd1",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = CODEC_ID_WESTWOOD_SND1,
+    .priv_data_size = sizeof(WSSndContext),
     .init           = ws_snd_decode_init,
     .decode         = ws_snd_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"),
 };
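
With all of the audio decoders above converted, the caller-visible contract is the AVFrame-based one: the decoder owns the output buffer, and the caller only inspects nb_samples and data[0] once got_frame is set. A minimal caller sketch is given below; decode_one() and the dec_ctx/pkt parameters are assumptions for illustration, not code from this patch.

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    /* Minimal consumer of a converted decoder; dec_ctx and pkt are assumed
       to come from the usual avformat/avcodec setup code. */
    static int decode_one(AVCodecContext *dec_ctx, AVPacket *pkt)
    {
        AVFrame frame;
        int got_frame = 0, ret;

        avcodec_get_frame_defaults(&frame);
        ret = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, pkt);
        if (ret < 0)
            return ret;                         /* decoding error */

        if (got_frame) {
            /* frame.data[0] points at a buffer allocated by get_buffer()
               and holds frame.nb_samples samples per channel */
            int size = av_samples_get_buffer_size(NULL, dec_ctx->channels,
                                                  frame.nb_samples,
                                                  dec_ctx->sample_fmt, 1);
            /* ... consume size bytes of interleaved samples here ... */
            (void)size;
        }
        return ret;                             /* bytes consumed from pkt */
    }
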
diff --git a/libavformat/adtsenc.c b/libavformat/adtsenc.c
index ed1d913347d67014cf18a2f03e51fff370ed6060..18a055a1e5712ac79a1ddbb5f4c8f3fc718d8c1b 100644
--- a/libavformat/adtsenc.c
+++ b/libavformat/adtsenc.c
@@ -37,7 +37,7 @@ int ff_adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf
     int off;
 
     init_get_bits(&gb, buf, size * 8);
-    off = avpriv_mpeg4audio_get_config(&m4ac, buf, size);
+    off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1);
     if (off < 0)
         return off;
     skip_bits_long(&gb, off);
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 167ab03e8c83fdeeed0b9b17cba491b762bfb8ca..0bb210bcd6992df1a0b429b111057b29e0e96a29 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -1182,7 +1182,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
             return AV_NOPTS_VALUE;
         }
 
-        pts= pkt->dts;
+        pts = pkt->dts;
 
         av_free_packet(pkt);
         if(pkt->flags&AV_PKT_FLAG_KEY){
diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c
index 4bca6a6580930c8257cc4523a4540b8dc721d4da..65c30da1ba5bd9a962fd58d4b932ad36888d7686 100644
--- a/libavformat/flvdec.c
+++ b/libavformat/flvdec.c
@@ -550,7 +550,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
             if (st->codec->codec_id == CODEC_ID_AAC) {
                 MPEG4AudioConfig cfg;
                 avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata,
-                                         st->codec->extradata_size);
+                                             st->codec->extradata_size * 8, 1);
                 st->codec->channels = cfg.channels;
                 if (cfg.ext_sample_rate)
                     st->codec->sample_rate = cfg.ext_sample_rate;
diff --git a/libavformat/isom.c b/libavformat/isom.c
index 457cc117e29944c51b4f2f35a2182029b6ebaeab..8e5e14514d7e8a040f7f4583d9da18f05262db3d 100644
--- a/libavformat/isom.c
+++ b/libavformat/isom.c
@@ -438,7 +438,7 @@ int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext
         if (st->codec->codec_id == CODEC_ID_AAC) {
             MPEG4AudioConfig cfg;
             avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata,
-                                     st->codec->extradata_size);
+                                         st->codec->extradata_size * 8, 1);
             st->codec->channels = cfg.channels;
             if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4
                 st->codec->sample_rate = avpriv_mpa_freq_tab[cfg.sampling_index];
diff --git a/libavformat/latmenc.c b/libavformat/latmenc.c
index 56522106d69f21f33f291596b4948e52db3c83be..c71f78b78a62f9a32a862a350ea9d8eed6d25578 100644
--- a/libavformat/latmenc.c
+++ b/libavformat/latmenc.c
@@ -55,7 +55,7 @@ static int latm_decode_extradata(LATMContext *ctx, uint8_t *buf, int size)
     MPEG4AudioConfig m4ac;
 
     init_get_bits(&gb, buf, size * 8);
-    ctx->off = avpriv_mpeg4audio_get_config(&m4ac, buf, size);
+    ctx->off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1);
     if (ctx->off < 0)
         return ctx->off;
     skip_bits_long(&gb, ctx->off);
diff --git a/libavformat/matroskaenc.c b/libavformat/matroskaenc.c
index 332d1ca572957bd29e559e8a39f08c049941aea6..b8c4667a4efd09f6252cb9ad042070039f18e7ce 100644
--- a/libavformat/matroskaenc.c
+++ b/libavformat/matroskaenc.c
@@ -448,7 +448,8 @@ static void get_aac_sample_rates(AVFormatContext *s, AVCodecContext *codec, int
 {
     MPEG4AudioConfig mp4ac;
 
-    if (avpriv_mpeg4audio_get_config(&mp4ac, codec->extradata, codec->extradata_size) < 0) {
+    if (avpriv_mpeg4audio_get_config(&mp4ac, codec->extradata,
+                                     codec->extradata_size * 8, 1) < 0) {
         av_log(s, AV_LOG_WARNING, "Error parsing AAC extradata, unable to determine samplerate.\n");
         return;
     }
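
The avpriv_mpeg4audio_get_config() call sites in adtsenc.c, flvdec.c, isom.c, latmenc.c and matroskaenc.c all change in the same way: the size argument is now given in bits rather than bytes, and the new trailing flag requests parsing of the optional sync extension. A small wrapper illustrating the updated call is sketched below; parse_aac_extradata() is a hypothetical helper, and mpeg4audio.h is a libavcodec-internal header.

    #include "libavcodec/avcodec.h"
    #include "libavcodec/mpeg4audio.h"

    /* Hypothetical helper showing the updated convention: extradata_size is
       converted to bits and the sync extension is explicitly enabled. */
    static int parse_aac_extradata(AVCodecContext *codec, MPEG4AudioConfig *cfg)
    {
        return avpriv_mpeg4audio_get_config(cfg, codec->extradata,
                                            codec->extradata_size * 8, 1);
    }
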
diff --git a/libavformat/nullenc.c b/libavformat/nullenc.c
index 3f4107923f256bc9bc1d7b93079ffd6134510fe6..9edbf20fc8f1d1daff1e0a7cf011a6739b9b9057 100644
--- a/libavformat/nullenc.c
+++ b/libavformat/nullenc.c
@@ -32,5 +32,5 @@ AVOutputFormat ff_null_muxer = {
     .audio_codec       = AV_NE(CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE),
     .video_codec       = CODEC_ID_RAWVIDEO,
     .write_packet      = null_write_packet,
-    .flags = AVFMT_NOFILE | AVFMT_NOTIMESTAMPS,
+    .flags = AVFMT_NOFILE | AVFMT_NOTIMESTAMPS | AVFMT_RAWPICTURE,
 };
diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c
index 24be912045e207e6da1e3c1c6febffc2dfff6e19..46315a923aacaeb8703aabb4b3ed1fe0cad8b254 100644
--- a/libavformat/rtsp.c
+++ b/libavformat/rtsp.c
@@ -1934,6 +1934,7 @@ static int rtp_read_header(AVFormatContext *s,
     struct sockaddr_storage addr;
     AVIOContext pb;
     socklen_t addrlen = sizeof(addr);
+    RTSPState *rt = s->priv_data;
 
     if (!ff_network_init())
         return AVERROR(EIO);
@@ -1997,6 +1998,8 @@ static int rtp_read_header(AVFormatContext *s,
     /* sdp_read_header initializes this again */
     ff_network_close();
 
+    rt->media_type_mask = (1 << (AVMEDIA_TYPE_DATA+1)) - 1;
+
     ret = sdp_read_header(s, ap);
     s->pb = NULL;
     return ret;
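
The rtp_read_header() change above pre-sets the RTSP demuxer's media type mask before handing off to sdp_read_header(). Assuming the AVMediaType values in avutil.h are unchanged (VIDEO = 0, AUDIO = 1, DATA = 2), the expression (1 << (AVMEDIA_TYPE_DATA + 1)) - 1 evaluates to 0x7, i.e. video, audio and data streams are all accepted. A trivial standalone check, purely illustrative:

    #include <libavutil/avutil.h>

    /* Returns 0 when the mask used above covers exactly video, audio and
       data streams; a sanity check, not part of the patch. */
    int main(void)
    {
        int mask = (1 << (AVMEDIA_TYPE_DATA + 1)) - 1;
        return mask == 0x7 ? 0 : 1;
    }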