diff --git a/doc/APIchanges b/doc/APIchanges
index 1e94be42b22d6e1679a398b838a40bb47fed7d68..9c2bc97ad0c481f71404990acf1b4299802a3e2c 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -32,6 +32,12 @@ API changes, most recent first:
 2012-01-24 - xxxxxxx - lavfi 2.60.100
   Add avfilter_graph_dump.
 
+2012-xx-xx - xxxxxxx - lavc 54.x.x
+  Add duration field to AVCodecParserContext
+
+2012-02-xx - xxxxxxx - lavu 51.23.1 - mathematics.h
+  Add av_rescale_q_rnd()
+
 2012-02-xx - xxxxxxx - lavu 51.22.1 - pixdesc.h
   Add PIX_FMT_PSEUDOPAL flag.
 
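av_rescale_q_rnd() is av_rescale_q() with an explicit rounding mode. A minimal,
self-contained sketch of the new call (the numbers are illustrative only):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        /* 1152 samples at 44100 Hz expressed in a 1/90000 time base,
         * rounded to nearest: 1152 * 90000 / 44100 = 2351.02 -> 2351 */
        int64_t ticks = av_rescale_q_rnd(1152,
                                         (AVRational){ 1, 44100 },
                                         (AVRational){ 1, 90000 },
                                         AV_ROUND_NEAR_INF);
        printf("%"PRId64"\n", ticks);
        return 0;
    }
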
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 0018dbb04f185a894f1534f4b9292a4213b84191..0b144c86350eef72afc7a61afd5d57749437b4f8 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -630,7 +630,7 @@ OBJS-$(CONFIG_LIBGSM_DECODER)             += libgsm.o
 OBJS-$(CONFIG_LIBGSM_ENCODER)             += libgsm.o
 OBJS-$(CONFIG_LIBGSM_MS_DECODER)          += libgsm.o
 OBJS-$(CONFIG_LIBGSM_MS_ENCODER)          += libgsm.o
-OBJS-$(CONFIG_LIBMP3LAME_ENCODER)         += libmp3lame.o
+OBJS-$(CONFIG_LIBMP3LAME_ENCODER)         += libmp3lame.o mpegaudiodecheader.o
 OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER)  += libopencore-amr.o
 OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER)  += libopencore-amr.o
 OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER)  += libopencore-amr.o
diff --git a/libavcodec/aac_ac3_parser.c b/libavcodec/aac_ac3_parser.c
index 6f6ed895f047b251367a9b1f6f908ff32a149e24..7545c8553bec5cdb4ce136b2d8aa03d1afeff808 100644
--- a/libavcodec/aac_ac3_parser.c
+++ b/libavcodec/aac_ac3_parser.c
@@ -93,7 +93,7 @@ get_next:
             avctx->channels = s->channels;
             avctx->channel_layout = s->channel_layout;
         }
-        avctx->frame_size = s->samples;
+        s1->duration = s->samples;
         avctx->audio_service_type = s->service_type;
     }
 
diff --git a/libavcodec/adpcmenc.c b/libavcodec/adpcmenc.c
index de85054288aa4a75d35ecdd84a99596745805235..246b5887d9ea578a55a7551ccc5ece94e6d41d46 100644
--- a/libavcodec/adpcmenc.c
+++ b/libavcodec/adpcmenc.c
@@ -125,7 +125,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
         }
         break;
     case CODEC_ID_ADPCM_YAMAHA:
-        avctx->frame_size  = BLKSIZE * avctx->channels;
+        avctx->frame_size  = BLKSIZE * 2 / avctx->channels;
         avctx->block_align = BLKSIZE;
         break;
     case CODEC_ID_ADPCM_SWF:
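frame_size counts samples per channel, and 4-bit Yamaha ADPCM packs two samples
per byte, so a block of BLKSIZE bytes carries BLKSIZE * 2 samples split across
the channels. A quick check, assuming the usual BLKSIZE of 1024:

    /* samples per channel per block, assuming BLKSIZE == 1024 */
    /* mono:   1024 * 2 / 1 = 2048 */
    /* stereo: 1024 * 2 / 2 = 1024 */
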
diff --git a/libavcodec/adx_parser.c b/libavcodec/adx_parser.c
index de3b1b073f521d53a5b8a8cb8fa84912def8f9fb..8dc87fcc5816bd182487296987c6fa75e3f1e649 100644
--- a/libavcodec/adx_parser.c
+++ b/libavcodec/adx_parser.c
@@ -80,6 +80,9 @@ static int adx_parse(AVCodecParserContext *s1,
         *poutbuf_size = 0;
         return buf_size;
     }
+
+    s1->duration = BLOCK_SAMPLES;
+
     *poutbuf = buf;
     *poutbuf_size = buf_size;
     return next;
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index c9e1e1dc463b8ffc07d3637b35e519cbde947005..4051bd8b73050d86c4c205574e6463f94ac4ed9a 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -4182,6 +4182,13 @@ typedef struct AVCodecParserContext {
      * Previous frame byte position.
      */
     int64_t last_pos;
+
+    /**
+     * Duration of the current frame.
+     * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+     * For all other types, this is in units of AVCodecContext.time_base.
+     */
+    int duration;
 } AVCodecParserContext;
 
 typedef struct AVCodecParser {
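The new field lets audio parsers report a per-frame duration without overwriting
AVCodecContext.frame_size. How that duration gets turned into AVPacket.duration
is outside this patch; a hypothetical consumer could rescale it like this (the
helper name and signature are illustrative, not part of the API):

    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>

    /* Convert the duration reported by an audio parser, which is in
     * 1 / AVCodecContext.sample_rate units, into a stream time base. */
    static int64_t parsed_audio_duration(const AVCodecParserContext *pc,
                                         const AVCodecContext *avctx,
                                         AVRational stream_time_base)
    {
        if (pc->duration <= 0 || avctx->sample_rate <= 0)
            return 0;
        return av_rescale_q(pc->duration,
                            (AVRational){ 1, avctx->sample_rate },
                            stream_time_base);
    }
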
diff --git a/libavcodec/flac_parser.c b/libavcodec/flac_parser.c
index ae7edaa052fd95c8f5efcf7a3848e47734410822..83c03c3ba8771543dacde38a22cef086f1742e1d 100644
--- a/libavcodec/flac_parser.c
+++ b/libavcodec/flac_parser.c
@@ -71,6 +71,7 @@ typedef struct FLACHeaderMarker {
 } FLACHeaderMarker;
 
 typedef struct FLACParseContext {
+    AVCodecParserContext *pc;      /**< parent context                        */
     AVCodecContext *avctx;         /**< codec context pointer for logging     */
     FLACHeaderMarker *headers;     /**< linked-list that starts at the first
                                         CRC-8 verified header within buffer   */
@@ -458,7 +459,7 @@ static int get_best_header(FLACParseContext* fpc, const uint8_t **poutbuf,
 
     fpc->avctx->sample_rate = header->fi.samplerate;
     fpc->avctx->channels    = header->fi.channels;
-    fpc->avctx->frame_size  = header->fi.blocksize;
+    fpc->pc->duration       = header->fi.blocksize;
     *poutbuf = flac_fifo_read_wrap(fpc, header->offset, *poutbuf_size,
                                         &fpc->wrap_buf,
                                         &fpc->wrap_buf_allocated_size);
@@ -484,7 +485,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
     if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
         FLACFrameInfo fi;
         if (frame_header_is_valid(avctx, buf, &fi))
-            avctx->frame_size = fi.blocksize;
+            s->duration = fi.blocksize;
         *poutbuf      = buf;
         *poutbuf_size = buf_size;
         return buf_size;
@@ -630,8 +631,8 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
             av_log(avctx, AV_LOG_DEBUG, "Junk frame till offset %i\n",
                    fpc->best_header->offset);
 
-            /* Set frame_size to 0. It is unknown or invalid in a junk frame. */
-            avctx->frame_size = 0;
+            /* Set duration to 0. It is unknown or invalid in a junk frame. */
+            s->duration = 0;
             *poutbuf_size     = fpc->best_header->offset;
             *poutbuf          = flac_fifo_read_wrap(fpc, 0, *poutbuf_size,
                                                     &fpc->wrap_buf,
@@ -652,6 +653,7 @@ handle_error:
 static int flac_parse_init(AVCodecParserContext *c)
 {
     FLACParseContext *fpc = c->priv_data;
+    fpc->pc = c;
     /* There will generally be FLAC_MIN_HEADERS buffered in the fifo before
        it drains.  This is allocated early to avoid slow reallocation. */
     fpc->fifo_buf = av_fifo_alloc(FLAC_AVG_FRAME_SIZE * (FLAC_MIN_HEADERS + 3));
diff --git a/libavcodec/gsm_parser.c b/libavcodec/gsm_parser.c
index a2965d3fb52f43bc6e22bfa2aa14151ea229e420..89afe80ef0ae8dc5cb3ae64d4e9c28a10ab214a2 100644
--- a/libavcodec/gsm_parser.c
+++ b/libavcodec/gsm_parser.c
@@ -31,6 +31,7 @@
 typedef struct GSMParseContext {
     ParseContext pc;
     int block_size;
+    int duration;
     int remaining;
 } GSMParseContext;
 
@@ -44,8 +45,14 @@ static int gsm_parse(AVCodecParserContext *s1, AVCodecContext *avctx,
 
     if (!s->block_size) {
         switch (avctx->codec_id) {
-        case CODEC_ID_GSM:    s->block_size = GSM_BLOCK_SIZE;    break;
-        case CODEC_ID_GSM_MS: s->block_size = GSM_MS_BLOCK_SIZE; break;
+        case CODEC_ID_GSM:
+            s->block_size = GSM_BLOCK_SIZE;
+            s->duration   = GSM_FRAME_SIZE;
+            break;
+        case CODEC_ID_GSM_MS:
+            s->block_size = GSM_MS_BLOCK_SIZE;
+            s->duration   = GSM_FRAME_SIZE * 2;
+            break;
         default:
             return AVERROR(EINVAL);
         }
@@ -66,6 +73,9 @@ static int gsm_parse(AVCodecParserContext *s1, AVCodecContext *avctx,
         *poutbuf_size = 0;
         return buf_size;
     }
+
+    s1->duration = s->duration;
+
     *poutbuf      = buf;
     *poutbuf_size = buf_size;
     return next;
diff --git a/libavcodec/huffyuv.c b/libavcodec/huffyuv.c
index 625d8fb1869aed939d3306c29edf68ee684777d7..ca94fc90424e55222e01d6ebb2e3cd48d32de5a2 100644
--- a/libavcodec/huffyuv.c
+++ b/libavcodec/huffyuv.c
@@ -1261,9 +1261,10 @@ static av_cold int decode_end(AVCodecContext *avctx)
 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
 
 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
-static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *pict, int *got_packet)
+{
     HYuvContext *s = avctx->priv_data;
-    AVFrame *pict = data;
     const int width= s->width;
     const int width2= s->width>>1;
     const int height= s->height;
@@ -1271,7 +1272,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
     const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
     AVFrame * const p= &s->picture;
-    int i, j, size=0;
+    int i, j, size = 0, ret;
+
+    if (!pkt->data &&
+        (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
+        return ret;
+    }
 
     *p = *pict;
     p->pict_type= AV_PICTURE_TYPE_I;
@@ -1282,7 +1289,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
             generate_len_table(s->len[i], s->stats[i]);
             if(generate_bits_table(s->bits[i], s->len[i])<0)
                 return -1;
-            size+= store_table(s, s->len[i], &buf[size]);
+            size += store_table(s, s->len[i], &pkt->data[size]);
         }
 
         for(i=0; i<3; i++)
@@ -1290,7 +1297,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
                 s->stats[i][j] >>= 1;
     }
 
-    init_put_bits(&s->pb, buf+size, buf_size-size);
+    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
 
     if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
         int lefty, leftu, leftv, y, cy;
@@ -1473,12 +1480,16 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
         avctx->stats_out[0] = '\0';
     if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
         flush_put_bits(&s->pb);
-        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
+        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
     }
 
     s->picture_number++;
 
-    return size*4;
+    pkt->size   = size*4;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 static av_cold int encode_end(AVCodecContext *avctx)
@@ -1531,7 +1542,7 @@ AVCodec ff_huffyuv_encoder = {
     .id             = CODEC_ID_HUFFYUV,
     .priv_data_size = sizeof(HYuvContext),
     .init           = encode_init,
-    .encode         = encode_frame,
+    .encode2        = encode_frame,
     .close          = encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
@@ -1545,7 +1556,7 @@ AVCodec ff_ffvhuff_encoder = {
     .id             = CODEC_ID_FFVHUFF,
     .priv_data_size = sizeof(HYuvContext),
     .init           = encode_init,
-    .encode         = encode_frame,
+    .encode2        = encode_frame,
     .close          = encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
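This and the other encoder conversions below all follow the same encode2 pattern:
obtain the output packet up front, write into pkt->data instead of a
caller-supplied buffer, then report the byte count through pkt->size and signal
success via *got_packet. A stripped-down sketch of that shape (names and sizing
are illustrative, not taken from any one encoder in this patch):

    static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *frame, int *got_packet)
    {
        int ret, written = 0;
        int max_size = avctx->width * avctx->height * 4 + 256; /* worst case */

        if ((ret = ff_alloc_packet(pkt, max_size)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
            return ret;
        }

        /* ... write the bitstream into pkt->data, advancing `written` ... */

        pkt->size   = written;
        pkt->flags |= AV_PKT_FLAG_KEY; /* intra-only codecs mark every packet */
        *got_packet = 1;
        return 0;
    }
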
diff --git a/libavcodec/internal.h b/libavcodec/internal.h
index b7d4a6e1394aa6368d81486846128921a351623c..cd321c368854b00b44caacc7a0886819dc81a3f8 100644
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@ -26,6 +26,7 @@
 
 #include <stdint.h>
 
+#include "libavutil/mathematics.h"
 #include "libavutil/pixfmt.h"
 #include "avcodec.h"
 
@@ -137,4 +138,14 @@ int avpriv_unlock_avformat(void);
  */
 int ff_alloc_packet(AVPacket *avpkt, int size);
 
+/**
+ * Rescale from sample rate to AVCodecContext.time_base.
+ */
+static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx,
+                                                        int64_t samples)
+{
+    return av_rescale_q(samples, (AVRational){ 1, avctx->sample_rate },
+                        avctx->time_base);
+}
+
 #endif /* AVCODEC_INTERNAL_H */
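The helper simply wraps the av_rescale_q() call that several encoders were
open-coding; the libspeex and libvorbis changes below become its first users.
The intended pattern, sketched with illustrative variable names:

    /* running sample counter -> pts in AVCodecContext.time_base units */
    avctx->coded_frame->pts = ff_samples_to_time_base(avctx, next_pts);
    next_pts += samples_in_packet;
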
diff --git a/libavcodec/libmp3lame.c b/libavcodec/libmp3lame.c
index d75183e9c04099da719865801acb1175d59e9252..e8accedc00c6d60ea159d6d03caaee202f81c106 100644
--- a/libavcodec/libmp3lame.c
+++ b/libavcodec/libmp3lame.c
@@ -24,261 +24,227 @@
  * Interface to libmp3lame for mp3 encoding.
  */
 
+#include <lame/lame.h>
+
 #include "libavutil/intreadwrite.h"
 #include "libavutil/log.h"
 #include "libavutil/opt.h"
 #include "avcodec.h"
+#include "internal.h"
 #include "mpegaudio.h"
-#include <lame/lame.h>
+#include "mpegaudiodecheader.h"
 
 #define BUFFER_SIZE (7200 + 2 * MPA_FRAME_SIZE + MPA_FRAME_SIZE / 4+1000) // FIXME: Buffer size to small? Adding 1000 to make up for it.
-typedef struct Mp3AudioContext {
+
+typedef struct LAMEContext {
     AVClass *class;
+    AVCodecContext *avctx;
     lame_global_flags *gfp;
-    int stereo;
     uint8_t buffer[BUFFER_SIZE];
     int buffer_index;
-    struct {
-        int *left;
-        int *right;
-    } s32_data;
     int reservoir;
-} Mp3AudioContext;
+    void *planar_samples[2];
+} LAMEContext;
+
 
-static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
+static av_cold int mp3lame_encode_close(AVCodecContext *avctx)
 {
-    Mp3AudioContext *s = avctx->priv_data;
+    LAMEContext *s = avctx->priv_data;
 
+    av_freep(&avctx->coded_frame);
+    av_freep(&s->planar_samples[0]);
+    av_freep(&s->planar_samples[1]);
+
+    lame_close(s->gfp);
+    return 0;
+}
+
+static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
+{
+    LAMEContext *s = avctx->priv_data;
+    int ret;
+
+    s->avctx = avctx;
+
+    /* initialize LAME and get defaults */
+    if ((s->gfp = lame_init()) == NULL)
+        return AVERROR(ENOMEM);
+
+    /* channels */
     if (avctx->channels > 2) {
         av_log(avctx, AV_LOG_ERROR,
                "Invalid number of channels %d, must be <= 2\n", avctx->channels);
-        return AVERROR(EINVAL);
+        ret =  AVERROR(EINVAL);
+        goto error;
     }
+    lame_set_num_channels(s->gfp, avctx->channels);
+    lame_set_mode(s->gfp, avctx->channels > 1 ? JOINT_STEREO : MONO);
 
-    s->stereo = avctx->channels > 1 ? 1 : 0;
-
-    if ((s->gfp = lame_init()) == NULL)
-        goto err;
-    lame_set_in_samplerate(s->gfp, avctx->sample_rate);
+    /* sample rate */
+    lame_set_in_samplerate (s->gfp, avctx->sample_rate);
     lame_set_out_samplerate(s->gfp, avctx->sample_rate);
-    lame_set_num_channels(s->gfp, avctx->channels);
-    if (avctx->compression_level == FF_COMPRESSION_DEFAULT) {
+
+    /* algorithmic quality */
+    if (avctx->compression_level == FF_COMPRESSION_DEFAULT)
         lame_set_quality(s->gfp, 5);
-    } else {
+    else
         lame_set_quality(s->gfp, avctx->compression_level);
-    }
-    lame_set_mode(s->gfp, s->stereo ? JOINT_STEREO : MONO);
-    lame_set_brate(s->gfp, avctx->bit_rate / 1000);
+
+    /* rate control */
     if (avctx->flags & CODEC_FLAG_QSCALE) {
-        lame_set_brate(s->gfp, 0);
         lame_set_VBR(s->gfp, vbr_default);
         lame_set_VBR_quality(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA);
+    } else {
+        if (avctx->bit_rate)
+            lame_set_brate(s->gfp, avctx->bit_rate / 1000);
     }
-    lame_set_bWriteVbrTag(s->gfp,0);
-    lame_set_disable_reservoir(s->gfp, !s->reservoir);
-    if (lame_init_params(s->gfp) < 0)
-        goto err_close;
 
-    avctx->frame_size             = lame_get_framesize(s->gfp);
+    /* do not get a Xing VBR header frame from LAME */
+    lame_set_bWriteVbrTag(s->gfp,0);
 
-    if(!(avctx->coded_frame= avcodec_alloc_frame())) {
-        lame_close(s->gfp);
+    /* bit reservoir usage */
+    lame_set_disable_reservoir(s->gfp, !s->reservoir);
 
-        return AVERROR(ENOMEM);
+    /* set specified parameters */
+    if (lame_init_params(s->gfp) < 0) {
+        ret = -1;
+        goto error;
     }
 
-    if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt && s->stereo) {
-        int nelem = 2 * avctx->frame_size;
-
-        if(! (s->s32_data.left = av_malloc(nelem * sizeof(int)))) {
-            av_freep(&avctx->coded_frame);
-            lame_close(s->gfp);
+    avctx->frame_size  = lame_get_framesize(s->gfp);
+    avctx->coded_frame = avcodec_alloc_frame();
+    if (!avctx->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
 
-            return AVERROR(ENOMEM);
+    /* sample format */
+    if (avctx->sample_fmt == AV_SAMPLE_FMT_S32 ||
+        avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
+        int ch;
+        for (ch = 0; ch < avctx->channels; ch++) {
+            s->planar_samples[ch] = av_malloc(avctx->frame_size *
+                                              av_get_bytes_per_sample(avctx->sample_fmt));
+            if (!s->planar_samples[ch]) {
+                ret = AVERROR(ENOMEM);
+                goto error;
+            }
         }
-
-        s->s32_data.right = s->s32_data.left + avctx->frame_size;
     }
 
     return 0;
-
-err_close:
-    lame_close(s->gfp);
-err:
-    return -1;
+error:
+    mp3lame_encode_close(avctx);
+    return ret;
 }
 
-static const int sSampleRates[] = {
-    44100, 48000,  32000, 22050, 24000, 16000, 11025, 12000, 8000, 0
-};
-
-static const int sBitRates[2][3][15] = {
-    {
-        { 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 },
-        { 0, 32, 48, 56, 64,  80,  96,  112, 128, 160, 192, 224, 256, 320, 384 },
-        { 0, 32, 40, 48, 56,  64,  80,  96,  112, 128, 160, 192, 224, 256, 320 }
-    },
-    {
-        { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 },
-        { 0,  8, 16, 24, 32, 40, 48,  56,  64,  80,  96, 112, 128, 144, 160 },
-        { 0,  8, 16, 24, 32, 40, 48,  56,  64,  80,  96, 112, 128, 144, 160 }
-    },
-};
-
-static const int sSamplesPerFrame[2][3] = {
-    { 384, 1152, 1152 },
-    { 384, 1152,  576 }
-};
-
-static const int sBitsPerSlot[3] = { 32, 8, 8 };
-
-static int mp3len(void *data, int *samplesPerFrame, int *sampleRate)
+#define DEINTERLEAVE(type, scale) do {                  \
+    int ch, i;                                          \
+    for (ch = 0; ch < s->avctx->channels; ch++) {       \
+        const type *input = samples;                    \
+        type      *output = s->planar_samples[ch];      \
+        input += ch;                                    \
+        for (i = 0; i < s->avctx->frame_size; i++) {    \
+            output[i] = *input * scale;                 \
+            input += s->avctx->channels;                \
+        }                                               \
+    }                                                   \
+} while (0)
+
+static int encode_frame_int16(LAMEContext *s, void *samples)
 {
-    uint32_t header  = AV_RB32(data);
-    int layerID      = 3 - ((header >> 17) & 0x03);
-    int bitRateID    = ((header >> 12) & 0x0f);
-    int sampleRateID = ((header >> 10) & 0x03);
-    int bitsPerSlot  = sBitsPerSlot[layerID];
-    int isPadded     = ((header >> 9) & 0x01);
-    static int const mode_tab[4] = { 2, 3, 1, 0 };
-    int mode    = mode_tab[(header >> 19) & 0x03];
-    int mpeg_id = mode > 0;
-    int temp0, temp1, bitRate;
-
-    if (((header >> 21) & 0x7ff) != 0x7ff || mode == 3 || layerID == 3 ||
-        sampleRateID == 3) {
-        return -1;
+    if (s->avctx->channels > 1) {
+        return lame_encode_buffer_interleaved(s->gfp, samples,
+                                              s->avctx->frame_size,
+                                              s->buffer + s->buffer_index,
+                                              BUFFER_SIZE - s->buffer_index);
+    } else {
+        return lame_encode_buffer(s->gfp, samples, NULL, s->avctx->frame_size,
+                                  s->buffer + s->buffer_index,
+                                  BUFFER_SIZE - s->buffer_index);
     }
+}
 
-    if (!samplesPerFrame)
-        samplesPerFrame = &temp0;
-    if (!sampleRate)
-        sampleRate      = &temp1;
+static int encode_frame_int32(LAMEContext *s, void *samples)
+{
+    DEINTERLEAVE(int32_t, 1);
 
-    //*isMono = ((header >>  6) & 0x03) == 0x03;
+    return lame_encode_buffer_int(s->gfp,
+                                  s->planar_samples[0], s->planar_samples[1],
+                                  s->avctx->frame_size,
+                                  s->buffer + s->buffer_index,
+                                  BUFFER_SIZE - s->buffer_index);
+}
 
-    *sampleRate      = sSampleRates[sampleRateID] >> mode;
-    bitRate          = sBitRates[mpeg_id][layerID][bitRateID] * 1000;
-    *samplesPerFrame = sSamplesPerFrame[mpeg_id][layerID];
-    //av_log(NULL, AV_LOG_DEBUG,
-    //       "sr:%d br:%d spf:%d l:%d m:%d\n",
-    //       *sampleRate, bitRate, *samplesPerFrame, layerID, mode);
+static int encode_frame_float(LAMEContext *s, void *samples)
+{
+    DEINTERLEAVE(float, 32768.0f);
 
-    return *samplesPerFrame * bitRate / (bitsPerSlot * *sampleRate) + isPadded;
+    return lame_encode_buffer_float(s->gfp,
+                                    s->planar_samples[0], s->planar_samples[1],
+                                    s->avctx->frame_size,
+                                    s->buffer + s->buffer_index,
+                                    BUFFER_SIZE - s->buffer_index);
 }
 
-static int MP3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame,
+static int mp3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame,
                                 int buf_size, void *data)
 {
-    Mp3AudioContext *s = avctx->priv_data;
+    LAMEContext *s = avctx->priv_data;
+    MPADecodeHeader hdr;
     int len;
     int lame_result;
 
-    /* lame 3.91 dies on '1-channel interleaved' data */
-
-    if (!data){
-        lame_result= lame_encode_flush(
-                s->gfp,
-                s->buffer + s->buffer_index,
-                BUFFER_SIZE - s->buffer_index
-                );
-#if 2147483647 == INT_MAX
-    }else if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt){
-        if (s->stereo) {
-            int32_t *rp = data;
-            int32_t *mp = rp + 2*avctx->frame_size;
-            int *wpl = s->s32_data.left;
-            int *wpr = s->s32_data.right;
-
-            while (rp < mp) {
-                *wpl++ = *rp++;
-                *wpr++ = *rp++;
-            }
-
-            lame_result = lame_encode_buffer_int(
-                s->gfp,
-                s->s32_data.left,
-                s->s32_data.right,
-                avctx->frame_size,
-                s->buffer + s->buffer_index,
-                BUFFER_SIZE - s->buffer_index
-                );
-        } else {
-            lame_result = lame_encode_buffer_int(
-                s->gfp,
-                data,
-                data,
-                avctx->frame_size,
-                s->buffer + s->buffer_index,
-                BUFFER_SIZE - s->buffer_index
-                );
-        }
-#endif
-    }else{
-        if (s->stereo) {
-            lame_result = lame_encode_buffer_interleaved(
-                s->gfp,
-                data,
-                avctx->frame_size,
-                s->buffer + s->buffer_index,
-                BUFFER_SIZE - s->buffer_index
-                );
-        } else {
-            lame_result = lame_encode_buffer(
-                s->gfp,
-                data,
-                data,
-                avctx->frame_size,
-                s->buffer + s->buffer_index,
-                BUFFER_SIZE - s->buffer_index
-                );
+    if (data) {
+        switch (avctx->sample_fmt) {
+        case AV_SAMPLE_FMT_S16:
+            lame_result = encode_frame_int16(s, data);
+            break;
+        case AV_SAMPLE_FMT_S32:
+            lame_result = encode_frame_int32(s, data);
+            break;
+        case AV_SAMPLE_FMT_FLT:
+            lame_result = encode_frame_float(s, data);
+            break;
+        default:
+            return AVERROR_BUG;
         }
+    } else {
+        lame_result = lame_encode_flush(s->gfp, s->buffer + s->buffer_index,
+                                        BUFFER_SIZE - s->buffer_index);
     }
-
     if (lame_result < 0) {
         if (lame_result == -1) {
-            /* output buffer too small */
             av_log(avctx, AV_LOG_ERROR,
                    "lame: output buffer too small (buffer index: %d, free bytes: %d)\n",
                    s->buffer_index, BUFFER_SIZE - s->buffer_index);
         }
         return -1;
     }
-
     s->buffer_index += lame_result;
 
+    /* Move 1 frame from the LAME buffer to the output packet, if available.
+       We have to parse the first frame header in the output buffer to
+       determine the frame size. */
     if (s->buffer_index < 4)
         return 0;
-
-    len = mp3len(s->buffer, NULL, NULL);
-    //av_log(avctx, AV_LOG_DEBUG, "in:%d packet-len:%d index:%d\n",
-    //       avctx->frame_size, len, s->buffer_index);
+    if (avpriv_mpegaudio_decode_header(&hdr, AV_RB32(s->buffer))) {
+        av_log(avctx, AV_LOG_ERROR, "free format output not supported\n");
+        return -1;
+    }
+    len = hdr.frame_size;
+    av_dlog(avctx, "in:%d packet-len:%d index:%d\n", avctx->frame_size, len,
+            s->buffer_index);
     if (len <= s->buffer_index) {
         memcpy(frame, s->buffer, len);
         s->buffer_index -= len;
-
         memmove(s->buffer, s->buffer + len, s->buffer_index);
-        // FIXME fix the audio codec API, so we do not need the memcpy()
-        /*for(i=0; i<len; i++) {
-            av_log(avctx, AV_LOG_DEBUG, "%2X ", frame[i]);
-        }*/
         return len;
     } else
         return 0;
 }
 
-static av_cold int MP3lame_encode_close(AVCodecContext *avctx)
-{
-    Mp3AudioContext *s = avctx->priv_data;
-
-    av_freep(&s->s32_data.left);
-    av_freep(&avctx->coded_frame);
-
-    lame_close(s->gfp);
-    return 0;
-}
-
-#define OFFSET(x) offsetof(Mp3AudioContext, x)
+#define OFFSET(x) offsetof(LAMEContext, x)
 #define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
     { "reservoir", "Use bit reservoir.", OFFSET(reservoir), AV_OPT_TYPE_INT, { 1 }, 0, 1, AE },
@@ -292,21 +258,30 @@ static const AVClass libmp3lame_class = {
     .version    = LIBAVUTIL_VERSION_INT,
 };
 
+static const AVCodecDefault libmp3lame_defaults[] = {
+    { "b",          "0" },
+    { NULL },
+};
+
+static const int libmp3lame_sample_rates[] = {
+    44100, 48000,  32000, 22050, 24000, 16000, 11025, 12000, 8000, 0
+};
+
 AVCodec ff_libmp3lame_encoder = {
     .name                  = "libmp3lame",
     .type                  = AVMEDIA_TYPE_AUDIO,
     .id                    = CODEC_ID_MP3,
-    .priv_data_size        = sizeof(Mp3AudioContext),
-    .init                  = MP3lame_encode_init,
-    .encode                = MP3lame_encode_frame,
-    .close                 = MP3lame_encode_close,
+    .priv_data_size        = sizeof(LAMEContext),
+    .init                  = mp3lame_encode_init,
+    .encode                = mp3lame_encode_frame,
+    .close                 = mp3lame_encode_close,
     .capabilities          = CODEC_CAP_DELAY,
-    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
-#if 2147483647 == INT_MAX
-    AV_SAMPLE_FMT_S32,
-#endif
+    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32,
+                                                             AV_SAMPLE_FMT_FLT,
+                                                             AV_SAMPLE_FMT_S16,
                                                              AV_SAMPLE_FMT_NONE },
-    .supported_samplerates = sSampleRates,
+    .supported_samplerates = libmp3lame_sample_rates,
     .long_name             = NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
     .priv_class            = &libmp3lame_class,
+    .defaults              = libmp3lame_defaults,
 };
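The DEINTERLEAVE macro feeds LAME's planar entry points; the float path scales
by 32768.0f because lame_encode_buffer_float() takes samples in short range
rather than the -1.0..1.0 range of AV_SAMPLE_FMT_FLT. Written out as a plain
function, the float instantiation is roughly:

    /* Sketch equivalent to DEINTERLEAVE(float, 32768.0f): interleaved
     * -1.0..1.0 floats -> two planar buffers scaled to +/-32768. */
    static void deinterleave_float(float *const planar[2], const float *samples,
                                   int channels, int frame_size)
    {
        int ch, i;
        for (ch = 0; ch < channels; ch++) {
            const float *input  = samples + ch;
            float       *output = planar[ch];
            for (i = 0; i < frame_size; i++) {
                output[i] = *input * 32768.0f;
                input    += channels;
            }
        }
    }
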
diff --git a/libavcodec/libspeexenc.c b/libavcodec/libspeexenc.c
index 8cdc911c2a0b0bc670c82193bfe7cf2adb76b679..7deb98b6c3f0f0535d6474191fdcf32bcf3b2efd 100644
--- a/libavcodec/libspeexenc.c
+++ b/libavcodec/libspeexenc.c
@@ -67,7 +67,6 @@
 #include <speex/speex.h>
 #include <speex/speex_header.h>
 #include <speex/speex_stereo.h>
-#include "libavutil/mathematics.h"
 #include "libavutil/opt.h"
 #include "avcodec.h"
 #include "internal.h"
@@ -258,9 +257,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size,
     /* write output if all frames for the packet have been encoded */
     if (s->pkt_frame_count == s->frames_per_packet) {
         s->pkt_frame_count = 0;
-        avctx->coded_frame->pts =
-            av_rescale_q(s->next_pts, (AVRational){ 1, avctx->sample_rate },
-                         avctx->time_base);
+        avctx->coded_frame->pts = ff_samples_to_time_base(avctx, s->next_pts);
         s->next_pts += s->pkt_sample_count;
         s->pkt_sample_count = 0;
         if (buf_size > speex_bits_nbytes(&s->bits)) {
diff --git a/libavcodec/libvorbis.c b/libavcodec/libvorbis.c
index 5c35b35bc4ef79153e39adeb159d8997b504b876..ca9376baf10118254d4bf7020728466f8dcfed93 100644
--- a/libavcodec/libvorbis.c
+++ b/libavcodec/libvorbis.c
@@ -29,8 +29,8 @@
 #include "libavutil/opt.h"
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 #include "vorbis.h"
-#include "libavutil/mathematics.h"
 
 #undef NDEBUG
 #include <assert.h>
@@ -268,7 +268,8 @@ static int oggvorbis_encode_frame(AVCodecContext *avccontext,
         op2->packet = context->buffer + sizeof(ogg_packet);
 
         l = op2->bytes;
-        avccontext->coded_frame->pts = av_rescale_q(op2->granulepos, (AVRational) { 1, avccontext->sample_rate }, avccontext->time_base);
+        avccontext->coded_frame->pts = ff_samples_to_time_base(avccontext,
+                                                               op2->granulepos);
         //FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate
 
         if (l > buf_size) {
diff --git a/libavcodec/mlp_parser.c b/libavcodec/mlp_parser.c
index f31e2d572782ad5b3a63b60b639c250eef5c6068..64938da9327e76dbbc6ae217f2f79c0f22992aba 100644
--- a/libavcodec/mlp_parser.c
+++ b/libavcodec/mlp_parser.c
@@ -314,7 +314,7 @@ static int mlp_parse(AVCodecParserContext *s,
         else
             avctx->sample_fmt = AV_SAMPLE_FMT_S16;
         avctx->sample_rate = mh.group1_samplerate;
-        avctx->frame_size = mh.access_unit_size;
+        s->duration = mh.access_unit_size;
 
         if (mh.stream_type == 0xbb) {
             /* MLP stream */
diff --git a/libavcodec/mpegaudio_parser.c b/libavcodec/mpegaudio_parser.c
index ec7e882c312682bd3fcca3f577dafa59bd85a44f..b8089b0b59a95448391b6aabca7dd25fda78c325 100644
--- a/libavcodec/mpegaudio_parser.c
+++ b/libavcodec/mpegaudio_parser.c
@@ -78,7 +78,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
                     if(s->header_count > 1){
                         avctx->sample_rate= sr;
                         avctx->channels   = channels;
-                        avctx->frame_size = frame_size;
+                        s1->duration      = frame_size;
                         avctx->bit_rate   = bit_rate;
                     }
                     break;
diff --git a/libavcodec/pcxenc.c b/libavcodec/pcxenc.c
index a39e221805499eb292552e6e438dfa6e061707ac..65b3935476b4d59aa29c92effd06c33e71b3ba73 100644
--- a/libavcodec/pcxenc.c
+++ b/libavcodec/pcxenc.c
@@ -29,6 +29,7 @@
 #include "avcodec.h"
 #include "bytestream.h"
 #include "libavutil/imgutils.h"
+#include "internal.h"
 
 typedef struct PCXContext {
     AVFrame picture;
@@ -96,20 +97,20 @@ static int pcx_rle_encode(      uint8_t *dst, int dst_size,
     return dst - dst_start;
 }
 
-static int pcx_encode_frame(AVCodecContext *avctx,
-                            unsigned char *buf, int buf_size, void *data)
+static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                            const AVFrame *frame, int *got_packet)
 {
     PCXContext *s = avctx->priv_data;
     AVFrame *const pict = &s->picture;
-    const uint8_t *buf_start = buf;
-    const uint8_t *buf_end   = buf + buf_size;
+    const uint8_t *buf_end;
+    uint8_t *buf;
 
-    int bpp, nplanes, i, y, line_bytes, written;
+    int bpp, nplanes, i, y, line_bytes, written, ret, max_pkt_size;
     const uint32_t *pal = NULL;
     uint32_t palette256[256];
     const uint8_t *src;
 
-    *pict = *(AVFrame *)data;
+    *pict = *frame;
     pict->pict_type = AV_PICTURE_TYPE_I;
     pict->key_frame = 1;
 
@@ -151,6 +152,14 @@ static int pcx_encode_frame(AVCodecContext *avctx,
     line_bytes = (avctx->width * bpp + 7) >> 3;
     line_bytes = (line_bytes + 1) & ~1;
 
+    max_pkt_size = 128 + avctx->height * 2 * line_bytes * nplanes + (pal ? 256*3 + 1 : 0);
+    if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", max_pkt_size);
+        return ret;
+    }
+    buf     = pkt->data;
+    buf_end = pkt->data + pkt->size;
+
     bytestream_put_byte(&buf, 10);                  // manufacturer
     bytestream_put_byte(&buf, 5);                   // version
     bytestream_put_byte(&buf, 1);                   // encoding
@@ -167,7 +176,7 @@ static int pcx_encode_frame(AVCodecContext *avctx,
     bytestream_put_byte(&buf, nplanes);             // number of planes
     bytestream_put_le16(&buf, line_bytes);          // scanline plane size in bytes
 
-    while (buf - buf_start < 128)
+    while (buf - pkt->data < 128)
         *buf++= 0;
 
     src = pict->data[0];
@@ -193,7 +202,11 @@ static int pcx_encode_frame(AVCodecContext *avctx,
         }
     }
 
-    return buf - buf_start;
+    pkt->size   = buf - pkt->data;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 AVCodec ff_pcx_encoder = {
@@ -202,7 +215,7 @@ AVCodec ff_pcx_encoder = {
     .id             = CODEC_ID_PCX,
     .priv_data_size = sizeof(PCXContext),
     .init           = pcx_encode_init,
-    .encode         = pcx_encode_frame,
+    .encode2        = pcx_encode_frame,
     .pix_fmts = (const enum PixelFormat[]){
         PIX_FMT_RGB24,
         PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8,
diff --git a/libavcodec/pnmdec.c b/libavcodec/pnmdec.c
index c3f7bbda56be3f17abd8a7aae287b05c7d4f9e03..0be7ec9326f6f55bcc93349cc4624ad2550614de 100644
--- a/libavcodec/pnmdec.c
+++ b/libavcodec/pnmdec.c
@@ -201,7 +201,6 @@ AVCodec ff_pgm_decoder = {
     .close          = ff_pnm_end,
     .decode         = pnm_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
-    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
 };
 #endif
@@ -216,7 +215,6 @@ AVCodec ff_pgmyuv_decoder = {
     .close          = ff_pnm_end,
     .decode         = pnm_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
-    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
 };
 #endif
@@ -231,7 +229,6 @@ AVCodec ff_ppm_decoder = {
     .close          = ff_pnm_end,
     .decode         = pnm_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
-    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
 };
 #endif
@@ -246,7 +243,6 @@ AVCodec ff_pbm_decoder = {
     .close          = ff_pnm_end,
     .decode         = pnm_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
-    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
 };
 #endif
@@ -261,7 +257,6 @@ AVCodec ff_pam_decoder = {
     .close          = ff_pnm_end,
     .decode         = pnm_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
-    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
 };
 #endif
diff --git a/libavcodec/pnmenc.c b/libavcodec/pnmenc.c
index e31bfee5205a54739cf4723b2340e2d7bfa0041b..1f96db51b94c33ac8a5160af1ba22e8a2681eb1a 100644
--- a/libavcodec/pnmenc.c
+++ b/libavcodec/pnmenc.c
@@ -20,21 +20,23 @@
  */
 
 #include "avcodec.h"
+#include "internal.h"
 #include "pnm.h"
 
 
-static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
-                            int buf_size, void *data)
+static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                            const AVFrame *pict, int *got_packet)
 {
     PNMContext *s     = avctx->priv_data;
-    AVFrame *pict     = data;
     AVFrame * const p = (AVFrame*)&s->picture;
-    int i, h, h1, c, n, linesize;
+    int i, h, h1, c, n, linesize, ret;
     uint8_t *ptr, *ptr1, *ptr2;
 
-    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
+    if ((ret = ff_alloc_packet(pkt, avpicture_get_size(avctx->pix_fmt,
+                                                       avctx->width,
+                                                       avctx->height) + 200)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
-        return -1;
+        return ret;
     }
 
     *p           = *pict;
@@ -42,8 +44,8 @@ static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
     p->key_frame = 1;
 
     s->bytestream_start =
-    s->bytestream       = outbuf;
-    s->bytestream_end   = outbuf + buf_size;
+    s->bytestream       = pkt->data;
+    s->bytestream_end   = pkt->data + pkt->size;
 
     h  = avctx->height;
     h1 = h;
@@ -107,7 +109,11 @@ static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
                 ptr2 += p->linesize[2];
         }
     }
-    return s->bytestream - s->bytestream_start;
+    pkt->size   = s->bytestream - s->bytestream_start;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 
@@ -118,7 +124,7 @@ AVCodec ff_pgm_encoder = {
     .id             = CODEC_ID_PGM,
     .priv_data_size = sizeof(PNMContext),
     .init           = ff_pnm_init,
-    .encode         = pnm_encode_frame,
+    .encode2        = pnm_encode_frame,
     .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
 };
@@ -131,7 +137,7 @@ AVCodec ff_pgmyuv_encoder = {
     .id             = CODEC_ID_PGMYUV,
     .priv_data_size = sizeof(PNMContext),
     .init           = ff_pnm_init,
-    .encode         = pnm_encode_frame,
+    .encode2        = pnm_encode_frame,
     .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
 };
@@ -144,7 +150,7 @@ AVCodec ff_ppm_encoder = {
     .id             = CODEC_ID_PPM,
     .priv_data_size = sizeof(PNMContext),
     .init           = ff_pnm_init,
-    .encode         = pnm_encode_frame,
+    .encode2        = pnm_encode_frame,
     .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
 };
@@ -157,7 +163,7 @@ AVCodec ff_pbm_encoder = {
     .id             = CODEC_ID_PBM,
     .priv_data_size = sizeof(PNMContext),
     .init           = ff_pnm_init,
-    .encode         = pnm_encode_frame,
+    .encode2        = pnm_encode_frame,
     .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
 };
diff --git a/libavcodec/roqvideo.h b/libavcodec/roqvideo.h
index 3fe11c670bd23913ae74be8c345c98334f36a4ee..66c8bebe584fa28e08c622fffff9935b2a534632 100644
--- a/libavcodec/roqvideo.h
+++ b/libavcodec/roqvideo.h
@@ -69,7 +69,7 @@ typedef struct RoqContext {
 
     unsigned int framesSinceKeyframe;
 
-    AVFrame *frame_to_enc;
+    const AVFrame *frame_to_enc;
     uint8_t *out_buf;
     struct RoqTempData *tmpData;
 } RoqContext;
diff --git a/libavcodec/roqvideoenc.c b/libavcodec/roqvideoenc.c
index 88d6224eacc98f11aa8e8ceba4ce6f4bfc3c68ab..38008b0b2cdcf7722678f681f1e9dbc50660dd54 100644
--- a/libavcodec/roqvideoenc.c
+++ b/libavcodec/roqvideoenc.c
@@ -59,6 +59,7 @@
 #include "roqvideo.h"
 #include "bytestream.h"
 #include "elbg.h"
+#include "internal.h"
 #include "mathops.h"
 
 #define CHROMA_BIAS 1
@@ -112,7 +113,7 @@ static inline int square(int x)
     return x*x;
 }
 
-static inline int eval_sse(uint8_t *a, uint8_t *b, int count)
+static inline int eval_sse(const uint8_t *a, const uint8_t *b, int count)
 {
     int diff=0;
 
@@ -124,8 +125,8 @@ static inline int eval_sse(uint8_t *a, uint8_t *b, int count)
 
 // FIXME Could use DSPContext.sse, but it is not so speed critical (used
 // just for motion estimation).
-static int block_sse(uint8_t **buf1, uint8_t **buf2, int x1, int y1, int x2,
-                     int y2, int *stride1, int *stride2, int size)
+static int block_sse(uint8_t * const *buf1, uint8_t * const *buf2, int x1, int y1,
+                     int x2, int y2, const int *stride1, const int *stride2, int size)
 {
     int i, k;
     int sse=0;
@@ -260,7 +261,7 @@ static void create_cel_evals(RoqContext *enc, RoqTempdata *tempData)
 /**
  * Get macroblocks from parts of the image
  */
-static void get_frame_mb(AVFrame *frame, int x, int y, uint8_t mb[], int dim)
+static void get_frame_mb(const AVFrame *frame, int x, int y, uint8_t mb[], int dim)
 {
     int i, j, cp;
 
@@ -754,8 +755,8 @@ static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData,
 /**
  * Create a single YUV cell from a 2x2 section of the image
  */
-static inline void frame_block_to_cell(uint8_t *block, uint8_t **data,
-                                       int top, int left, int *stride)
+static inline void frame_block_to_cell(uint8_t *block, uint8_t * const *data,
+                                       int top, int left, const int *stride)
 {
     int i, j, u=0, v=0;
 
@@ -775,7 +776,7 @@ static inline void frame_block_to_cell(uint8_t *block, uint8_t **data,
 /**
  * Create YUV clusters for the entire image
  */
-static void create_clusters(AVFrame *frame, int w, int h, uint8_t *yuvClusters)
+static void create_clusters(const AVFrame *frame, int w, int h, uint8_t *yuvClusters)
 {
     int i, j, k, l;
 
@@ -1001,13 +1002,12 @@ static void roq_write_video_info_chunk(RoqContext *enc)
     bytestream_put_byte(&enc->out_buf, 0x00);
 }
 
-static int roq_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data)
+static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                            const AVFrame *frame, int *got_packet)
 {
     RoqContext *enc = avctx->priv_data;
-    AVFrame *frame= data;
-    uint8_t *buf_start = buf;
+    int size, ret;
 
-    enc->out_buf = buf;
     enc->avctx = avctx;
 
     enc->frame_to_enc = frame;
@@ -1019,10 +1019,12 @@ static int roq_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
 
     /* 138 bits max per 8x8 block +
      *     256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
-    if (((enc->width*enc->height/64)*138+7)/8 + 256*(6+4) + 8 > buf_size) {
-        av_log(avctx, AV_LOG_ERROR, "  RoQ: Output buffer too small!\n");
-        return -1;
+    size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
+    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet with size %d.\n", size);
+        return ret;
     }
+    enc->out_buf = pkt->data;
 
     /* Check for I frame */
     if (enc->framesSinceKeyframe == avctx->gop_size)
@@ -1046,7 +1048,12 @@ static int roq_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
     /* Encode the actual frame */
     roq_encode_video(enc);
 
-    return enc->out_buf - buf_start;
+    pkt->size   = enc->out_buf - pkt->data;
+    if (enc->framesSinceKeyframe == 1)
+        pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 static int roq_encode_end(AVCodecContext *avctx)
@@ -1071,7 +1078,7 @@ AVCodec ff_roq_encoder = {
     .id                   = CODEC_ID_ROQ,
     .priv_data_size       = sizeof(RoqContext),
     .init                 = roq_encode_init,
-    .encode               = roq_encode_frame,
+    .encode2              = roq_encode_frame,
     .close                = roq_encode_end,
     .supported_framerates = (const AVRational[]){{30,1}, {0,0}},
     .pix_fmts             = (const enum PixelFormat[]){PIX_FMT_YUV444P, PIX_FMT_NONE},
diff --git a/libavcodec/sgienc.c b/libavcodec/sgienc.c
index 1b7fbbfbc9388556943b3c1497307584889aa00d..f72a32086216e1d7052b96efa7cc6dbad967d27d 100644
--- a/libavcodec/sgienc.c
+++ b/libavcodec/sgienc.c
@@ -21,6 +21,7 @@
 
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 #include "sgi.h"
 #include "rle.h"
 
@@ -41,17 +42,17 @@ static av_cold int encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
-                        int buf_size, void *data)
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *frame, int *got_packet)
 {
     SgiContext *s = avctx->priv_data;
     AVFrame * const p = &s->picture;
-    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf;
-    int x, y, z, length, tablesize;
+    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf, *buf;
+    int x, y, z, length, tablesize, ret;
     unsigned int width, height, depth, dimension, bytes_per_channel, pixmax, put_be;
-    unsigned char *orig_buf = buf, *end_buf = buf + buf_size;
+    unsigned char *end_buf;
 
-    *p = *(AVFrame*)data;
+    *p = *frame;
     p->pict_type = AV_PICTURE_TYPE_I;
     p->key_frame = 1;
 
@@ -106,12 +107,18 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
     }
 
     tablesize = depth * height * 4;
-    length = tablesize * 2 + SGI_HEADER_SIZE;
-
-    if (buf_size < length) {
-        av_log(avctx, AV_LOG_ERROR, "buf_size too small(need %d, got %d)\n", length, buf_size);
-        return -1;
+    length = SGI_HEADER_SIZE;
+    if (avctx->coder_type == FF_CODER_TYPE_RAW)
+        length += depth * height * width;
+    else // assume ff_rl_encode() produces at most 2x size of input
+        length += tablesize * 2 + depth * height * (2 * width + 1);
+
+    if ((ret = ff_alloc_packet(pkt, length)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length);
+        return ret;
     }
+    buf     = pkt->data;
+    end_buf = pkt->data + pkt->size;
 
     /* Encode header. */
     bytestream_put_be16(&buf, SGI_MAGIC);
@@ -153,7 +160,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
             in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;
 
             for (y = 0; y < height; y++) {
-                bytestream_put_be32(&offsettab, buf - orig_buf);
+                bytestream_put_be32(&offsettab, buf - pkt->data);
 
                 for (x = 0; x < width; x++)
                     encode_buf[x] = in_buf[depth * x];
@@ -193,7 +200,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
     }
 
     /* total length */
-    return buf - orig_buf;
+    pkt->size = buf - pkt->data;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 AVCodec ff_sgi_encoder = {
@@ -202,7 +213,7 @@ AVCodec ff_sgi_encoder = {
     .id             = CODEC_ID_SGI,
     .priv_data_size = sizeof(SgiContext),
     .init           = encode_init,
-    .encode         = encode_frame,
+    .encode2        = encode_frame,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA,
                                           PIX_FMT_RGB48LE, PIX_FMT_RGB48BE,
                                           PIX_FMT_RGBA64LE, PIX_FMT_RGBA64BE,
diff --git a/libavcodec/targaenc.c b/libavcodec/targaenc.c
index 1171f605a214200dba479b73d148a84a3c820d9d..b03e643cbe02c12af78603660ae325a72f007ab9 100644
--- a/libavcodec/targaenc.c
+++ b/libavcodec/targaenc.c
@@ -22,6 +22,7 @@
 #include "libavutil/intreadwrite.h"
 #include "libavutil/pixdesc.h"
 #include "avcodec.h"
+#include "internal.h"
 #include "rle.h"
 #include "targa.h"
 
@@ -39,7 +40,7 @@ typedef struct TargaContext {
  * @param h Image height
  * @return Size of output in bytes, or -1 if larger than out_size
  */
-static int targa_encode_rle(uint8_t *outbuf, int out_size, AVFrame *pic,
+static int targa_encode_rle(uint8_t *outbuf, int out_size, const AVFrame *pic,
                             int bpp, int w, int h)
 {
     int y,ret;
@@ -59,7 +60,7 @@ static int targa_encode_rle(uint8_t *outbuf, int out_size, AVFrame *pic,
     return out - outbuf;
 }
 
-static int targa_encode_normal(uint8_t *outbuf, AVFrame *pic, int bpp, int w, int h)
+static int targa_encode_normal(uint8_t *outbuf, const AVFrame *pic, int bpp, int w, int h)
 {
     int i, n = bpp * w;
     uint8_t *out = outbuf;
@@ -74,11 +75,10 @@ static int targa_encode_normal(uint8_t *outbuf, AVFrame *pic, int bpp, int w, in
     return out - outbuf;
 }
 
-static int targa_encode_frame(AVCodecContext *avctx,
-                              unsigned char *outbuf,
-                              int buf_size, void *data){
-    AVFrame *p = data;
-    int bpp, picsize, datasize = -1;
+static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                              const AVFrame *p, int *got_packet)
+{
+    int bpp, picsize, datasize = -1, ret;
     uint8_t *out;
 
     if(avctx->width > 0xffff || avctx->height > 0xffff) {
@@ -86,46 +86,43 @@ static int targa_encode_frame(AVCodecContext *avctx,
         return AVERROR(EINVAL);
     }
     picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
-    if(buf_size < picsize + 45) {
+    if ((ret = ff_alloc_packet(pkt, picsize + 45)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
-        return AVERROR(EINVAL);
+        return ret;
     }
 
-    p->pict_type= AV_PICTURE_TYPE_I;
-    p->key_frame= 1;
-
     /* zero out the header and only set applicable fields */
-    memset(outbuf, 0, 12);
-    AV_WL16(outbuf+12, avctx->width);
-    AV_WL16(outbuf+14, avctx->height);
+    memset(pkt->data, 0, 12);
+    AV_WL16(pkt->data+12, avctx->width);
+    AV_WL16(pkt->data+14, avctx->height);
     /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
-    outbuf[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);
+    pkt->data[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);
 
     switch(avctx->pix_fmt) {
     case PIX_FMT_GRAY8:
-        outbuf[2] = TGA_BW;      /* uncompressed grayscale image */
-        outbuf[16] = 8;          /* bpp */
+        pkt->data[2]  = TGA_BW;     /* uncompressed grayscale image */
+        pkt->data[16] = 8;          /* bpp */
         break;
     case PIX_FMT_RGB555LE:
-        outbuf[2] = TGA_RGB;     /* uncompresses true-color image */
-        outbuf[16] = 16;         /* bpp */
+        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
+        pkt->data[16] = 16;         /* bpp */
         break;
     case PIX_FMT_BGR24:
-        outbuf[2] = TGA_RGB;     /* uncompressed true-color image */
-        outbuf[16] = 24;         /* bpp */
+        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
+        pkt->data[16] = 24;         /* bpp */
         break;
     case PIX_FMT_BGRA:
-        outbuf[2] = TGA_RGB;     /* uncompressed true-color image */
-        outbuf[16] = 32;         /* bpp */
+        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
+        pkt->data[16] = 32;         /* bpp */
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
                av_get_pix_fmt_name(avctx->pix_fmt));
         return AVERROR(EINVAL);
     }
-    bpp = outbuf[16] >> 3;
+    bpp = pkt->data[16] >> 3;
 
-    out = outbuf + 18;  /* skip past the header we just output */
+    out = pkt->data + 18;  /* skip past the header we just output */
 
     /* try RLE compression */
     if (avctx->coder_type != FF_CODER_TYPE_RAW)
@@ -133,7 +130,7 @@ static int targa_encode_frame(AVCodecContext *avctx,
 
     /* if that worked well, mark the picture as RLE compressed */
     if(datasize >= 0)
-        outbuf[2] |= 8;
+        pkt->data[2] |= 8;
 
     /* if RLE didn't make it smaller, go back to no compression */
     else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);
@@ -145,7 +142,11 @@ static int targa_encode_frame(AVCodecContext *avctx,
      * aspect ratio and encoder ID fields available? */
     memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);
 
-    return out + 26 - outbuf;
+    pkt->size   = out + 26 - pkt->data;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 static av_cold int targa_encode_init(AVCodecContext *avctx)
@@ -154,6 +155,7 @@ static av_cold int targa_encode_init(AVCodecContext *avctx)
 
     avcodec_get_frame_defaults(&s->picture);
     s->picture.key_frame= 1;
+    s->picture.pict_type = AV_PICTURE_TYPE_I;
     avctx->coded_frame= &s->picture;
 
     return 0;
@@ -165,7 +167,7 @@ AVCodec ff_targa_encoder = {
     .id = CODEC_ID_TARGA,
     .priv_data_size = sizeof(TargaContext),
     .init = targa_encode_init,
-    .encode = targa_encode_frame,
+    .encode2 = targa_encode_frame,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_BGRA, PIX_FMT_RGB555LE, PIX_FMT_GRAY8, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Truevision Targa image"),
 };
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index c559fd761384863585d01ee040d4ef39f7d5574a..4c6823ea761751c4ae1cf09fa80ccc900c35e665 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -993,9 +993,8 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
         if (!ret && *got_packet_ptr) {
             if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
                 avpkt->pts = frame->pts;
-                avpkt->duration = av_rescale_q(frame->nb_samples,
-                                               (AVRational){ 1, avctx->sample_rate },
-                                               avctx->time_base);
+                avpkt->duration = ff_samples_to_time_base(avctx,
+                                                          frame->nb_samples);
             }
             avpkt->dts = avpkt->pts;
         } else {
@@ -1053,9 +1052,8 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                    once all encoders supporting CODEC_CAP_SMALL_LAST_FRAME use
                    encode2() */
                 if (fs_tmp) {
-                    avpkt->duration = av_rescale_q(avctx->frame_size,
-                                                   (AVRational){ 1, avctx->sample_rate },
-                                                   avctx->time_base);
+                    avpkt->duration = ff_samples_to_time_base(avctx,
+                                                              avctx->frame_size);
                 }
             }
             avpkt->size = ret;
@@ -1128,9 +1126,8 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
            this is needed because the avcodec_encode_audio() API does not have
            a way for the user to provide pts */
         if(avctx->sample_rate && avctx->time_base.num)
-            frame->pts = av_rescale_q(avctx->internal->sample_count,
-                                  (AVRational){ 1, avctx->sample_rate },
-                                  avctx->time_base);
+            frame->pts = ff_samples_to_time_base(avctx,
+                                                avctx->internal->sample_count);
         else
             frame->pts = AV_NOPTS_VALUE;
         avctx->internal->sample_count += frame->nb_samples;
diff --git a/libavcodec/v410enc.c b/libavcodec/v410enc.c
index bcd7c65bdc18f3efd1e1d739e1416a479f18f07c..f72dfd3c3ab8172f1e321efadae439e1ff3a5354 100644
--- a/libavcodec/v410enc.c
+++ b/libavcodec/v410enc.c
@@ -22,6 +22,7 @@
 
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
+#include "internal.h"
 
 static av_cold int v410_encode_init(AVCodecContext *avctx)
 {
@@ -40,20 +41,19 @@ static av_cold int v410_encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-static int v410_encode_frame(AVCodecContext *avctx, uint8_t *buf,
-                             int buf_size, void *data)
+static int v410_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                             const AVFrame *pic, int *got_packet)
 {
-    AVFrame *pic = data;
-    uint8_t *dst = buf;
+    uint8_t *dst;
     uint16_t *y, *u, *v;
     uint32_t val;
-    int i, j;
-    int output_size = 0;
+    int i, j, ret;
 
-    if (buf_size < avctx->width * avctx->height * 4) {
-        av_log(avctx, AV_LOG_ERROR, "Out buffer is too small.\n");
-        return AVERROR(ENOMEM);
+    if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height * 4)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
+        return ret;
     }
+    dst = pkt->data;
 
     avctx->coded_frame->reference = 0;
     avctx->coded_frame->key_frame = 1;
@@ -70,14 +70,15 @@ static int v410_encode_frame(AVCodecContext *avctx, uint8_t *buf,
             val |= (uint32_t) v[j] << 22;
             AV_WL32(dst, val);
             dst += 4;
-            output_size += 4;
         }
         y += pic->linesize[0] >> 1;
         u += pic->linesize[1] >> 1;
         v += pic->linesize[2] >> 1;
     }
 
-    return output_size;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+    return 0;
 }
 
 static av_cold int v410_encode_close(AVCodecContext *avctx)
@@ -92,7 +93,7 @@ AVCodec ff_v410_encoder = {
     .type         = AVMEDIA_TYPE_VIDEO,
     .id           = CODEC_ID_V410,
     .init         = v410_encode_init,
-    .encode       = v410_encode_frame,
+    .encode2      = v410_encode_frame,
     .close        = v410_encode_close,
     .pix_fmts     = (const enum PixelFormat[]){ PIX_FMT_YUV444P10, PIX_FMT_NONE },
     .long_name    = NULL_IF_CONFIG_SMALL("Uncompressed 4:4:4 10-bit"),
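
Editor's note: the v410 and zmbv conversions in this patch follow the same encode2() shape — allocate the output packet with ff_alloc_packet(), write into pkt->data, set the keyframe flag and *got_packet, and return 0 instead of a byte count. A minimal sketch of that shape (the name encode2_sketch and the worst-case size computation are placeholders, not code from the patch):

    static int encode2_sketch(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *frame, int *got_packet)
    {
        /* worst-case payload size; each encoder computes its own bound */
        int ret, size = avctx->width * avctx->height * 4;

        if ((ret = ff_alloc_packet(pkt, size)) < 0)
            return ret;

        /* ... write the compressed frame into pkt->data ... */

        pkt->flags |= AV_PKT_FLAG_KEY; /* intra-only: every packet is a keyframe */
        *got_packet = 1;               /* tell the caller a packet was produced */
        return 0;
    }
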
diff --git a/libavcodec/x86/h264_deblock.asm b/libavcodec/x86/h264_deblock.asm
index 08625e44a2d5054474e3649208b312a8b2382011..d2f8d0e3e972581cc2bbff809939555167def051 100644
--- a/libavcodec/x86/h264_deblock.asm
+++ b/libavcodec/x86/h264_deblock.asm
@@ -830,9 +830,13 @@ cglobal deblock_v_chroma_8_mmxext, 5,6
 ; void ff_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
 ;-----------------------------------------------------------------------------
 cglobal deblock_h_chroma_8_mmxext, 5,7
-%if ARCH_X86_64
+%if UNIX64
     %define buf0 [rsp-24]
     %define buf1 [rsp-16]
+%elif WIN64
+    sub   rsp, 16
+    %define buf0 [rsp]
+    %define buf1 [rsp+8]
 %else
     %define buf0 r0m
     %define buf1 r2m
@@ -849,6 +853,9 @@ cglobal deblock_h_chroma_8_mmxext, 5,7
     movq  m0, buf0
     movq  m3, buf1
     TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
+%if WIN64
+    add   rsp, 16
+%endif
     RET
 
 ALIGN 16
diff --git a/libavcodec/zmbvenc.c b/libavcodec/zmbvenc.c
index 3211e6ff4d0b2d1779d3713bfc74db6fa8778ce1..abb34a9bdbeab73d570c657ca450a92b759516aa 100644
--- a/libavcodec/zmbvenc.c
+++ b/libavcodec/zmbvenc.c
@@ -29,6 +29,7 @@
 
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
+#include "internal.h"
 
 #include <zlib.h>
 
@@ -115,19 +116,18 @@ static int zmbv_me(ZmbvEncContext *c, uint8_t *src, int sstride, uint8_t *prev,
     return bv;
 }
 
-static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data)
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *pict, int *got_packet)
 {
     ZmbvEncContext * const c = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame * const p = &c->pic;
-    uint8_t *src, *prev;
+    uint8_t *src, *prev, *buf;
     uint32_t *palptr;
-    int len = 0;
     int keyframe, chpal;
     int fl;
-    int work_size = 0;
+    int work_size = 0, pkt_size;
     int bw, bh;
-    int i, j;
+    int i, j, ret;
 
     keyframe = !c->curfrm;
     c->curfrm++;
@@ -138,17 +138,6 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
     p->key_frame= keyframe;
     chpal = !keyframe && memcmp(p->data[1], c->pal2, 1024);
 
-    fl = (keyframe ? ZMBV_KEYFRAME : 0) | (chpal ? ZMBV_DELTAPAL : 0);
-    *buf++ = fl; len++;
-    if(keyframe){
-        deflateReset(&c->zstream);
-        *buf++ = 0; len++; // hi ver
-        *buf++ = 1; len++; // lo ver
-        *buf++ = 1; len++; // comp
-        *buf++ = 4; len++; // format - 8bpp
-        *buf++ = ZMBV_BLOCK; len++; // block width
-        *buf++ = ZMBV_BLOCK; len++; // block height
-    }
     palptr = (uint32_t*)p->data[1];
     src = p->data[0];
     prev = c->prev;
@@ -223,6 +212,9 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
         src += p->linesize[0];
     }
 
+    if (keyframe)
+        deflateReset(&c->zstream);
+
     c->zstream.next_in = c->work_buf;
     c->zstream.avail_in = work_size;
     c->zstream.total_in = 0;
@@ -235,8 +227,29 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
         return -1;
     }
 
+    pkt_size = c->zstream.total_out + 1 + 6*keyframe;
+    if ((ret = ff_alloc_packet(pkt, pkt_size)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting packet of size %d.\n", pkt_size);
+        return ret;
+    }
+    buf = pkt->data;
+
+    fl = (keyframe ? ZMBV_KEYFRAME : 0) | (chpal ? ZMBV_DELTAPAL : 0);
+    *buf++ = fl;
+    if (keyframe) {
+        *buf++ = 0; // hi ver
+        *buf++ = 1; // lo ver
+        *buf++ = 1; // comp
+        *buf++ = 4; // format - 8bpp
+        *buf++ = ZMBV_BLOCK; // block width
+        *buf++ = ZMBV_BLOCK; // block height
+    }
     memcpy(buf, c->comp_buf, c->zstream.total_out);
-    return len + c->zstream.total_out;
+
+    pkt->flags |= AV_PKT_FLAG_KEY*keyframe;
+    *got_packet = 1;
+
+    return 0;
 }
 
 
@@ -329,7 +342,7 @@ AVCodec ff_zmbv_encoder = {
     .id             = CODEC_ID_ZMBV,
     .priv_data_size = sizeof(ZmbvEncContext),
     .init           = encode_init,
-    .encode         = encode_frame,
+    .encode2        = encode_frame,
     .close          = encode_end,
     .pix_fmts = (const enum PixelFormat[]){PIX_FMT_PAL8, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Zip Motion Blocks Video"),
diff --git a/libavformat/hls.c b/libavformat/hls.c
index c27b7d30b05b779b2cbf5dd836a6603603839785..4b5cd074227512ec141835a72cafc433284f3fc2 100644
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
@@ -547,7 +547,7 @@ static int hls_read_header(AVFormatContext *s)
 
     c->first_packet = 1;
     c->first_timestamp = AV_NOPTS_VALUE;
-    c->seek_timestamp = AV_NOPTS_VALUE;
+    c->seek_timestamp  = AV_NOPTS_VALUE;
 
     return 0;
 fail:
@@ -609,16 +609,13 @@ start:
         if (var->needed && !var->pkt.data) {
             while (1) {
                 int64_t ts_diff;
+                AVStream *st;
                 ret = av_read_frame(var->ctx, &var->pkt);
                 if (ret < 0) {
-                    if (!url_feof(&var->pb)) {
+                    if (!url_feof(&var->pb))
                         return ret;
-                    } else {
-                        if ((var->cur_seq_no - var->start_seq_no) == (var->n_segments)) {
-                            return AVERROR_EOF;
-                        }
-                    }
                     reset_packet(&var->pkt);
+                    break;
                 } else {
                     if (c->first_timestamp == AV_NOPTS_VALUE)
                         c->first_timestamp = var->pkt.dts;
@@ -632,18 +629,14 @@ start:
                     break;
                 }
 
-                ts_diff = var->pkt.dts - c->seek_timestamp;
-                if (ts_diff >= 0) {
-                    if (c->seek_flags & AVSEEK_FLAG_ANY) {
-                        c->seek_timestamp = AV_NOPTS_VALUE;
-                        break;
-                    }
-
-                    /* Seek to keyframe */
-                    if (var->pkt.flags & AV_PKT_FLAG_KEY) {
-                        c->seek_timestamp = AV_NOPTS_VALUE;
-                        break;
-                    }
+                st = var->ctx->streams[var->pkt.stream_index];
+                ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE,
+                                         st->time_base.den, AV_ROUND_DOWN) -
+                          c->seek_timestamp;
+                if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
+                                     var->pkt.flags & AV_PKT_FLAG_KEY)) {
+                    c->seek_timestamp = AV_NOPTS_VALUE;
+                    break;
                 }
             }
         }
@@ -685,8 +678,12 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
     if ((flags & AVSEEK_FLAG_BYTE) || !c->variants[0]->finished)
         return AVERROR(ENOSYS);
 
-    c->seek_timestamp = timestamp;
-    c->seek_flags = flags;
+    c->seek_flags     = flags;
+    c->seek_timestamp = stream_index < 0 ? timestamp :
+                        av_rescale_rnd(timestamp, AV_TIME_BASE,
+                                       s->streams[stream_index]->time_base.den,
+                                       flags & AVSEEK_FLAG_BACKWARD ?
+                                       AV_ROUND_DOWN : AV_ROUND_UP);
     timestamp = av_rescale_rnd(timestamp, 1, stream_index >= 0 ?
                                s->streams[stream_index]->time_base.den :
                                AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ?
@@ -712,6 +709,10 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
         av_free_packet(&var->pkt);
         reset_packet(&var->pkt);
         var->pb.eof_reached = 0;
+        /* Clear any buffered data */
+        var->pb.buf_end = var->pb.buf_ptr = var->pb.buffer;
+        /* Reset the pos, to let the mpegts demuxer know we've seeked. */
+        var->pb.pos = 0;
 
         /* Locate the segment that contains the target timestamp */
         for (j = 0; j < var->n_segments; j++) {
@@ -723,7 +724,7 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
             }
             pos += var->segments[j]->duration;
         }
-        if (ret != 0)
+        if (ret)
             c->seek_timestamp = AV_NOPTS_VALUE;
     }
     return ret;
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 39cf766bdf7f965cb7bbd55b2bab89342507246e..155ba677578a64574960a60d5fb81fee45b1e0cc 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -942,11 +942,10 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
         compute_frame_duration(&num, &den, st, pc, pkt);
         if (den && num) {
             pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
-
-            if(pkt->duration != 0 && s->packet_buffer)
-                update_initial_durations(s, st, pkt);
         }
     }
+    if(pkt->duration != 0 && s->packet_buffer)
+        update_initial_durations(s, st, pkt);
 
     /* correct timestamps with byte offset if demuxers only have timestamps
        on packet boundaries */
@@ -1099,6 +1098,20 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 if (pkt->size) {
                 got_packet:
                     pkt->duration = 0;
+                    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+                        if (st->codec->sample_rate > 0) {
+                            pkt->duration = av_rescale_q_rnd(st->parser->duration,
+                                                             (AVRational){ 1, st->codec->sample_rate },
+                                                             st->time_base,
+                                                             AV_ROUND_DOWN);
+                        }
+                    } else if (st->codec->time_base.num != 0 &&
+                               st->codec->time_base.den != 0) {
+                        pkt->duration = av_rescale_q_rnd(st->parser->duration,
+                                                         st->codec->time_base,
+                                                         st->time_base,
+                                                         AV_ROUND_DOWN);
+                    }
                     pkt->stream_index = st->index;
                     pkt->pts = st->parser->pts;
                     pkt->dts = st->parser->dts;
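
Editor's note: the hunk above converts the parser-reported duration into the stream time base; for audio, the parser's duration is in 1/sample_rate units, so it is rescaled with av_rescale_q_rnd() and AV_ROUND_DOWN. An illustrative sketch of that conversion (duration_in_tb and the numeric values are made up for the example, not taken from the patch):

    #include "libavutil/mathematics.h"

    static int64_t duration_in_tb(int nb_samples, int sample_rate,
                                  AVRational stream_tb)
    {
        return av_rescale_q_rnd(nb_samples, (AVRational){ 1, sample_rate },
                                stream_tb, AV_ROUND_DOWN);
    }

    /* e.g. 1152 samples at 44100 Hz in a 1/90000 time base:
     * 1152 * 90000 / 44100 = 2351.02..., rounded down to 2351. */
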
diff --git a/libavutil/avutil.h b/libavutil/avutil.h
index 71b9189ae79446fda52b5de4978dcda6d073f8d1..95346695887a299c4be9b14ca7d9751e94a261df 100644
--- a/libavutil/avutil.h
+++ b/libavutil/avutil.h
@@ -154,7 +154,7 @@
  */
 
 #define LIBAVUTIL_VERSION_MAJOR 51
-#define LIBAVUTIL_VERSION_MINOR 39
+#define LIBAVUTIL_VERSION_MINOR 40
 #define LIBAVUTIL_VERSION_MICRO 100
 
 #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
diff --git a/libavutil/mathematics.c b/libavutil/mathematics.c
index 180f72e3f0dec20e55ada3bb563d9a20af92e65c..2adefb74e0cf86ddcbee2e87c2ff17413751878d 100644
--- a/libavutil/mathematics.c
+++ b/libavutil/mathematics.c
@@ -131,10 +131,17 @@ int64_t av_rescale(int64_t a, int64_t b, int64_t c){
     return av_rescale_rnd(a, b, c, AV_ROUND_NEAR_INF);
 }
 
-int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq){
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+                         enum AVRounding rnd)
+{
     int64_t b= bq.num * (int64_t)cq.den;
     int64_t c= cq.num * (int64_t)bq.den;
-    return av_rescale_rnd(a, b, c, AV_ROUND_NEAR_INF);
+    return av_rescale_rnd(a, b, c, rnd);
+}
+
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
+{
+    return av_rescale_q_rnd(a, bq, cq, AV_ROUND_NEAR_INF);
 }
 
 int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b){
diff --git a/libavutil/mathematics.h b/libavutil/mathematics.h
index ad39e263cef859a5472a910851a579de1f4301c2..93314bae16a08a8d7ea492481be9bd4869f7a99d 100644
--- a/libavutil/mathematics.h
+++ b/libavutil/mathematics.h
@@ -95,6 +95,12 @@ int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_cons
  */
 int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
 
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+                         enum AVRounding) av_const;
+
 /**
  * Compare 2 timestamps each in its own time base.
  * The result of the function is undefined if one of the timestamps
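
Editor's note: av_rescale_q() keeps its old behaviour (AV_ROUND_NEAR_INF) and now simply forwards to the new av_rescale_q_rnd(), as the mathematics.c hunk above shows. A small illustration of why the explicit rounding mode matters (the 1/3 and 1/2 time bases are arbitrary example values):

    #include "libavutil/mathematics.h"

    void rounding_demo(void)
    {
        AVRational src = { 1, 3 }, dst = { 1, 2 };

        /* 1 tick of 1/3 s is 0.67 ticks of 1/2 s */
        int64_t r_near = av_rescale_q(1, src, dst);                    /* rounds to 1 */
        int64_t r_down = av_rescale_q_rnd(1, src, dst, AV_ROUND_DOWN); /* truncates to 0 */

        (void)r_near;
        (void)r_down;
    }
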
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index 18ec4d985aa25b857f6627c6ef41ecb1e90d26a7..930435608b8f2045768f867dabe48de5bf76cc9a 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -358,10 +358,11 @@ typedef struct SwsContext {
 #define U_TEMP                "11*8+4*4*256*2+24"
 #define V_TEMP                "11*8+4*4*256*2+32"
 #define Y_TEMP                "11*8+4*4*256*2+40"
-#define UV_OFF_PX             "11*8+4*4*256*2+48"
-#define UV_OFF_BYTE           "11*8+4*4*256*2+56"
-#define DITHER16              "11*8+4*4*256*2+64"
-#define DITHER32              "11*8+4*4*256*2+80"
+#define ALP_MMX_FILTER_OFFSET "11*8+4*4*256*2+48"
+#define UV_OFF_PX             "11*8+4*4*256*3+48"
+#define UV_OFF_BYTE           "11*8+4*4*256*3+56"
+#define DITHER16              "11*8+4*4*256*3+64"
+#define DITHER32              "11*8+4*4*256*3+80"
 
     DECLARE_ALIGNED(8, uint64_t, redDither);
     DECLARE_ALIGNED(8, uint64_t, greenDither);
@@ -383,6 +384,7 @@ typedef struct SwsContext {
     DECLARE_ALIGNED(8, uint64_t, u_temp);
     DECLARE_ALIGNED(8, uint64_t, v_temp);
     DECLARE_ALIGNED(8, uint64_t, y_temp);
+    int32_t alpMmxFilter[4 * MAX_FILTER_SIZE];
     // alignment of these values is not necessary, but merely here
     // to maintain the same offset across x86-32 and x86-64. Once we
     // use proper offset macros in the asm, they can be removed.
@@ -421,7 +423,6 @@ typedef struct SwsContext {
 #if HAVE_VIS
     DECLARE_ALIGNED(8, uint64_t, sparc_coeffs)[10];
 #endif
-    int32_t alpMmxFilter[4 * MAX_FILTER_SIZE];
     int use_mmx_vfilter;
 
     /* function pointers for swScale() */
diff --git a/libswscale/x86/swscale_template.c b/libswscale/x86/swscale_template.c
index b1791840341e968d1213b9750c7046f3669fc99c..4eee894c7e94262b59bcac291a7aad852f28e646 100644
--- a/libswscale/x86/swscale_template.c
+++ b/libswscale/x86/swscale_template.c
@@ -342,7 +342,7 @@ static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
         "movq                      %%mm2, "U_TEMP"(%0)  \n\t"
         "movq                      %%mm4, "V_TEMP"(%0)  \n\t"
         "movq                      %%mm5, "Y_TEMP"(%0)  \n\t"
-        YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
+        YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
         "movq               "Y_TEMP"(%0), %%mm5         \n\t"
         "psraw                        $3, %%mm1         \n\t"
         "psraw                        $3, %%mm7         \n\t"
@@ -372,7 +372,7 @@ static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
     if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
         YSCALEYUV2PACKEDX
         YSCALEYUV2RGBX
-        YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
+        YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
         "psraw                        $3, %%mm1         \n\t"
         "psraw                        $3, %%mm7         \n\t"
         "packuswb                  %%mm7, %%mm1         \n\t"
diff --git a/tests/ref/acodec/adpcm_yam b/tests/ref/acodec/adpcm_yam
index 0fd702954e4f5d326184b35b183698ae0560fc8a..f7c9f757a6d0f3dac320178151061347904c5cca 100644
--- a/tests/ref/acodec/adpcm_yam
+++ b/tests/ref/acodec/adpcm_yam
@@ -1,4 +1,4 @@
-006f8dc92eb4f7bab82eded314ca1124 *./tests/data/acodec/adpcm_yam.wav
-266298 ./tests/data/acodec/adpcm_yam.wav
-c36a9d5a1e0ad57fbe9665a31373b7c1 *./tests/data/adpcm_yam.acodec.out.wav
-stddev: 1247.60 PSNR: 34.41 MAXDIFF:39895 bytes:  1064960/  1058400
+e9c14f701d25947317db9367b9dc772d *./tests/data/acodec/adpcm_yam.wav
+265274 ./tests/data/acodec/adpcm_yam.wav
+1488b5974fa040a65f0d407fc0224c6a *./tests/data/adpcm_yam.acodec.out.wav
+stddev: 1247.60 PSNR: 34.41 MAXDIFF:39895 bytes:  1060864/  1058400