diff --git a/avconv.c b/avconv.c
index 103fa7505ffb89a180b958181a0296ee4a5ac0f4..6f036dac4187e2b0fb5ac6cb2c9b06dec988ddf1 100644
--- a/avconv.c
+++ b/avconv.c
@@ -127,10 +127,10 @@ static int do_hex_dump = 0;
 static int do_pkt_dump = 0;
 static int do_pass = 0;
 static const char *pass_logfilename_prefix;
-static int video_sync_method= -1;
-static int audio_sync_method= 0;
-static float audio_drift_threshold= 0.1;
-static int copy_ts= 0;
+static int video_sync_method = -1;
+static int audio_sync_method = 0;
+static float audio_drift_threshold = 0.1;
+static int copy_ts = 0;
 static int copy_tb = 1;
 static int opt_shortest = 0;
 static char *vstats_filename;
@@ -214,9 +214,9 @@ typedef struct OutputStream {
     int frame_number;
     /* input pts and corresponding output pts
        for A/V sync */
-    //double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
+    // double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
     struct InputStream *sync_ist; /* input stream to sync against */
-    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
+    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
     AVBitStreamFilterContext *bitstream_filters;
     AVCodec *enc;
     int64_t max_frames;
@@ -281,9 +281,9 @@ typedef struct OutputFile {
     uint64_t limit_filesize;
 } OutputFile;
 
-static InputStream *input_streams = NULL;
+static InputStream *input_streams   = NULL;
 static int         nb_input_streams = 0;
-static InputFile   *input_files   = NULL;
+static InputFile   *input_files     = NULL;
 static int         nb_input_files   = 0;
 
 static OutputStream *output_streams = NULL;
@@ -575,9 +575,9 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
 
     ost->graph = avfilter_graph_alloc();
 
-    if (ist->st->sample_aspect_ratio.num){
+    if (ist->st->sample_aspect_ratio.num) {
         sample_aspect_ratio = ist->st->sample_aspect_ratio;
-    }else
+    } else
         sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
 
     snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
@@ -601,7 +601,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
         return ret;
     last_filter = ost->input_video_filter;
 
-    if (codec->width  != icodec->width || codec->height != icodec->height) {
+    if (codec->width != icodec->width || codec->height != icodec->height) {
         snprintf(args, 255, "%d:%d:flags=0x%X",
                  codec->width,
                  codec->height,
@@ -646,7 +646,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
     codec->height = ost->output_video_filter->inputs[0]->h;
     codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
         ost->frame_aspect_ratio ? // overridden by the -aspect cli option
-        av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) :
+        av_d2q(ost->frame_aspect_ratio * codec->height / codec->width, 255) :
         ost->output_video_filter->inputs[0]->sample_aspect_ratio;
 
     return 0;
@@ -696,7 +696,7 @@ static void term_init(void)
     }
 #endif
 
-    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).  */
+    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
 #ifdef SIGXCPU
     signal(SIGXCPU, sigterm_handler);
@@ -747,14 +747,14 @@ void exit_program(int ret)
     int i;
 
     /* close files */
-    for(i=0;i<nb_output_files;i++) {
+    for (i = 0; i < nb_output_files; i++) {
         AVFormatContext *s = output_files[i].ctx;
         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
             avio_close(s->pb);
         avformat_free_context(s);
         av_dict_free(&output_files[i].opts);
     }
-    for(i=0;i<nb_input_files;i++) {
+    for (i = 0; i < nb_input_files; i++) {
         avformat_close_input(&input_files[i].ctx);
     }
     for (i = 0; i < nb_input_streams; i++) {
@@ -776,7 +776,7 @@ void exit_program(int ret)
     uninit_opts();
     av_free(audio_buf);
     av_free(audio_out);
-    allocated_audio_buf_size= allocated_audio_out_size= 0;
+    allocated_audio_buf_size = allocated_audio_out_size = 0;
 
 #if CONFIG_AVFILTER
     avfilter_uninit();
@@ -820,10 +820,10 @@ static void assert_codec_experimental(AVCodecContext *c, int encoder)
 
 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
 {
-    if(codec && codec->sample_fmts){
-        const enum AVSampleFormat *p= codec->sample_fmts;
-        for(; *p!=-1; p++){
-            if(*p == st->codec->sample_fmt)
+    if (codec && codec->sample_fmts) {
+        const enum AVSampleFormat *p = codec->sample_fmts;
+        for (; *p != -1; p++) {
+            if (*p == st->codec->sample_fmt)
                 break;
         }
         if (*p == -1) {
@@ -842,46 +842,47 @@ static void choose_sample_fmt(AVStream *st, AVCodec *codec)
 
 static void choose_sample_rate(AVStream *st, AVCodec *codec)
 {
-    if(codec && codec->supported_samplerates){
-        const int *p= codec->supported_samplerates;
-        int best=0;
-        int best_dist=INT_MAX;
-        for(; *p; p++){
-            int dist= abs(st->codec->sample_rate - *p);
-            if(dist < best_dist){
-                best_dist= dist;
-                best= *p;
+    if (codec && codec->supported_samplerates) {
+        const int *p  = codec->supported_samplerates;
+        int best      = 0;
+        int best_dist = INT_MAX;
+        for (; *p; p++) {
+            int dist = abs(st->codec->sample_rate - *p);
+            if (dist < best_dist) {
+                best_dist = dist;
+                best      = *p;
             }
         }
-        if(best_dist){
+        if (best_dist) {
             av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
         }
-        st->codec->sample_rate= best;
+        st->codec->sample_rate = best;
     }
 }
 
 static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
 {
-    if(codec && codec->pix_fmts){
-        const enum PixelFormat *p= codec->pix_fmts;
-        if(st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL){
-            if(st->codec->codec_id==CODEC_ID_MJPEG){
-                p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE};
-            }else if(st->codec->codec_id==CODEC_ID_LJPEG){
-                p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE};
+    if (codec && codec->pix_fmts) {
+        const enum PixelFormat *p = codec->pix_fmts;
+        if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+            if (st->codec->codec_id == CODEC_ID_MJPEG) {
+                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
+            } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
+                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
+                                                 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
             }
         }
         for (; *p != PIX_FMT_NONE; p++) {
-            if(*p == st->codec->pix_fmt)
+            if (*p == st->codec->pix_fmt)
                 break;
         }
         if (*p == PIX_FMT_NONE) {
-            if(st->codec->pix_fmt != PIX_FMT_NONE)
+            if (st->codec->pix_fmt != PIX_FMT_NONE)
                 av_log(NULL, AV_LOG_WARNING,
-                        "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
-                        av_pix_fmt_descriptors[st->codec->pix_fmt].name,
-                        codec->name,
-                        av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
+                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
+                       av_pix_fmt_descriptors[st->codec->pix_fmt].name,
+                       codec->name,
+                       av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
             st->codec->pix_fmt = codec->pix_fmts[0];
         }
     }
@@ -892,22 +893,23 @@ get_sync_ipts(const OutputStream *ost)
 {
     const InputStream *ist = ost->sync_ist;
     OutputFile *of = &output_files[ost->file_index];
-    return (double)(ist->pts - of->start_time)/AV_TIME_BASE;
+    return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
 }
 
-static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
+static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc)
+{
     int ret;
 
-    while(bsfc){
-        AVPacket new_pkt= *pkt;
-        int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
-                                          &new_pkt.data, &new_pkt.size,
-                                          pkt->data, pkt->size,
-                                          pkt->flags & AV_PKT_FLAG_KEY);
-        if(a>0){
+    while (bsfc) {
+        AVPacket new_pkt = *pkt;
+        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
+                                           &new_pkt.data, &new_pkt.size,
+                                           pkt->data, pkt->size,
+                                           pkt->flags & AV_PKT_FLAG_KEY);
+        if (a > 0) {
             av_free_packet(pkt);
-            new_pkt.destruct= av_destruct_packet;
-        } else if(a<0){
+            new_pkt.destruct = av_destruct_packet;
+        } else if (a < 0) {
             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                    bsfc->filter->name, pkt->stream_index,
                    avctx->codec ? avctx->codec->name : "copy");
@@ -915,13 +917,13 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
             if (exit_on_error)
                 exit_program(1);
         }
-        *pkt= new_pkt;
+        *pkt = new_pkt;
 
-        bsfc= bsfc->next;
+        bsfc = bsfc->next;
     }
 
-    ret= av_interleaved_write_frame(s, pkt);
-    if(ret < 0){
+    ret = av_interleaved_write_frame(s, pkt);
+    if (ret < 0) {
         print_error("av_interleaved_write_frame()", ret);
         exit_program(1);
     }
@@ -942,8 +944,8 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     int64_t audio_out_size, audio_buf_size;
 
     int size_out, frame_bytes, ret, resample_changed;
-    AVCodecContext *enc= ost->st->codec;
-    AVCodecContext *dec= ist->st->codec;
+    AVCodecContext *enc = ost->st->codec;
+    AVCodecContext *dec = ist->st->codec;
     int osize = av_get_bytes_per_sample(enc->sample_fmt);
     int isize = av_get_bytes_per_sample(dec->sample_fmt);
     const int coded_bps = av_get_bits_per_sample(enc->codec->id);
@@ -952,25 +954,25 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     int64_t allocated_for_size = size;
 
 need_realloc:
-    audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
-    audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
-    audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
-    audio_buf_size= FFMAX(audio_buf_size, enc->frame_size);
-    audio_buf_size*= osize*enc->channels;
-
-    audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
-    if(coded_bps > 8*osize)
-        audio_out_size= audio_out_size * coded_bps / (8*osize);
+    audio_buf_size  = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
+    audio_buf_size  = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
+    audio_buf_size  = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
+    audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
+    audio_buf_size *= osize * enc->channels;
+
+    audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
+    if (coded_bps > 8 * osize)
+        audio_out_size = audio_out_size * coded_bps / (8 * osize);
     audio_out_size += FF_MIN_BUFFER_SIZE;
 
-    if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
+    if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) {
         av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
         exit_program(1);
     }
 
     av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
     av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
-    if (!audio_buf || !audio_out){
+    if (!audio_buf || !audio_out) {
         av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
         exit_program(1);
     }
@@ -1018,8 +1020,8 @@ need_realloc:
     }
 
 #define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
-    if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt &&
-        MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) {
+    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
+        MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
         if (ost->reformat_ctx)
             av_audio_convert_free(ost->reformat_ctx);
         ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
@@ -1030,45 +1032,45 @@ need_realloc:
                    av_get_sample_fmt_name(enc->sample_fmt));
             exit_program(1);
         }
-        ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
+        ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
     }
 
-    if(audio_sync_method){
-        double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
-                - av_fifo_size(ost->fifo)/(enc->channels * osize);
+    if (audio_sync_method) {
+        double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts -
+                       av_fifo_size(ost->fifo) / (enc->channels * osize);
         int idelta = delta * dec->sample_rate / enc->sample_rate;
         int byte_delta = idelta * isize * dec->channels;
 
-        //FIXME resample delay
-        if(fabs(delta) > 50){
-            if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){
-                if(byte_delta < 0){
-                    byte_delta= FFMAX(byte_delta, -size);
+        // FIXME resample delay
+        if (fabs(delta) > 50) {
+            if (ist->is_start || fabs(delta) > audio_drift_threshold * enc->sample_rate) {
+                if (byte_delta < 0) {
+                    byte_delta = FFMAX(byte_delta, -size);
                     size += byte_delta;
                     buf  -= byte_delta;
                     av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
                            -byte_delta / (isize * dec->channels));
-                    if(!size)
+                    if (!size)
                         return;
-                    ist->is_start=0;
-                }else{
-                    static uint8_t *input_tmp= NULL;
-                    input_tmp= av_realloc(input_tmp, byte_delta + size);
+                    ist->is_start = 0;
+                } else {
+                    static uint8_t *input_tmp = NULL;
+                    input_tmp = av_realloc(input_tmp, byte_delta + size);
 
-                    if(byte_delta > allocated_for_size - size){
-                        allocated_for_size= byte_delta + (int64_t)size;
+                    if (byte_delta > allocated_for_size - size) {
+                        allocated_for_size = byte_delta + (int64_t)size;
                         goto need_realloc;
                     }
-                    ist->is_start=0;
+                    ist->is_start = 0;
 
                     generate_silence(input_tmp, dec->sample_fmt, byte_delta);
                     memcpy(input_tmp + byte_delta, buf, size);
-                    buf= input_tmp;
+                    buf = input_tmp;
                     size += byte_delta;
                     av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
                 }
-            }else if(audio_sync_method>1){
-                int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
+            } else if (audio_sync_method > 1) {
+                int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
                 av_assert0(ost->audio_resample);
                 av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
                        delta, comp, enc->sample_rate);
@@ -1076,9 +1078,9 @@ need_realloc:
                 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
             }
         }
-    }else
-        ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
-                        - av_fifo_size(ost->fifo)/(enc->channels * osize); //FIXME wrong
+    } else
+        ost->sync_opts = lrintf(get_sync_ipts(ost) * enc->sample_rate) -
+                         av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
 
     if (ost->audio_resample) {
         buftmp = audio_buf;
@@ -1091,20 +1093,20 @@ need_realloc:
         size_out = size;
     }
 
-    if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) {
-        const void *ibuf[6]= {buftmp};
-        void *obuf[6]= {audio_buf};
-        int istride[6]= {isize};
-        int ostride[6]= {osize};
-        int len= size_out/istride[0];
-        if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
+    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
+        const void *ibuf[6] = { buftmp };
+        void *obuf[6]  = { audio_buf };
+        int istride[6] = { isize };
+        int ostride[6] = { osize };
+        int len = size_out / istride[0];
+        if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
             printf("av_audio_convert() failed\n");
             if (exit_on_error)
                 exit_program(1);
             return;
         }
         buftmp = audio_buf;
-        size_out = len*osize;
+        size_out = len * osize;
     }
 
     /* now encode as many frames as possible */
@@ -1124,7 +1126,7 @@ need_realloc:
 
             av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
 
-            //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
+            // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
 
             ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
                                        (short *)audio_buf);
@@ -1133,11 +1135,11 @@ need_realloc:
                 exit_program(1);
             }
             audio_size += ret;
-            pkt.stream_index= ost->index;
-            pkt.data= audio_out;
-            pkt.size= ret;
-            if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+            pkt.stream_index = ost->index;
+            pkt.data = audio_out;
+            pkt.size = ret;
+            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
             pkt.flags |= AV_PKT_FLAG_KEY;
             write_frame(s, &pkt, enc, ost->bitstream_filters);
 
@@ -1153,14 +1155,14 @@ need_realloc:
         /* determine the size of the coded buffer */
         size_out /= osize;
         if (coded_bps)
-            size_out = size_out*coded_bps/8;
+            size_out = size_out * coded_bps / 8;
 
-        if(size_out > audio_out_size){
+        if (size_out > audio_out_size) {
             av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n");
             exit_program(1);
         }
 
-        //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
+        // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
         ret = avcodec_encode_audio(enc, audio_out, size_out,
                                    (short *)buftmp);
         if (ret < 0) {
@@ -1168,11 +1170,11 @@ need_realloc:
             exit_program(1);
         }
         audio_size += ret;
-        pkt.stream_index= ost->index;
-        pkt.data= audio_out;
-        pkt.size= ret;
-        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+        pkt.stream_index = ost->index;
+        pkt.data = audio_out;
+        pkt.size = ret;
+        if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+            pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
         pkt.flags |= AV_PKT_FLAG_KEY;
         write_frame(s, &pkt, enc, ost->bitstream_filters);
     }
@@ -1193,14 +1195,14 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void *
 
         /* create temporary picture */
         size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
-        buf = av_malloc(size);
+        buf  = av_malloc(size);
         if (!buf)
             return;
 
         picture2 = &picture_tmp;
         avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
 
-        if(avpicture_deinterlace(picture2, picture,
+        if (avpicture_deinterlace(picture2, picture,
                                  dec->pix_fmt, dec->width, dec->height) < 0) {
             /* if error, do not deinterlace */
             av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
@@ -1250,11 +1252,11 @@ static void do_subtitle_out(AVFormatContext *s,
     else
         nb = 1;
 
-    for(i = 0; i < nb; i++) {
+    for (i = 0; i < nb; i++) {
         sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
         // start_display_time is required to be 0
-        sub->pts              += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q);
-        sub->end_display_time -= sub->start_display_time;
+        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
+        sub->end_display_time  -= sub->start_display_time;
         sub->start_display_time = 0;
         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                     subtitle_out_max_size, sub);
@@ -1267,7 +1269,7 @@ static void do_subtitle_out(AVFormatContext *s,
         pkt.stream_index = ost->index;
         pkt.data = subtitle_out;
         pkt.size = subtitle_out_size;
-        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
         if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
             /* XXX: the pts correction is handled here. Maybe handling
                it in the codec would be better */
@@ -1280,8 +1282,8 @@ static void do_subtitle_out(AVFormatContext *s,
     }
 }
 
-static int bit_buffer_size= 1024*256;
-static uint8_t *bit_buffer= NULL;
+static int bit_buffer_size = 1024 * 256;
+static uint8_t *bit_buffer = NULL;
 
 static void do_video_resample(OutputStream *ost,
                               InputStream *ist,
@@ -1382,26 +1384,26 @@ static void do_video_out(AVFormatContext *s,
 
     if (format_video_sync) {
         double vdelta = sync_ipts - ost->sync_opts;
-        //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
+        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
         if (vdelta < -1.1)
             nb_frames = 0;
         else if (format_video_sync == 2) {
-            if(vdelta<=-0.6){
-                nb_frames=0;
-            }else if(vdelta>0.6)
-                ost->sync_opts= lrintf(sync_ipts);
-        }else if (vdelta > 1.1)
+            if (vdelta <= -0.6) {
+                nb_frames = 0;
+            } else if (vdelta > 0.6)
+                ost->sync_opts = lrintf(sync_ipts);
+        } else if (vdelta > 1.1)
             nb_frames = lrintf(vdelta);
 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
-        if (nb_frames == 0){
+        if (nb_frames == 0) {
             ++nb_frames_drop;
             av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
-        }else if (nb_frames > 1) {
+        } else if (nb_frames > 1) {
             nb_frames_dup += nb_frames - 1;
-            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames-1);
+            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
         }
-    }else
-        ost->sync_opts= lrintf(sync_ipts);
+    } else
+        ost->sync_opts = lrintf(sync_ipts);
 
     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
     if (nb_frames <= 0)
@@ -1410,10 +1412,10 @@ static void do_video_out(AVFormatContext *s,
     do_video_resample(ost, ist, in_picture, &final_picture);
 
     /* duplicates frame if needed */
-    for(i=0;i<nb_frames;i++) {
+    for (i = 0; i < nb_frames; i++) {
         AVPacket pkt;
         av_init_packet(&pkt);
-        pkt.stream_index= ost->index;
+        pkt.stream_index = ost->index;
 
         if (s->oformat->flags & AVFMT_RAWPICTURE &&
             enc->codec->id == CODEC_ID_RAWVIDEO) {
@@ -1422,16 +1424,16 @@ static void do_video_out(AVFormatContext *s,
                method. */
             enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
             enc->coded_frame->top_field_first  = in_picture->top_field_first;
-            pkt.data= (uint8_t *)final_picture;
-            pkt.size=  sizeof(AVPicture);
-            pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
+            pkt.data   = (uint8_t *)final_picture;
+            pkt.size   = sizeof(AVPicture);
+            pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
             pkt.flags |= AV_PKT_FLAG_KEY;
 
             write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
         } else {
             AVFrame big_picture;
 
-            big_picture= *final_picture;
+            big_picture = *final_picture;
             /* better than nothing: use input picture interlaced
                settings */
             big_picture.interlaced_frame = in_picture->interlaced_frame;
@@ -1448,9 +1450,9 @@ static void do_video_out(AVFormatContext *s,
             if (!enc->me_threshold)
                 big_picture.pict_type = 0;
 //            big_picture.pts = AV_NOPTS_VALUE;
-            big_picture.pts= ost->sync_opts;
+            big_picture.pts = ost->sync_opts;
 //            big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
-//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
+// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
             if (ost->forced_kf_index < ost->forced_kf_count &&
                 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                 big_picture.pict_type = AV_PICTURE_TYPE_I;
@@ -1464,22 +1466,22 @@ static void do_video_out(AVFormatContext *s,
                 exit_program(1);
             }
 
-            if(ret>0){
-                pkt.data= bit_buffer;
-                pkt.size= ret;
-                if(enc->coded_frame->pts != AV_NOPTS_VALUE)
-                    pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+            if (ret > 0) {
+                pkt.data = bit_buffer;
+                pkt.size = ret;
+                if (enc->coded_frame->pts != AV_NOPTS_VALUE)
+                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
    pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
    pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
 
-                if(enc->coded_frame->key_frame)
+                if (enc->coded_frame->key_frame)
                     pkt.flags |= AV_PKT_FLAG_KEY;
                 write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
                 *frame_size = ret;
                 video_size += ret;
-                //fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
-                //        enc->frame_number-1, ret, enc->pict_type);
+                // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
+                //         enc->frame_number-1, ret, enc->pict_type);
                 /* if two pass, output log */
                 if (ost->logfile && enc->stats_out) {
                     fprintf(ost->logfile, "%s", enc->stats_out);
@@ -1491,8 +1493,9 @@ static void do_video_out(AVFormatContext *s,
     }
 }
 
-static double psnr(double d){
-    return -10.0*log(d)/log(10.0);
+static double psnr(double d)
+{
+    return -10.0 * log(d) / log(10.0);
 }
 
 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
@@ -1514,9 +1517,9 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     enc = ost->st->codec;
     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
         frame_number = ost->frame_number;
-        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
+        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
         if (enc->flags&CODEC_FLAG_PSNR)
-            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
+            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 
         fprintf(vstats_file,"f_size= %6d ", frame_size);
         /* compute pts value */
@@ -1524,10 +1527,10 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
         if (ti1 < 0.01)
             ti1 = 0.01;
 
-        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
+        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
         avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
-            (double)video_size / 1024, ti1, bitrate, avg_bitrate);
+                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
     }
 }
@@ -1568,56 +1571,57 @@ static void print_report(OutputFile *output_files,
     oc = output_files[0].ctx;
 
     total_size = avio_size(oc->pb);
-    if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too
-        total_size= avio_tell(oc->pb);
+    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
+        total_size = avio_tell(oc->pb);
 
     buf[0] = '\0';
     vid = 0;
-    for(i=0;i<nb_ostreams;i++) {
+    for (i = 0; i < nb_ostreams; i++) {
         float q = -1;
         ost = &ost_table[i];
         enc = ost->st->codec;
         if (!ost->stream_copy && enc->coded_frame)
-            q = enc->coded_frame->quality/(float)FF_QP2LAMBDA;
+            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
         }
         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
-            float t = (av_gettime()-timer_start) / 1000000.0;
+            float t = (av_gettime() - timer_start) / 1000000.0;
 
             frame_number = ost->frame_number;
             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
-                     frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q);
-            if(is_last_report)
+                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
+            if (is_last_report)
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
-            if(qp_hist){
+            if (qp_hist) {
                 int j;
                 int qp = lrintf(q);
-                if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
+                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                     qp_histogram[qp]++;
-                for(j=0; j<32; j++)
-                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
+                for (j = 0; j < 32; j++)
+                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
             }
-            if (enc->flags&CODEC_FLAG_PSNR){
+            if (enc->flags&CODEC_FLAG_PSNR) {
                 int j;
-                double error, error_sum=0;
-                double scale, scale_sum=0;
-                char type[3]= {'Y','U','V'};
+                double error, error_sum = 0;
+                double scale, scale_sum = 0;
+                char type[3] = { 'Y','U','V' };
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
-                for(j=0; j<3; j++){
-                    if(is_last_report){
-                        error= enc->error[j];
-                        scale= enc->width*enc->height*255.0*255.0*frame_number;
-                    }else{
-                        error= enc->coded_frame->error[j];
-                        scale= enc->width*enc->height*255.0*255.0;
+                for (j = 0; j < 3; j++) {
+                    if (is_last_report) {
+                        error = enc->error[j];
+                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
+                    } else {
+                        error = enc->coded_frame->error[j];
+                        scale = enc->width * enc->height * 255.0 * 255.0;
                     }
-                    if(j) scale/=4;
+                    if (j)
+                        scale /= 4;
                     error_sum += error;
                     scale_sum += scale;
-                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
+                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                 }
-                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
+                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
             }
             vid = 1;
         }
@@ -1655,10 +1659,10 @@ static void print_report(OutputFile *output_files,
         int64_t raw= audio_size + video_size + extra_size;
         av_log(NULL, AV_LOG_INFO, "\n");
         av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
-               video_size/1024.0,
-               audio_size/1024.0,
-               extra_size/1024.0,
-               100.0*(total_size - raw)/raw
+               video_size / 1024.0,
+               audio_size / 1024.0,
+               extra_size / 1024.0,
+               100.0 * (total_size - raw) / raw
         );
     }
 }
@@ -1675,16 +1679,16 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
         if (!ost->encoding_needed)
             continue;
 
-        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
             continue;
         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
             continue;
 
-        for(;;) {
+        for (;;) {
             AVPacket pkt;
             int fifo_bytes;
             av_init_packet(&pkt);
-            pkt.stream_index= ost->index;
+            pkt.stream_index = ost->index;
 
             switch (ost->st->codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
@@ -1718,7 +1722,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
                     exit_program(1);
                 }
                 audio_size += ret;
-                pkt.flags |= AV_PKT_FLAG_KEY;
+                pkt.flags  |= AV_PKT_FLAG_KEY;
                 break;
             case AVMEDIA_TYPE_VIDEO:
                 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
@@ -1727,14 +1731,14 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
                     exit_program(1);
                 }
                 video_size += ret;
-                if(enc->coded_frame && enc->coded_frame->key_frame)
+                if (enc->coded_frame && enc->coded_frame->key_frame)
                     pkt.flags |= AV_PKT_FLAG_KEY;
                 if (ost->logfile && enc->stats_out) {
                     fprintf(ost->logfile, "%s", enc->stats_out);
                 }
                 break;
             default:
-                ret=-1;
+                ret = -1;
             }
 
             if (ret <= 0)
@@ -1742,7 +1746,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
             pkt.data = bit_buffer;
             pkt.size = ret;
             if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
             write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
         }
     }
@@ -1764,7 +1768,7 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
 
     if (of->recording_time != INT64_MAX &&
         av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
-                      (AVRational){1, 1000000}) >= 0) {
+                      (AVRational){ 1, 1000000 }) >= 0) {
         ost->is_past_recording_time = 1;
         return 0;
     }
@@ -1808,8 +1812,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
     opkt.flags    = pkt->flags;
 
-    //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
-    if(   ost->st->codec->codec_id != CODEC_ID_H264
+    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
+    if (  ost->st->codec->codec_id != CODEC_ID_H264
        && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
        && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
        ) {
@@ -2109,12 +2113,12 @@ static int output_packet(InputStream *ist,
         avpkt = *pkt;
     }
 
-    if(pkt->dts != AV_NOPTS_VALUE)
+    if (pkt->dts != AV_NOPTS_VALUE)
         ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
-    if(pkt->pts != AV_NOPTS_VALUE)
+    if (pkt->pts != AV_NOPTS_VALUE)
         pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 
-    //while we have more to decode or while the decoder did output something on EOF
+    // while we have more to decode or while the decoder did output something on EOF
     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
         int ret = 0;
     handle_eof:
@@ -2127,7 +2131,7 @@ static int output_packet(InputStream *ist,
             ist->showed_multi_packet_warning = 1;
         }
 
-        switch(ist->st->codec->codec_type) {
+        switch (ist->st->codec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
             ret = transcode_audio    (ist, &avpkt, &got_output);
             break;
@@ -2164,7 +2168,7 @@ static int output_packet(InputStream *ist,
             break;
         case AVMEDIA_TYPE_VIDEO:
             if (ist->st->codec->time_base.num != 0) {
-                int ticks = ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
+                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
                 ist->next_pts += ((int64_t)AV_TIME_BASE *
                                   ist->st->codec->time_base.num * ticks) /
                                   ist->st->codec->time_base.den;
@@ -2188,7 +2192,7 @@ static void print_sdp(OutputFile *output_files, int n)
 {
     char sdp[2048];
     int i;
-    AVFormatContext **avc = av_malloc(sizeof(*avc)*n);
+    AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
 
     if (!avc)
         exit_program(1);
@@ -2229,7 +2233,7 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb
         assert_avoptions(ist->opts);
     }
 
-    ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames*AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
+    ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
     ist->next_pts = AV_NOPTS_VALUE;
     ist->is_start = 1;
 
@@ -2320,9 +2324,9 @@ static int transcode_init(OutputFile *output_files,
             } else
                 codec->time_base = ist->st->time_base;
 
-            switch(codec->codec_type) {
+            switch (codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
-                if(audio_volume != 256) {
+                if (audio_volume != 256) {
                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                     exit_program(1);
                 }
@@ -2363,7 +2367,7 @@ static int transcode_init(OutputFile *output_files,
             ist->decoding_needed = 1;
             ost->encoding_needed = 1;
 
-            switch(codec->codec_type) {
+            switch (codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
                 ost->fifo = av_fifo_alloc(1024);
                 if (!ost->fifo) {
@@ -2374,7 +2378,7 @@ static int transcode_init(OutputFile *output_files,
                 if (!codec->sample_rate)
                     codec->sample_rate = icodec->sample_rate;
                 choose_sample_rate(ost->st, ost->enc);
-                codec->time_base = (AVRational){1, codec->sample_rate};
+                codec->time_base = (AVRational){ 1, codec->sample_rate };
 
                 if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
                     codec->sample_fmt = icodec->sample_fmt;
@@ -2420,7 +2424,7 @@ static int transcode_init(OutputFile *output_files,
                 ost->resample_pix_fmt = icodec->pix_fmt;
 
                 if (!ost->frame_rate.num)
-                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25,1};
+                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){ 25, 1 };
                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                     ost->frame_rate = ost->enc->supported_framerates[idx];
@@ -2474,10 +2478,10 @@ static int transcode_init(OutputFile *output_files,
                 }
             }
         }
-        if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
+        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
             /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */
             int        size = codec->width * codec->height;
-            bit_buffer_size = FFMAX(bit_buffer_size, 6*size + 1664);
+            bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 1664);
         }
     }
 
@@ -2625,7 +2629,7 @@ static int transcode(OutputFile *output_files,
     OutputStream *ost;
     InputStream *ist;
     uint8_t *no_packet;
-    int no_packet_count=0;
+    int no_packet_count = 0;
     int64_t timer_start;
     int key;
 
@@ -2644,14 +2648,14 @@ static int transcode(OutputFile *output_files,
 
     timer_start = av_gettime();
 
-    for(; received_sigterm == 0;) {
+    for (; received_sigterm == 0;) {
         int file_index, ist_index;
         AVPacket pkt;
         int64_t ipts_min;
         double opts_min;
 
         ipts_min = INT64_MAX;
-        opts_min= 1e100;
+        opts_min = 1e100;
         /* if 'q' pressed, exits */
         if (!using_stdin) {
             if (q_pressed)
@@ -2720,14 +2724,16 @@ static int transcode(OutputFile *output_files,
                 continue;
             opts = ost->st->pts.val * av_q2d(ost->st->time_base);
             ipts = ist->pts;
-            if (!input_files[ist->file_index].eof_reached){
-                if(ipts < ipts_min) {
+            if (!input_files[ist->file_index].eof_reached) {
+                if (ipts < ipts_min) {
                     ipts_min = ipts;
-                    if(input_sync ) file_index = ist->file_index;
+                    if (input_sync)
+                        file_index = ist->file_index;
                 }
-                if(opts < opts_min) {
+                if (opts < opts_min) {
                     opts_min = opts;
-                    if(!input_sync) file_index = ist->file_index;
+                    if (!input_sync)
+                        file_index = ist->file_index;
                 }
             }
             if (ost->frame_number >= ost->max_frames) {
@@ -2739,8 +2744,8 @@ static int transcode(OutputFile *output_files,
         }
         /* if none, if is finished */
         if (file_index < 0) {
-            if(no_packet_count){
-                no_packet_count=0;
+            if (no_packet_count) {
+                no_packet_count = 0;
                 memset(no_packet, 0, nb_input_files);
                 usleep(10000);
                 continue;
@@ -2749,10 +2754,10 @@ static int transcode(OutputFile *output_files,
         }
 
         /* read a frame from it and output it in the fifo */
-        is = input_files[file_index].ctx;
-        ret= av_read_frame(is, &pkt);
-        if(ret == AVERROR(EAGAIN)){
-            no_packet[file_index]=1;
+        is  = input_files[file_index].ctx;
+        ret = av_read_frame(is, &pkt);
+        if (ret == AVERROR(EAGAIN)) {
+            no_packet[file_index] = 1;
             no_packet_count++;
             continue;
         }
@@ -2764,7 +2769,7 @@ static int transcode(OutputFile *output_files,
                 continue;
         }
 
-        no_packet_count=0;
+        no_packet_count = 0;
         memset(no_packet, 0, nb_input_files);
 
         if (do_pkt_dump) {
@@ -2785,27 +2790,31 @@ static int transcode(OutputFile *output_files,
         if (pkt.pts != AV_NOPTS_VALUE)
             pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 
-        if(pkt.pts != AV_NOPTS_VALUE)
+        if (pkt.pts != AV_NOPTS_VALUE)
             pkt.pts *= ist->ts_scale;
-        if(pkt.dts != AV_NOPTS_VALUE)
+        if (pkt.dts != AV_NOPTS_VALUE)
             pkt.dts *= ist->ts_scale;
 
-//        fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type);
+        // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
+        //         ist->next_pts,
+        //         pkt.dts, input_files[ist->file_index].ts_offset,
+        //         ist->st->codec->codec_type);
         if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
-            int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
-            int64_t delta= pkt_dts - ist->next_pts;
-            if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){
+            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+            int64_t delta   = pkt_dts - ist->next_pts;
+            if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->pts) && !copy_ts) {
                 input_files[ist->file_index].ts_offset -= delta;
-                av_log(NULL, AV_LOG_DEBUG, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+                av_log(NULL, AV_LOG_DEBUG,
+                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                        delta, input_files[ist->file_index].ts_offset);
-                pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
-                if(pkt.pts != AV_NOPTS_VALUE)
+                if (pkt.pts != AV_NOPTS_VALUE)
-                    pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
             }
         }
 
-        //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
+        // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
         if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
 
             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
@@ -2835,7 +2844,7 @@ static int transcode(OutputFile *output_files,
     term_exit();
 
     /* write the trailer if needed and close file */
-    for(i=0;i<nb_output_files;i++) {
+    for (i = 0; i < nb_output_files; i++) {
         os = output_files[i].ctx;
         av_write_trailer(os);
     }
@@ -2909,7 +2918,7 @@ static double parse_frame_aspect_ratio(const char *arg)
     if (p) {
         x = strtol(arg, &end, 10);
         if (end == p)
-            y = strtol(end+1, &end, 10);
+            y = strtol(end + 1, &end, 10);
         if (x > 0 && y > 0)
             ar = (double)x / (double)y;
     } else
@@ -3147,11 +3156,11 @@ static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int e
     codec = encoder ?
         avcodec_find_encoder_by_name(name) :
         avcodec_find_decoder_by_name(name);
-    if(!codec) {
+    if (!codec) {
         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
         exit_program(1);
     }
-    if(codec->type != type) {
+    if (codec->type != type) {
         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
         exit_program(1);
     }
@@ -3201,7 +3210,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             if(!ist->dec)
                 ist->dec = avcodec_find_decoder(dec->codec_id);
             if(o->audio_disable)
-                st->discard= AVDISCARD_ALL;
+                st->discard = AVDISCARD_ALL;
             break;
         case AVMEDIA_TYPE_VIDEO:
             if(!ist->dec)
@@ -3212,7 +3221,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
                 dec->flags |= CODEC_FLAG_EMU_EDGE;
             }
 
-            if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) {
+            if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
 
                 av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
                        i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
@@ -3220,9 +3229,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             }
 
             if (o->video_disable)
-                st->discard= AVDISCARD_ALL;
-            else if(video_discard)
-                st->discard= video_discard;
+                st->discard = AVDISCARD_ALL;
+            else if (video_discard)
+                st->discard = video_discard;
             break;
         case AVMEDIA_TYPE_DATA:
             break;
@@ -3426,7 +3435,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
         if (*p == ',')
             n++;
     ost->forced_kf_count = n;
-    ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
+    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
     if (!ost->forced_kf_pts) {
         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
         exit_program(1);
@@ -3523,8 +3532,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
                                 nb_output_streams + 1);
     ost = &output_streams[nb_output_streams - 1];
     ost->file_index = nb_output_files;
-    ost->index = idx;
-    ost->st    = st;
+    ost->index      = idx;
+    ost->st         = st;
     st->codec->codec_type = type;
     choose_encoder(o, oc, ost);
     if (ost->enc) {
@@ -3604,12 +3613,12 @@ static void parse_matrix_coeffs(uint16_t *dest, const char *str)
 {
     int i;
     const char *p = str;
-    for(i = 0;; i++) {
+    for (i = 0;; i++) {
         dest[i] = atoi(p);
-        if(i == 63)
+        if (i == 63)
             break;
         p = strchr(p, ',');
-        if(!p) {
+        if (!p) {
             av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
             exit_program(1);
         }
@@ -3675,33 +3684,33 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
         }
 
         MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
-        for(i=0; p; i++){
+        for (i = 0; p; i++) {
             int start, end, q;
-            int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
-            if(e!=3){
+            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
+            if (e != 3) {
                 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
                 exit_program(1);
             }
-            video_enc->rc_override=
+            video_enc->rc_override =
                 av_realloc(video_enc->rc_override,
-                           sizeof(RcOverride)*(i+1));
-            video_enc->rc_override[i].start_frame= start;
-            video_enc->rc_override[i].end_frame  = end;
-            if(q>0){
-                video_enc->rc_override[i].qscale= q;
-                video_enc->rc_override[i].quality_factor= 1.0;
+                           sizeof(RcOverride) * (i + 1));
+            video_enc->rc_override[i].start_frame = start;
+            video_enc->rc_override[i].end_frame   = end;
+            if (q > 0) {
+                video_enc->rc_override[i].qscale         = q;
+                video_enc->rc_override[i].quality_factor = 1.0;
             }
-            else{
-                video_enc->rc_override[i].qscale= 0;
-                video_enc->rc_override[i].quality_factor= -q/100.0;
+            else {
+                video_enc->rc_override[i].qscale         = 0;
+                video_enc->rc_override[i].quality_factor = -q / 100.0;
             }
-            p= strchr(p, '/');
-            if(p) p++;
+            p = strchr(p, '/');
+            if (p) p++;
         }
-        video_enc->rc_override_count=i;
+        video_enc->rc_override_count = i;
         if (!video_enc->rc_initial_buffer_occupancy)
-            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
-        video_enc->intra_dc_precision= intra_dc_precision - 8;
+            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
+        video_enc->intra_dc_precision = intra_dc_precision - 8;
 
         /* two pass mode */
         if (do_pass) {
@@ -3829,7 +3838,7 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
     for (i = 0; i < is->nb_chapters; i++) {
         AVChapter *in_ch = is->chapters[i], *out_ch;
         int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
-                                      AV_TIME_BASE_Q, in_ch->time_base);
+                                        AV_TIME_BASE_Q, in_ch->time_base);
         int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
                            av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
 
@@ -3852,7 +3861,7 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
             av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
 
         os->nb_chapters++;
-        os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
+        os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
         if (!os->chapters)
             return AVERROR(ENOMEM);
         os->chapters[os->nb_chapters - 1] = out_ch;
@@ -3949,7 +3958,7 @@ static void opt_output_file(void *optctx, const char *filename)
             }
 
             ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
-            ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
+            ost->sync_ist     = &input_streams[input_files[map->sync_file_index].ist_index +
                                            map->sync_stream_index];
             ist->discard = 0;
         }
@@ -4216,36 +4225,36 @@ static int opt_help(const char *opt, const char *arg)
 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 {
     enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
-    static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"};
+    static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
 
-    if(!strncmp(arg, "pal-", 4)) {
+    if (!strncmp(arg, "pal-", 4)) {
         norm = PAL;
         arg += 4;
-    } else if(!strncmp(arg, "ntsc-", 5)) {
+    } else if (!strncmp(arg, "ntsc-", 5)) {
         norm = NTSC;
         arg += 5;
-    } else if(!strncmp(arg, "film-", 5)) {
+    } else if (!strncmp(arg, "film-", 5)) {
         norm = FILM;
         arg += 5;
     } else {
         /* Try to determine PAL/NTSC by peeking in the input files */
-        if(nb_input_files) {
+        if (nb_input_files) {
             int i, j, fr;
             for (j = 0; j < nb_input_files; j++) {
                 for (i = 0; i < input_files[j].nb_streams; i++) {
                     AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
-                    if(c->codec_type != AVMEDIA_TYPE_VIDEO)
+                    if (c->codec_type != AVMEDIA_TYPE_VIDEO)
                         continue;
                     fr = c->time_base.den * 1000 / c->time_base.num;
-                    if(fr == 25000) {
+                    if (fr == 25000) {
                         norm = PAL;
                         break;
-                    } else if((fr == 29970) || (fr == 23976)) {
+                    } else if ((fr == 29970) || (fr == 23976)) {
                         norm = NTSC;
                         break;
                     }
                 }
-                if(norm != UNKNOWN)
+                if (norm != UNKNOWN)
                     break;
             }
         }
@@ -4253,14 +4262,14 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
             av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
     }
 
-    if(norm == UNKNOWN) {
+    if (norm == UNKNOWN) {
         av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
         av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
         av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
         exit_program(1);
     }
 
-    if(!strcmp(arg, "vcd")) {
+    if (!strcmp(arg, "vcd")) {
         opt_video_codec(o, "c:v", "mpeg1video");
         opt_audio_codec(o, "c:a", "mp2");
         parse_option(o, "f", "vcd", options);
@@ -4286,8 +4295,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
            and the first pack from the other stream, respectively, may also have
            been written before.
            So the real data starts at SCR 36000+3*1200. */
-        o->mux_preload = (36000+3*1200) / 90000.0; //0.44
-    } else if(!strcmp(arg, "svcd")) {
+        o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
+    } else if (!strcmp(arg, "svcd")) {
 
         opt_video_codec(o, "c:v", "mpeg2video");
         opt_audio_codec(o, "c:a", "mp2");
@@ -4299,8 +4308,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 
         opt_default("b", "2040000");
         opt_default("maxrate", "2516000");
-        opt_default("minrate", "0"); //1145000;
-        opt_default("bufsize", "1835008"); //224*1024*8;
+        opt_default("minrate", "0"); // 1145000;
+        opt_default("bufsize", "1835008"); // 224*1024*8;
         opt_default("flags", "+scan_offset");
 
 
@@ -4309,7 +4318,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 
         opt_default("packetsize", "2324");
 
-    } else if(!strcmp(arg, "dvd")) {
+    } else if (!strcmp(arg, "dvd")) {
 
         opt_video_codec(o, "c:v", "mpeg2video");
         opt_audio_codec(o, "c:a", "ac3");
@@ -4321,8 +4330,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 
         opt_default("b", "6000000");
         opt_default("maxrate", "9000000");
-        opt_default("minrate", "0"); //1500000;
-        opt_default("bufsize", "1835008"); //224*1024*8;
+        opt_default("minrate", "0"); // 1500000;
+        opt_default("bufsize", "1835008"); // 224*1024*8;
 
         opt_default("packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
         opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
@@ -4330,7 +4339,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
         opt_default("b:a", "448000");
         parse_option(o, "ar", "48000", options);
 
-    } else if(!strncmp(arg, "dv", 2)) {
+    } else if (!strncmp(arg, "dv", 2)) {
 
         parse_option(o, "f", "dv", options);
 
@@ -4352,7 +4361,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 static int opt_vstats_file(const char *opt, const char *arg)
 {
     av_free (vstats_filename);
-    vstats_filename=av_strdup (arg);
+    vstats_filename = av_strdup(arg);
     return 0;
 }
 
@@ -4563,7 +4572,7 @@ int main(int argc, char **argv)
     /* parse options */
     parse_options(&o, argc, argv, options, opt_output_file);
 
-    if(nb_output_files <= 0 && nb_input_files == 0) {
+    if (nb_output_files <= 0 && nb_input_files == 0) {
         show_usage();
         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
         exit_program(1);
diff --git a/doc/doxy/doxy_stylesheet.css b/doc/doxy/doxy_stylesheet.css
index a5500b69af3c32b23887e15f972e8c7c46e181ec..ff1e2147d3509088729b0c62e238bbbc4bc77f15 100644
--- a/doc/doxy/doxy_stylesheet.css
+++ b/doc/doxy/doxy_stylesheet.css
@@ -235,6 +235,8 @@ div.center img {
 #footer {
     margin: -10px 1em 0;
     padding-top: 20px;
+    text-align: center;
+    font-size: small;
 }
 
 address.footer {
@@ -962,7 +964,7 @@ dl.citelist dd {
 }
 
 .tabs3 .tablist a {
-    padding: 0 10px;
+    padding-left: 10px;
 }
 
 
@@ -981,19 +983,6 @@ h1 a, h2 a, h3 a {
     color: inherit;
 }
 
-#banner, #top {
-    background-color: #BBC9D8;
-    border-bottom: 1px solid #7A96B3;
-    border-top: 1px solid #7A96B3;
-    position: relative;
-    text-align: center;
-}
-
-#banner img, #top img {
-    padding-bottom: 1px;
-    padding-top: 5px;
-}
-
 #body {
     margin: 0 1em;
 }
@@ -1041,12 +1030,12 @@ img {
     border: 0;
 }
 
-#navrow1 {
+.tabs {
     margin-top: 12px;
     border-top: 1px solid #5C665C;
 }
 
-#navrow1, #navrow2, #navrow3, #navrow4 {
+.tabs, .tabs2, .tabs3, .tabs4 {
     background-color: #738073;
     border-bottom: 1px solid #5C665C;
     border-left: 1px solid #5C665C;
@@ -1055,24 +1044,36 @@ img {
     text-align: center;
 }
 
-#navrow1 a, #navrow2 a, #navrow3 a, #navrow4 a {
+.tabs a,
+.tabs2 a,
+.tabs3 a,
+.tabs4 a {
     color: white;
     padding: 0.3em;
     text-decoration: none;
 }
 
 
-#navrow1 ul, #navrow2 ul, #navrow3 ul, #navrow4 ul {
+.tabs ul,
+.tabs2 ul,
+.tabs3 ul,
+.tabs4 ul {
     padding: 0;
 }
 
-#navrow1 li.current a, #navrow2 li.current a, #navrow3 li.current a, #navrow4 li.current a {
+.tabs li.current a,
+.tabs2 li.current a,
+.tabs3 li.current a,
+.tabs4 li.current a {
     background-color: #414141;
     color: white;
     text-decoration: none;
 }
 
-#navrow1 a:hover, #navrow2 a:hover, #navrow3 a:hover, #navrow4 a:hover {
+.tabs a:hover,
+.tabs2 a:hover,
+.tabs3 a:hover,
+.tabs4 a:hover {
     background-color: #313131 !important;
     color: white;
     text-decoration: none;
@@ -1094,37 +1095,3 @@ pre {
 #proj_desc {
     font-size: 1.2em;
 }
-
-#repos {
-    margin-left: 1em;
-    margin-right: 1em;
-    border-collapse: collapse;
-    border: solid 1px #6A996A;
-}
-
-#repos th {
-    background-color: #7BB37B;
-    border: solid 1px #6A996A;
-}
-
-#repos td {
-    padding: 0.2em;
-    border: solid 1px #6A996A;
-}
-
-#distro_status {
-    margin-left: 1em;
-    margin-right: 1em;
-    border-collapse: collapse;
-    border: solid 1px #6A996A;
-}
-
-#distro_status th {
-    background-color: #7BB37B;
-    border: solid 1px #6A996A;
-}
-
-#distro_status td {
-    padding: 0.2em;
-    border: solid 1px #6A996A;
-}
diff --git a/ffmpeg.c b/ffmpeg.c
index b68295862c46d3c3d076579654dbe761f8817dc5..1fe9fa67fee2e1bc775e3704c3137fa3a0e1d6d8 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -144,11 +144,11 @@ static int do_pkt_dump = 0;
 static int do_psnr = 0;
 static int do_pass = 0;
 static const char *pass_logfilename_prefix;
-static int video_sync_method= -1;
-static int audio_sync_method= 0;
-static float audio_drift_threshold= 0.1;
-static int copy_ts= 0;
-static int copy_tb= -1;
+static int video_sync_method = -1;
+static int audio_sync_method = 0;
+static float audio_drift_threshold = 0.1;
+static int copy_ts = 0;
+static int copy_tb = -1;
 static int opt_shortest = 0;
 static char *vstats_filename;
 static FILE *vstats_file;
@@ -234,9 +234,9 @@ typedef struct OutputStream {
     int frame_number;
     /* input pts and corresponding output pts
        for A/V sync */
-    //double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
+    // double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
     struct InputStream *sync_ist; /* input stream to sync against */
-    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
+    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
     AVBitStreamFilterContext *bitstream_filters;
     AVCodec *enc;
     int64_t max_frames;
@@ -304,9 +304,9 @@ typedef struct OutputFile {
     uint64_t limit_filesize;
 } OutputFile;
 
-static InputStream *input_streams = NULL;
+static InputStream *input_streams   = NULL;
 static int         nb_input_streams = 0;
-static InputFile   *input_files   = NULL;
+static InputFile   *input_files     = NULL;
 static int         nb_input_files   = 0;
 
 static OutputStream *output_streams = NULL;
@@ -609,9 +609,9 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
 
     ost->graph = avfilter_graph_alloc();
 
-    if (ist->st->sample_aspect_ratio.num){
+    if (ist->st->sample_aspect_ratio.num) {
         sample_aspect_ratio = ist->st->sample_aspect_ratio;
-    }else
+    } else
         sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
 
     snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
@@ -635,7 +635,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
         return ret;
     last_filter = ost->input_video_filter;
 
-    if (codec->width  != icodec->width || codec->height != icodec->height) {
+    if (codec->width != icodec->width || codec->height != icodec->height) {
         snprintf(args, 255, "%d:%d:flags=0x%X",
                  codec->width,
                  codec->height,
@@ -680,7 +680,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
     codec->height = ost->output_video_filter->inputs[0]->h;
     codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
         ost->frame_aspect_ratio ? // overridden by the -aspect cli option
-        av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) :
+        av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
         ost->output_video_filter->inputs[0]->sample_aspect_ratio;
 
     return 0;
@@ -732,7 +732,7 @@ static void term_init(void)
 #endif
     avformat_network_deinit();
 
-    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).  */
+    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
 #ifdef SIGXCPU
     signal(SIGXCPU, sigterm_handler);
@@ -805,14 +805,14 @@ void av_noreturn exit_program(int ret)
     int i;
 
     /* close files */
-    for(i=0;i<nb_output_files;i++) {
+    for (i = 0; i < nb_output_files; i++) {
         AVFormatContext *s = output_files[i].ctx;
         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
             avio_close(s->pb);
         avformat_free_context(s);
         av_dict_free(&output_files[i].opts);
     }
-    for(i=0;i<nb_input_files;i++) {
+    for (i = 0; i < nb_input_files; i++) {
         avformat_close_input(&input_files[i].ctx);
     }
     for (i = 0; i < nb_input_streams; i++) {
@@ -834,7 +834,7 @@ void av_noreturn exit_program(int ret)
     uninit_opts();
     av_free(audio_buf);
     av_free(audio_out);
-    allocated_audio_buf_size= allocated_audio_out_size= 0;
+    allocated_audio_buf_size = allocated_audio_out_size = 0;
 
 #if CONFIG_AVFILTER
     avfilter_uninit();
@@ -880,10 +880,10 @@ static void assert_codec_experimental(AVCodecContext *c, int encoder)
 
 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
 {
-    if(codec && codec->sample_fmts){
-        const enum AVSampleFormat *p= codec->sample_fmts;
-        for(; *p!=-1; p++){
-            if(*p == st->codec->sample_fmt)
+    if (codec && codec->sample_fmts) {
+        const enum AVSampleFormat *p = codec->sample_fmts;
+        for (; *p != -1; p++) {
+            if (*p == st->codec->sample_fmt)
                 break;
         }
         if (*p == -1) {
@@ -902,49 +902,50 @@ static void choose_sample_fmt(AVStream *st, AVCodec *codec)
 
 static void choose_sample_rate(AVStream *st, AVCodec *codec)
 {
-    if(codec && codec->supported_samplerates){
-        const int *p= codec->supported_samplerates;
-        int best=0;
-        int best_dist=INT_MAX;
-        for(; *p; p++){
-            int dist= abs(st->codec->sample_rate - *p);
-            if(dist < best_dist){
-                best_dist= dist;
-                best= *p;
+    if (codec && codec->supported_samplerates) {
+        const int *p  = codec->supported_samplerates;
+        int best      = 0;
+        int best_dist = INT_MAX;
+        for (; *p; p++) {
+            int dist = abs(st->codec->sample_rate - *p);
+            if (dist < best_dist) {
+                best_dist = dist;
+                best      = *p;
             }
         }
-        if(best_dist){
+        if (best_dist) {
             av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
         }
-        st->codec->sample_rate= best;
+        st->codec->sample_rate = best;
     }
 }
 
 static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
 {
-    if(codec && codec->pix_fmts){
-        const enum PixelFormat *p= codec->pix_fmts;
+    if (codec && codec->pix_fmts) {
+        const enum PixelFormat *p = codec->pix_fmts;
         int has_alpha= av_pix_fmt_descriptors[st->codec->pix_fmt].nb_components % 2 == 0;
         enum PixelFormat best= PIX_FMT_NONE;
-        if(st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL){
-            if(st->codec->codec_id==CODEC_ID_MJPEG){
-                p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE};
-            }else if(st->codec->codec_id==CODEC_ID_LJPEG){
-                p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE};
+        if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+            if (st->codec->codec_id == CODEC_ID_MJPEG) {
+                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
+            } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
+                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
+                                                 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
             }
         }
         for (; *p != PIX_FMT_NONE; p++) {
             best= avcodec_find_best_pix_fmt2(best, *p, st->codec->pix_fmt, has_alpha, NULL);
-            if(*p == st->codec->pix_fmt)
+            if (*p == st->codec->pix_fmt)
                 break;
         }
         if (*p == PIX_FMT_NONE) {
-            if(st->codec->pix_fmt != PIX_FMT_NONE)
+            if (st->codec->pix_fmt != PIX_FMT_NONE)
                 av_log(NULL, AV_LOG_WARNING,
-                        "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
-                        av_pix_fmt_descriptors[st->codec->pix_fmt].name,
-                        codec->name,
-                        av_pix_fmt_descriptors[best].name);
+                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
+                       av_pix_fmt_descriptors[st->codec->pix_fmt].name,
+                       codec->name,
+                       av_pix_fmt_descriptors[best].name);
             st->codec->pix_fmt = best;
         }
     }
@@ -954,23 +955,23 @@ static double get_sync_ipts(const OutputStream *ost)
 {
     const InputStream *ist = ost->sync_ist;
     OutputFile *of = &output_files[ost->file_index];
-    return (double)(ist->pts - of->start_time)/AV_TIME_BASE;
+    return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
 }
 
 static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc)
 {
     int ret;
 
-    while(bsfc){
-        AVPacket new_pkt= *pkt;
-        int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
-                                          &new_pkt.data, &new_pkt.size,
-                                          pkt->data, pkt->size,
-                                          pkt->flags & AV_PKT_FLAG_KEY);
-        if(a>0){
+    while (bsfc) {
+        AVPacket new_pkt = *pkt;
+        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
+                                           &new_pkt.data, &new_pkt.size,
+                                           pkt->data, pkt->size,
+                                           pkt->flags & AV_PKT_FLAG_KEY);
+        if (a > 0) {
             av_free_packet(pkt);
-            new_pkt.destruct= av_destruct_packet;
-        } else if(a<0){
+            new_pkt.destruct = av_destruct_packet;
+        } else if (a < 0) {
             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                    bsfc->filter->name, pkt->stream_index,
                    avctx->codec ? avctx->codec->name : "copy");
@@ -978,13 +979,13 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
             if (exit_on_error)
                 exit_program(1);
         }
-        *pkt= new_pkt;
+        *pkt = new_pkt;
 
-        bsfc= bsfc->next;
+        bsfc = bsfc->next;
     }
 
-    ret= av_interleaved_write_frame(s, pkt);
-    if(ret < 0){
+    ret = av_interleaved_write_frame(s, pkt);
+    if (ret < 0) {
         print_error("av_interleaved_write_frame()", ret);
         exit_program(1);
     }
@@ -1005,8 +1006,8 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     int64_t audio_out_size, audio_buf_size;
 
     int size_out, frame_bytes, ret, resample_changed;
-    AVCodecContext *enc= ost->st->codec;
-    AVCodecContext *dec= ist->st->codec;
+    AVCodecContext *enc = ost->st->codec;
+    AVCodecContext *dec = ist->st->codec;
     int osize = av_get_bytes_per_sample(enc->sample_fmt);
     int isize = av_get_bytes_per_sample(dec->sample_fmt);
     const int coded_bps = av_get_bits_per_sample(enc->codec->id);
@@ -1015,25 +1016,25 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     int64_t allocated_for_size = size;
 
 need_realloc:
-    audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
-    audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
-    audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
-    audio_buf_size= FFMAX(audio_buf_size, enc->frame_size);
-    audio_buf_size*= osize*enc->channels;
-
-    audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
-    if(coded_bps > 8*osize)
-        audio_out_size= audio_out_size * coded_bps / (8*osize);
+    audio_buf_size  = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
+    audio_buf_size  = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
+    audio_buf_size  = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
+    audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
+    audio_buf_size *= osize * enc->channels;
+
+    audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
+    if (coded_bps > 8 * osize)
+        audio_out_size = audio_out_size * coded_bps / (8 * osize);
     audio_out_size += FF_MIN_BUFFER_SIZE;
 
-    if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
+    if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) {
         av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
         exit_program(1);
     }
 
     av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
     av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
-    if (!audio_buf || !audio_out){
+    if (!audio_buf || !audio_out) {
         av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
         exit_program(1);
     }
@@ -1097,41 +1098,41 @@ need_realloc:
 
     av_assert0(ost->audio_resample || dec->sample_fmt==enc->sample_fmt);
 
-    if(audio_sync_method){
-        double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
-                - av_fifo_size(ost->fifo)/(enc->channels * osize);
+    if (audio_sync_method) {
+        double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts -
+                       av_fifo_size(ost->fifo) / (enc->channels * osize);
         int idelta = delta * dec->sample_rate / enc->sample_rate;
         int byte_delta = idelta * isize * dec->channels;
 
-        //FIXME resample delay
-        if(fabs(delta) > 50){
-            if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){
-                if(byte_delta < 0){
-                    byte_delta= FFMAX(byte_delta, -size);
+        // FIXME resample delay
+        if (fabs(delta) > 50) {
+            if (ist->is_start || fabs(delta) > audio_drift_threshold * enc->sample_rate) {
+                if (byte_delta < 0) {
+                    byte_delta = FFMAX(byte_delta, -size);
                     size += byte_delta;
                     buf  -= byte_delta;
                     av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
                            -byte_delta / (isize * dec->channels));
-                    if(!size)
+                    if (!size)
                         return;
-                    ist->is_start=0;
-                }else{
-                    input_tmp= av_realloc(input_tmp, byte_delta + size);
+                    ist->is_start = 0;
+                } else {
+                    input_tmp = av_realloc(input_tmp, byte_delta + size);
 
-                    if(byte_delta > allocated_for_size - size){
-                        allocated_for_size= byte_delta + (int64_t)size;
+                    if (byte_delta > allocated_for_size - size) {
+                        allocated_for_size = byte_delta + (int64_t)size;
                         goto need_realloc;
                     }
-                    ist->is_start=0;
+                    ist->is_start = 0;
 
                     generate_silence(input_tmp, dec->sample_fmt, byte_delta);
                     memcpy(input_tmp + byte_delta, buf, size);
-                    buf= input_tmp;
+                    buf = input_tmp;
                     size += byte_delta;
                     av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
                 }
-            }else if(audio_sync_method>1){
-                int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
+            } else if (audio_sync_method > 1) {
+                int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
                 av_assert0(ost->audio_resample);
                 av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
                        delta, comp, enc->sample_rate);
@@ -1139,9 +1140,9 @@ need_realloc:
                 swr_compensate(ost->swr, comp, enc->sample_rate);
             }
         }
-    }else
-        ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
-                        - av_fifo_size(ost->fifo)/(enc->channels * osize); //FIXME wrong
+    } else
+        ost->sync_opts = lrintf(get_sync_ipts(ost) * enc->sample_rate) -
+                                av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
 
     if (ost->audio_resample) {
         buftmp = audio_buf;
@@ -1172,7 +1173,7 @@ need_realloc:
 
             av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
 
-            //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
+            // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
 
             ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
                                        (short *)audio_buf);
@@ -1181,11 +1182,11 @@ need_realloc:
                 exit_program(1);
             }
             audio_size += ret;
-            pkt.stream_index= ost->index;
-            pkt.data= audio_out;
-            pkt.size= ret;
-            if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+            pkt.stream_index = ost->index;
+            pkt.data = audio_out;
+            pkt.size = ret;
+            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
             pkt.flags |= AV_PKT_FLAG_KEY;
             write_frame(s, &pkt, enc, ost->bitstream_filters);
 
@@ -1201,14 +1202,14 @@ need_realloc:
         /* determine the size of the coded buffer */
         size_out /= osize;
         if (coded_bps)
-            size_out = size_out*coded_bps/8;
+            size_out = size_out * coded_bps / 8;
 
-        if(size_out > audio_out_size){
+        if (size_out > audio_out_size) {
             av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n");
             exit_program(1);
         }
 
-        //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
+        // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
         ret = avcodec_encode_audio(enc, audio_out, size_out,
                                    (short *)buftmp);
         if (ret < 0) {
@@ -1216,11 +1217,11 @@ need_realloc:
             exit_program(1);
         }
         audio_size += ret;
-        pkt.stream_index= ost->index;
-        pkt.data= audio_out;
-        pkt.size= ret;
-        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+        pkt.stream_index = ost->index;
+        pkt.data = audio_out;
+        pkt.size = ret;
+        if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+            pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
         pkt.flags |= AV_PKT_FLAG_KEY;
         write_frame(s, &pkt, enc, ost->bitstream_filters);
     }
@@ -1241,14 +1242,14 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void *
 
         /* create temporary picture */
         size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
-        buf = av_malloc(size);
+        buf  = av_malloc(size);
         if (!buf)
             return;
 
         picture2 = &picture_tmp;
         avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
 
-        if(avpicture_deinterlace(picture2, picture,
+        if (avpicture_deinterlace(picture2, picture,
                                  dec->pix_fmt, dec->width, dec->height) < 0) {
             /* if error, do not deinterlace */
             av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
@@ -1298,11 +1299,11 @@ static void do_subtitle_out(AVFormatContext *s,
     else
         nb = 1;
 
-    for(i = 0; i < nb; i++) {
+    for (i = 0; i < nb; i++) {
         sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
         // start_display_time is required to be 0
-        sub->pts              += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q);
-        sub->end_display_time -= sub->start_display_time;
+        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
+        sub->end_display_time  -= sub->start_display_time;
         sub->start_display_time = 0;
         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                     subtitle_out_max_size, sub);
@@ -1315,7 +1316,7 @@ static void do_subtitle_out(AVFormatContext *s,
         pkt.stream_index = ost->index;
         pkt.data = subtitle_out;
         pkt.size = subtitle_out_size;
-        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
         if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
             /* XXX: the pts correction is handled here. Maybe handling
                it in the codec would be better */
@@ -1328,8 +1329,8 @@ static void do_subtitle_out(AVFormatContext *s,
     }
 }
 
-static int bit_buffer_size= 1024*256;
-static uint8_t *bit_buffer= NULL;
+static int bit_buffer_size = 1024 * 256;
+static uint8_t *bit_buffer = NULL;
 
 static void do_video_resample(OutputStream *ost,
                               InputStream *ist,
@@ -1425,26 +1426,26 @@ static void do_video_out(AVFormatContext *s,
 
     if (format_video_sync) {
         double vdelta = sync_ipts - ost->sync_opts + duration;
-        //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
+        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
         if (vdelta < -1.1)
             nb_frames = 0;
         else if (format_video_sync == 2) {
-            if(vdelta<=-0.6){
-                nb_frames=0;
-            }else if(vdelta>0.6)
-                ost->sync_opts= lrintf(sync_ipts);
-        }else if (vdelta > 1.1)
+            if (vdelta <= -0.6) {
+                nb_frames = 0;
+            } else if (vdelta > 0.6)
+                ost->sync_opts = lrintf(sync_ipts);
+        } else if (vdelta > 1.1)
             nb_frames = lrintf(vdelta);
 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
-        if (nb_frames == 0){
+        if (nb_frames == 0) {
             ++nb_frames_drop;
             av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
-        }else if (nb_frames > 1) {
+        } else if (nb_frames > 1) {
             nb_frames_dup += nb_frames - 1;
-            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames-1);
+            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
         }
-    }else
-        ost->sync_opts= lrintf(sync_ipts);
+    } else
+        ost->sync_opts = lrintf(sync_ipts);
 
     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
     if (nb_frames <= 0)
@@ -1453,10 +1454,10 @@ static void do_video_out(AVFormatContext *s,
     do_video_resample(ost, ist, in_picture, &final_picture);
 
     /* duplicates frame if needed */
-    for(i=0;i<nb_frames;i++) {
+    for (i = 0; i < nb_frames; i++) {
         AVPacket pkt;
         av_init_packet(&pkt);
-        pkt.stream_index= ost->index;
+        pkt.stream_index = ost->index;
 
         if (s->oformat->flags & AVFMT_RAWPICTURE &&
             enc->codec->id == CODEC_ID_RAWVIDEO) {
@@ -1465,16 +1466,16 @@ static void do_video_out(AVFormatContext *s,
                method. */
             enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
             enc->coded_frame->top_field_first  = in_picture->top_field_first;
-            pkt.data= (uint8_t *)final_picture;
-            pkt.size=  sizeof(AVPicture);
-            pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
+            pkt.data   = (uint8_t *)final_picture;
+            pkt.size   = sizeof(AVPicture);
+            pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
             pkt.flags |= AV_PKT_FLAG_KEY;
 
             write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
         } else {
             AVFrame big_picture;
 
-            big_picture= *final_picture;
+            big_picture = *final_picture;
             /* better than nothing: use input picture interlaced
                settings */
             big_picture.interlaced_frame = in_picture->interlaced_frame;
@@ -1491,9 +1492,9 @@ static void do_video_out(AVFormatContext *s,
             if (!enc->me_threshold)
                 big_picture.pict_type = 0;
 //            big_picture.pts = AV_NOPTS_VALUE;
-            big_picture.pts= ost->sync_opts;
+            big_picture.pts = ost->sync_opts;
 //            big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
-//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
+// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
             if (ost->forced_kf_index < ost->forced_kf_count &&
                 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                 big_picture.pict_type = AV_PICTURE_TYPE_I;
@@ -1507,22 +1508,22 @@ static void do_video_out(AVFormatContext *s,
                 exit_program(1);
             }
 
-            if(ret>0){
-                pkt.data= bit_buffer;
-                pkt.size= ret;
-                if(enc->coded_frame->pts != AV_NOPTS_VALUE)
-                    pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+            if (ret > 0) {
+                pkt.data = bit_buffer;
+                pkt.size = ret;
+                if (enc->coded_frame->pts != AV_NOPTS_VALUE)
+                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
    pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
    pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
 
-                if(enc->coded_frame->key_frame)
+                if (enc->coded_frame->key_frame)
                     pkt.flags |= AV_PKT_FLAG_KEY;
                 write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
                 *frame_size = ret;
                 video_size += ret;
-                //fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
-                //        enc->frame_number-1, ret, enc->pict_type);
+                // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
+                //         enc->frame_number-1, ret, enc->pict_type);
                 /* if two pass, output log */
                 if (ost->logfile && enc->stats_out) {
                     fprintf(ost->logfile, "%s", enc->stats_out);
@@ -1536,7 +1537,7 @@ static void do_video_out(AVFormatContext *s,
 
 static double psnr(double d)
 {
-    return -10.0*log(d)/log(10.0);
+    return -10.0 * log(d) / log(10.0);
 }
 
 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
@@ -1558,9 +1559,9 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     enc = ost->st->codec;
     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
         frame_number = ost->frame_number;
-        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
+        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
         if (enc->flags&CODEC_FLAG_PSNR)
-            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
+            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 
         fprintf(vstats_file,"f_size= %6d ", frame_size);
         /* compute pts value */
@@ -1568,10 +1569,10 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
         if (ti1 < 0.01)
             ti1 = 0.01;
 
-        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
+        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
         avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
-            (double)video_size / 1024, ti1, bitrate, avg_bitrate);
+               (double)video_size / 1024, ti1, bitrate, avg_bitrate);
         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
     }
 }
@@ -1610,19 +1611,19 @@ static void print_report(OutputFile *output_files,
 
     total_size = avio_size(oc->pb);
     if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too
-        total_size= avio_tell(oc->pb);
+        total_size = avio_tell(oc->pb);
         if (total_size < 0)
             total_size = 0;
     }
 
     buf[0] = '\0';
     vid = 0;
-    for(i=0;i<nb_ostreams;i++) {
+    for (i = 0; i < nb_ostreams; i++) {
         float q = -1;
         ost = &ost_table[i];
         enc = ost->st->codec;
         if (!ost->stream_copy && enc->coded_frame)
-            q = enc->coded_frame->quality/(float)FF_QP2LAMBDA;
+            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
         }
@@ -1631,37 +1632,38 @@ static void print_report(OutputFile *output_files,
 
             frame_number = ost->frame_number;
             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
-                     frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q);
-            if(is_last_report)
+                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
+            if (is_last_report)
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
-            if(qp_hist){
+            if (qp_hist) {
                 int j;
                 int qp = lrintf(q);
-                if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
+                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                     qp_histogram[qp]++;
-                for(j=0; j<32; j++)
-                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
+                for (j = 0; j < 32; j++)
+                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
             }
-            if (enc->flags&CODEC_FLAG_PSNR){
+            if (enc->flags & CODEC_FLAG_PSNR) {
                 int j;
-                double error, error_sum=0;
-                double scale, scale_sum=0;
-                char type[3]= {'Y','U','V'};
+                double error, error_sum = 0;
+                double scale, scale_sum = 0;
+                char type[3] = { 'Y', 'U', 'V' };
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
-                for(j=0; j<3; j++){
-                    if(is_last_report){
-                        error= enc->error[j];
-                        scale= enc->width*enc->height*255.0*255.0*frame_number;
-                    }else{
-                        error= enc->coded_frame->error[j];
-                        scale= enc->width*enc->height*255.0*255.0;
+                for (j = 0; j < 3; j++) {
+                    if (is_last_report) {
+                        error = enc->error[j];
+                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
+                    } else {
+                        error = enc->coded_frame->error[j];
+                        scale = enc->width * enc->height * 255.0 * 255.0;
                     }
-                    if(j) scale/=4;
+                    if (j)
+                        scale /= 4;
                     error_sum += error;
                     scale_sum += scale;
-                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
+                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                 }
-                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
+                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
             }
             vid = 1;
         }
@@ -1699,10 +1701,10 @@ static void print_report(OutputFile *output_files,
         int64_t raw= audio_size + video_size + extra_size;
         av_log(NULL, AV_LOG_INFO, "\n");
         av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
-               video_size/1024.0,
-               audio_size/1024.0,
-               extra_size/1024.0,
-               100.0*(total_size - raw)/raw
+               video_size / 1024.0,
+               audio_size / 1024.0,
+               extra_size / 1024.0,
+               100.0 * (total_size - raw) / raw
         );
         if(video_size + audio_size + extra_size == 0){
             av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
@@ -1722,16 +1724,16 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
         if (!ost->encoding_needed)
             continue;
 
-        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
             continue;
         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
             continue;
 
-        for(;;) {
+        for (;;) {
             AVPacket pkt;
             int fifo_bytes;
             av_init_packet(&pkt);
-            pkt.stream_index= ost->index;
+            pkt.stream_index = ost->index;
 
             switch (ost->st->codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
@@ -1765,7 +1767,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
                     exit_program(1);
                 }
                 audio_size += ret;
-                pkt.flags |= AV_PKT_FLAG_KEY;
+                pkt.flags  |= AV_PKT_FLAG_KEY;
                 break;
             case AVMEDIA_TYPE_VIDEO:
                 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
@@ -1774,14 +1776,14 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
                     exit_program(1);
                 }
                 video_size += ret;
-                if(enc->coded_frame && enc->coded_frame->key_frame)
+                if (enc->coded_frame && enc->coded_frame->key_frame)
                     pkt.flags |= AV_PKT_FLAG_KEY;
                 if (ost->logfile && enc->stats_out) {
                     fprintf(ost->logfile, "%s", enc->stats_out);
                 }
                 break;
             default:
-                ret=-1;
+                ret = -1;
             }
 
             if (ret <= 0)
@@ -1789,7 +1791,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
             pkt.data = bit_buffer;
             pkt.size = ret;
             if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
             write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
         }
     }
@@ -1811,7 +1813,7 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
 
     if (of->recording_time != INT64_MAX &&
         av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
-                      (AVRational){1, 1000000}) >= 0) {
+                      (AVRational){ 1, 1000000 }) >= 0) {
         ost->is_past_recording_time = 1;
         return 0;
     }
@@ -1855,8 +1857,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
     opkt.flags    = pkt->flags;
 
-    //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
-    if(   ost->st->codec->codec_id != CODEC_ID_H264
+    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
+    if (  ost->st->codec->codec_id != CODEC_ID_H264
        && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
        && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
        ) {
@@ -2186,15 +2188,15 @@ static int output_packet(InputStream *ist,
         avpkt = *pkt;
     }
 
-    if(pkt->dts != AV_NOPTS_VALUE){
-        if(ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
+    if (pkt->dts != AV_NOPTS_VALUE) {
+        if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
             ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
         pkt_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
     }
     if(pkt->pts != AV_NOPTS_VALUE)
         pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 
-    //while we have more to decode or while the decoder did output something on EOF
+    // while we have more to decode or while the decoder did output something on EOF
     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
     handle_eof:
 
@@ -2206,7 +2208,7 @@ static int output_packet(InputStream *ist,
             ist->showed_multi_packet_warning = 1;
         }
 
-        switch(ist->st->codec->codec_type) {
+        switch (ist->st->codec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
             ret = transcode_audio    (ist, &avpkt, &got_output);
             break;
@@ -2251,7 +2253,7 @@ static int output_packet(InputStream *ist,
             if (pkt->duration) {
                 ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
             } else if(ist->st->codec->time_base.num != 0) {
-                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
+                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
                 ist->next_pts += ((int64_t)AV_TIME_BASE *
                                   ist->st->codec->time_base.num * ticks) /
                                   ist->st->codec->time_base.den;
@@ -2275,7 +2277,7 @@ static void print_sdp(OutputFile *output_files, int n)
 {
     char sdp[2048];
     int i;
-    AVFormatContext **avc = av_malloc(sizeof(*avc)*n);
+    AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
 
     if (!avc)
         exit_program(1);
@@ -2316,7 +2318,7 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb
         assert_avoptions(ist->opts);
     }
 
-    ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames*AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
+    ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
     ist->next_pts = AV_NOPTS_VALUE;
     ist->is_start = 1;
 
@@ -2420,9 +2422,9 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
             av_reduce(&codec->time_base.num, &codec->time_base.den,
                         codec->time_base.num, codec->time_base.den, INT_MAX);
 
-            switch(codec->codec_type) {
+            switch (codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
-                if(audio_volume != 256) {
+                if (audio_volume != 256) {
                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                     exit_program(1);
                 }
@@ -2464,7 +2466,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
             ist->decoding_needed = 1;
             ost->encoding_needed = 1;
 
-            switch(codec->codec_type) {
+            switch (codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
                 ost->fifo = av_fifo_alloc(1024);
                 if (!ost->fifo) {
@@ -2473,7 +2475,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
                 if (!codec->sample_rate)
                     codec->sample_rate = icodec->sample_rate;
                 choose_sample_rate(ost->st, ost->enc);
-                codec->time_base = (AVRational){1, codec->sample_rate};
+                codec->time_base = (AVRational){ 1, codec->sample_rate };
 
                 if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
                     codec->sample_fmt = icodec->sample_fmt;
@@ -2529,7 +2531,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
                                       codec->height  != icodec->height ||
                                       codec->pix_fmt != icodec->pix_fmt;
                 if (ost->video_resample) {
-                    codec->bits_per_raw_sample= frame_bits_per_raw_sample;
+                    codec->bits_per_raw_sample = frame_bits_per_raw_sample;
                 }
 
                 ost->resample_height  = icodec->height;
@@ -2537,13 +2539,13 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
                 ost->resample_pix_fmt = icodec->pix_fmt;
 
                 if (!ost->frame_rate.num)
-                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25,1};
+                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){ 25, 1 };
                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                     ost->frame_rate = ost->enc->supported_framerates[idx];
                 }
                 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
-                if(   av_q2d(codec->time_base) < 0.001 && video_sync_method
+                if (  av_q2d(codec->time_base) < 0.001 && video_sync_method
                    && (video_sync_method==1 || (video_sync_method<0 && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
                     av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n"
                                                "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n");
@@ -2596,7 +2598,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
                 }
             }
         }
-        if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
+        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
             /* maximum video buffer size is 6-bytes per pixel, plus DPX header size (1664)*/
             int size        = codec->width * codec->height;
             bit_buffer_size = FFMAX(bit_buffer_size, 7*size + 10000);
@@ -2754,7 +2756,7 @@ static int transcode(OutputFile *output_files, int nb_output_files,
     OutputStream *ost;
     InputStream *ist;
     uint8_t *no_packet;
-    int no_packet_count=0;
+    int no_packet_count = 0;
     int64_t timer_start;
     int key;
 
@@ -2771,15 +2773,15 @@ static int transcode(OutputFile *output_files, int nb_output_files,
 
     timer_start = av_gettime();
 
-    for(; received_sigterm == 0;) {
+    for (; received_sigterm == 0;) {
         int file_index, ist_index;
         AVPacket pkt;
         int64_t ipts_min;
         double opts_min;
         int64_t cur_time= av_gettime();
 
-        ipts_min= INT64_MAX;
-        opts_min= 1e100;
+        ipts_min = INT64_MAX;
+        opts_min = 1e100;
         /* if 'q' pressed, exits */
         if (!using_stdin) {
             static int64_t last_time;
@@ -2889,14 +2891,15 @@ static int transcode(OutputFile *output_files, int nb_output_files,
                 continue;
             opts = ost->st->pts.val * av_q2d(ost->st->time_base);
             ipts = ist->pts;
-            if (!input_files[ist->file_index].eof_reached){
-                if(ipts < ipts_min) {
+            if (!input_files[ist->file_index].eof_reached) {
+                if (ipts < ipts_min) {
                     ipts_min = ipts;
-                    if(input_sync ) file_index = ist->file_index;
+                    if (input_sync)
+                        file_index = ist->file_index;
                 }
-                if(opts < opts_min) {
+                if (opts < opts_min) {
                     opts_min = opts;
-                    if(!input_sync) file_index = ist->file_index;
+                    if (!input_sync) file_index = ist->file_index;
                 }
             }
             if (ost->frame_number >= ost->max_frames) {
@@ -2908,8 +2911,8 @@ static int transcode(OutputFile *output_files, int nb_output_files,
         }
         /* if none, if is finished */
         if (file_index < 0) {
-            if(no_packet_count){
-                no_packet_count=0;
+            if (no_packet_count) {
+                no_packet_count = 0;
                 memset(no_packet, 0, nb_input_files);
                 usleep(10000);
                 continue;
@@ -2918,10 +2921,10 @@ static int transcode(OutputFile *output_files, int nb_output_files,
         }
 
         /* read a frame from it and output it in the fifo */
-        is = input_files[file_index].ctx;
-        ret= av_read_frame(is, &pkt);
-        if(ret == AVERROR(EAGAIN)){
-            no_packet[file_index]=1;
+        is  = input_files[file_index].ctx;
+        ret = av_read_frame(is, &pkt);
+        if (ret == AVERROR(EAGAIN)) {
+            no_packet[file_index] = 1;
             no_packet_count++;
             continue;
         }
@@ -2933,7 +2936,7 @@ static int transcode(OutputFile *output_files, int nb_output_files,
                 continue;
         }
 
-        no_packet_count=0;
+        no_packet_count = 0;
         memset(no_packet, 0, nb_input_files);
 
         if (do_pkt_dump) {
@@ -2954,30 +2957,34 @@ static int transcode(OutputFile *output_files, int nb_output_files,
         if (pkt.pts != AV_NOPTS_VALUE)
             pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 
-        if(pkt.pts != AV_NOPTS_VALUE)
+        if (pkt.pts != AV_NOPTS_VALUE)
             pkt.pts *= ist->ts_scale;
-        if(pkt.dts != AV_NOPTS_VALUE)
+        if (pkt.dts != AV_NOPTS_VALUE)
             pkt.dts *= ist->ts_scale;
 
-//        fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type);
+        //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
+        //        ist->next_pts,
+        //        pkt.dts, input_files[ist->file_index].ts_offset,
+        //        ist->st->codec->codec_type);
         if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
-            int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
-            int64_t delta= pkt_dts - ist->next_pts;
+            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+            int64_t delta   = pkt_dts - ist->next_pts;
             if((delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                 (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                  ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
                 pkt_dts+1<ist->pts)&& !copy_ts){
                 input_files[ist->file_index].ts_offset -= delta;
-                av_log(NULL, AV_LOG_DEBUG, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+                av_log(NULL, AV_LOG_DEBUG,
+                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                        delta, input_files[ist->file_index].ts_offset);
                 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
-                if(pkt.pts != AV_NOPTS_VALUE)
+                if (pkt.pts != AV_NOPTS_VALUE)
                     pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
             }
         }
 
-        //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
+        // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
         if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
 
             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
@@ -3007,7 +3014,7 @@ static int transcode(OutputFile *output_files, int nb_output_files,
     term_exit();
 
     /* write the trailer if needed and close file */
-    for(i=0;i<nb_output_files;i++) {
+    for (i = 0; i < nb_output_files; i++) {
         os = output_files[i].ctx;
         av_write_trailer(os);
     }
@@ -3090,7 +3097,7 @@ static double parse_frame_aspect_ratio(const char *arg)
     if (p) {
         x = strtol(arg, &end, 10);
         if (end == p)
-            y = strtol(end+1, &end, 10);
+            y = strtol(end + 1, &end, 10);
         if (x > 0 && y > 0)
             ar = (double)x / (double)y;
     } else
@@ -3416,11 +3423,11 @@ static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int e
     codec = encoder ?
         avcodec_find_encoder_by_name(name) :
         avcodec_find_decoder_by_name(name);
-    if(!codec) {
+    if (!codec) {
         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
         exit_program(1);
     }
-    if(codec->type != type) {
+    if (codec->type != type) {
         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
         exit_program(1);
     }
@@ -3476,10 +3483,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 
         switch (dec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
-            if(!ist->dec)
+            if (!ist->dec)
                 ist->dec = avcodec_find_decoder(dec->codec_id);
-            if(o->audio_disable)
-                st->discard= AVDISCARD_ALL;
+            if (o->audio_disable)
+                st->discard = AVDISCARD_ALL;
             break;
         case AVMEDIA_TYPE_VIDEO:
             if(!ist->dec)
@@ -3490,7 +3497,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
                 dec->flags |= CODEC_FLAG_EMU_EDGE;
             }
 
-            if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) {
+            if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
 
                 av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
                        i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
@@ -3498,9 +3505,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             }
 
             if (o->video_disable)
-                st->discard= AVDISCARD_ALL;
-            else if(video_discard)
-                st->discard= video_discard;
+                st->discard = AVDISCARD_ALL;
+            else if (video_discard)
+                st->discard = video_discard;
             break;
         case AVMEDIA_TYPE_DATA:
             if (o->data_disable)
@@ -3721,7 +3728,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost)
         if (*p == ',')
             n++;
     ost->forced_kf_count = n;
-    ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
+    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
     if (!ost->forced_kf_pts) {
         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
         exit_program(1);
@@ -3817,8 +3824,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
                                 nb_output_streams + 1);
     ost = &output_streams[nb_output_streams - 1];
     ost->file_index = nb_output_files;
-    ost->index = idx;
-    ost->st    = st;
+    ost->index      = idx;
+    ost->st         = st;
     st->codec->codec_type = type;
     choose_encoder(o, oc, ost);
     if (ost->enc) {
@@ -3898,12 +3905,12 @@ static void parse_matrix_coeffs(uint16_t *dest, const char *str)
 {
     int i;
     const char *p = str;
-    for(i = 0;; i++) {
+    for (i = 0;; i++) {
         dest[i] = atoi(p);
-        if(i == 63)
+        if (i == 63)
             break;
         p = strchr(p, ',');
-        if(!p) {
+        if (!p) {
             av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
             exit_program(1);
         }
@@ -3972,34 +3979,34 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
         }
 
         MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
-        for(i=0; p; i++){
+        for (i = 0; p; i++) {
             int start, end, q;
-            int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
-            if(e!=3){
+            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
+            if (e != 3) {
                 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
                 exit_program(1);
             }
             /* FIXME realloc failure */
-            video_enc->rc_override=
+            video_enc->rc_override =
                 av_realloc(video_enc->rc_override,
-                           sizeof(RcOverride)*(i+1));
-            video_enc->rc_override[i].start_frame= start;
-            video_enc->rc_override[i].end_frame  = end;
-            if(q>0){
-                video_enc->rc_override[i].qscale= q;
-                video_enc->rc_override[i].quality_factor= 1.0;
+                           sizeof(RcOverride) * (i + 1));
+            video_enc->rc_override[i].start_frame = start;
+            video_enc->rc_override[i].end_frame   = end;
+            if (q > 0) {
+                video_enc->rc_override[i].qscale         = q;
+                video_enc->rc_override[i].quality_factor = 1.0;
             }
-            else{
-                video_enc->rc_override[i].qscale= 0;
-                video_enc->rc_override[i].quality_factor= -q/100.0;
+            else {
+                video_enc->rc_override[i].qscale         = 0;
+                video_enc->rc_override[i].quality_factor = -q/100.0;
             }
-            p= strchr(p, '/');
-            if(p) p++;
+            p = strchr(p, '/');
+            if (p) p++;
         }
-        video_enc->rc_override_count=i;
+        video_enc->rc_override_count = i;
         if (!video_enc->rc_initial_buffer_occupancy)
-            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
-        video_enc->intra_dc_precision= intra_dc_precision - 8;
+            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
+        video_enc->intra_dc_precision = intra_dc_precision - 8;
 
         if (do_psnr)
             video_enc->flags|= CODEC_FLAG_PSNR;
@@ -4150,7 +4157,7 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
     for (i = 0; i < is->nb_chapters; i++) {
         AVChapter *in_ch = is->chapters[i], *out_ch;
         int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
-                                      AV_TIME_BASE_Q, in_ch->time_base);
+                                       AV_TIME_BASE_Q, in_ch->time_base);
         int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
                            av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
 
@@ -4343,7 +4350,7 @@ static void opt_output_file(void *optctx, const char *filename)
             }
 
             ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
-            ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
+            ost->sync_ist     = &input_streams[input_files[map->sync_file_index].ist_index +
                                            map->sync_stream_index];
             ist->discard = 0;
         }
@@ -4618,36 +4625,36 @@ static int opt_help(const char *opt, const char *arg)
 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 {
     enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
-    static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"};
+    static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
 
-    if(!strncmp(arg, "pal-", 4)) {
+    if (!strncmp(arg, "pal-", 4)) {
         norm = PAL;
         arg += 4;
-    } else if(!strncmp(arg, "ntsc-", 5)) {
+    } else if (!strncmp(arg, "ntsc-", 5)) {
         norm = NTSC;
         arg += 5;
-    } else if(!strncmp(arg, "film-", 5)) {
+    } else if (!strncmp(arg, "film-", 5)) {
         norm = FILM;
         arg += 5;
     } else {
         /* Try to determine PAL/NTSC by peeking in the input files */
-        if(nb_input_files) {
+        if (nb_input_files) {
             int i, j, fr;
             for (j = 0; j < nb_input_files; j++) {
                 for (i = 0; i < input_files[j].nb_streams; i++) {
                     AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
-                    if(c->codec_type != AVMEDIA_TYPE_VIDEO)
+                    if (c->codec_type != AVMEDIA_TYPE_VIDEO)
                         continue;
                     fr = c->time_base.den * 1000 / c->time_base.num;
-                    if(fr == 25000) {
+                    if (fr == 25000) {
                         norm = PAL;
                         break;
-                    } else if((fr == 29970) || (fr == 23976)) {
+                    } else if ((fr == 29970) || (fr == 23976)) {
                         norm = NTSC;
                         break;
                     }
                 }
-                if(norm != UNKNOWN)
+                if (norm != UNKNOWN)
                     break;
             }
         }
@@ -4655,14 +4662,14 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
             av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
     }
 
-    if(norm == UNKNOWN) {
+    if (norm == UNKNOWN) {
         av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
         av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
         av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
         exit_program(1);
     }
 
-    if(!strcmp(arg, "vcd")) {
+    if (!strcmp(arg, "vcd")) {
         opt_video_codec(o, "c:v", "mpeg1video");
         opt_audio_codec(o, "c:a", "mp2");
         parse_option(o, "f", "vcd", options);
@@ -4688,8 +4695,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
            and the first pack from the other stream, respectively, may also have
            been written before.
            So the real data starts at SCR 36000+3*1200. */
-        o->mux_preload = (36000+3*1200) / 90000.0; //0.44
-    } else if(!strcmp(arg, "svcd")) {
+        o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
+    } else if (!strcmp(arg, "svcd")) {
 
         opt_video_codec(o, "c:v", "mpeg2video");
         opt_audio_codec(o, "c:a", "mp2");
@@ -4702,8 +4709,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 
         opt_default("b:v", "2040000");
         opt_default("maxrate", "2516000");
-        opt_default("minrate", "0"); //1145000;
-        opt_default("bufsize", "1835008"); //224*1024*8;
+        opt_default("minrate", "0"); // 1145000;
+        opt_default("bufsize", "1835008"); // 224*1024*8;
         opt_default("flags", "+scan_offset");
 
 
@@ -4712,7 +4719,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 
         opt_default("packetsize", "2324");
 
-    } else if(!strcmp(arg, "dvd")) {
+    } else if (!strcmp(arg, "dvd")) {
 
         opt_video_codec(o, "c:v", "mpeg2video");
         opt_audio_codec(o, "c:a", "ac3");
@@ -4725,8 +4732,8 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 
         opt_default("b:v", "6000000");
         opt_default("maxrate", "9000000");
-        opt_default("minrate", "0"); //1500000;
-        opt_default("bufsize", "1835008"); //224*1024*8;
+        opt_default("minrate", "0"); // 1500000;
+        opt_default("bufsize", "1835008"); // 224*1024*8;
 
         opt_default("packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
         opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
@@ -4734,7 +4741,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
         opt_default("b:a", "448000");
         parse_option(o, "ar", "48000", options);
 
-    } else if(!strncmp(arg, "dv", 2)) {
+    } else if (!strncmp(arg, "dv", 2)) {
 
         parse_option(o, "f", "dv", options);
 
@@ -4756,7 +4763,7 @@ static int opt_target(OptionsContext *o, const char *opt, const char *arg)
 static int opt_vstats_file(const char *opt, const char *arg)
 {
     av_free (vstats_filename);
-    vstats_filename=av_strdup (arg);
+    vstats_filename = av_strdup (arg);
     return 0;
 }
 
@@ -5041,7 +5048,7 @@ int main(int argc, char **argv)
     /* parse options */
     parse_options(&o, argc, argv, options, opt_output_file);
 
-    if(nb_output_files <= 0 && nb_input_files == 0) {
+    if (nb_output_files <= 0 && nb_input_files == 0) {
         show_usage();
         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
         exit_program(1);
diff --git a/libavcodec/indeo4.c b/libavcodec/indeo4.c
index 8f6d2eab65ed40cbca33c40cfca16ae3f885297e..e816330e530d6a34d2f004994aaa673073ef215e 100644
--- a/libavcodec/indeo4.c
+++ b/libavcodec/indeo4.c
@@ -160,7 +160,7 @@ static inline int scale_tile_size(int def_size, int size_factor)
  */
 static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
 {
-    int             pic_size_indx, val, i, p;
+    int             pic_size_indx, i, p;
     IVIPicConfig    pic_conf;
 
     if (get_bits(&ctx->gb, 18) != 0x3FFF8) {
@@ -301,7 +301,7 @@ static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
     /* skip picture header extension if any */
     while (get_bits1(&ctx->gb)) {
         av_dlog(avctx, "Pic hdr extension encountered!\n");
-        val = get_bits(&ctx->gb, 8);
+        skip_bits(&ctx->gb, 8);
     }
 
     if (get_bits1(&ctx->gb)) {
@@ -325,7 +325,7 @@ static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
 static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
                            AVCodecContext *avctx)
 {
-    int plane, band_num, hdr_size, indx, transform_id, scan_indx;
+    int plane, band_num, indx, transform_id, scan_indx;
     int i;
 
     plane    = get_bits(&ctx->gb, 2);
@@ -337,7 +337,10 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
 
     band->is_empty = get_bits1(&ctx->gb);
     if (!band->is_empty) {
-        hdr_size = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 16) : 4;
+        /* Skip the explicit header size if present;
+         * if it is not coded, the header size is 4 bytes. */
+        if (get_bits1(&ctx->gb))
+            skip_bits(&ctx->gb, 16);
 
         band->is_halfpel = get_bits(&ctx->gb, 2);
         if (band->is_halfpel >= 2) {
diff --git a/libavcodec/libmp3lame.c b/libavcodec/libmp3lame.c
index b540362d5c62f23648c728b6a1ba927254cc11ab..1b736d03428ec8a42b51180365d78795bfc12ead 100644
--- a/libavcodec/libmp3lame.c
+++ b/libavcodec/libmp3lame.c
@@ -31,7 +31,7 @@
 #include "mpegaudio.h"
 #include <lame/lame.h>
 
-#define BUFFER_SIZE (7200 + 2*MPA_FRAME_SIZE + MPA_FRAME_SIZE/4)
+#define BUFFER_SIZE (7200 + 2 * MPA_FRAME_SIZE + MPA_FRAME_SIZE / 4)
 typedef struct Mp3AudioContext {
     AVClass *class;
     lame_global_flags *gfp;
@@ -62,17 +62,17 @@ static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
     lame_set_in_samplerate(s->gfp, avctx->sample_rate);
     lame_set_out_samplerate(s->gfp, avctx->sample_rate);
     lame_set_num_channels(s->gfp, avctx->channels);
-    if(avctx->compression_level == FF_COMPRESSION_DEFAULT) {
+    if (avctx->compression_level == FF_COMPRESSION_DEFAULT) {
         lame_set_quality(s->gfp, 5);
     } else {
         lame_set_quality(s->gfp, avctx->compression_level);
     }
     lame_set_mode(s->gfp, s->stereo ? JOINT_STEREO : MONO);
-    lame_set_brate(s->gfp, avctx->bit_rate/1000);
-    if(avctx->flags & CODEC_FLAG_QSCALE) {
+    lame_set_brate(s->gfp, avctx->bit_rate / 1000);
+    if (avctx->flags & CODEC_FLAG_QSCALE) {
         lame_set_brate(s->gfp, 0);
         lame_set_VBR(s->gfp, vbr_default);
-        lame_set_VBR_quality(s->gfp, avctx->global_quality/(float)FF_QP2LAMBDA);
+        lame_set_VBR_quality(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA);
     }
     lame_set_bWriteVbrTag(s->gfp,0);
 #if FF_API_LAME_GLOBAL_OPTS
@@ -82,14 +82,14 @@ static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
     if (lame_init_params(s->gfp) < 0)
         goto err_close;
 
-    avctx->frame_size = lame_get_framesize(s->gfp);
+    avctx->frame_size             = lame_get_framesize(s->gfp);
 
     if(!(avctx->coded_frame= avcodec_alloc_frame())) {
         lame_close(s->gfp);
 
         return AVERROR(ENOMEM);
     }
-    avctx->coded_frame->key_frame= 1;
+    avctx->coded_frame->key_frame = 1;
 
     if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt && s->stereo) {
         int nelem = 2 * avctx->frame_size;
@@ -117,60 +117,62 @@ static const int sSampleRates[] = {
 };
 
 static const int sBitRates[2][3][15] = {
-    {   {  0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448},
-        {  0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384},
-        {  0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320}
+    {
+        { 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 },
+        { 0, 32, 48, 56,  64,  80,  96, 112, 128, 160, 192, 224, 256, 320, 384 },
+        { 0, 32, 40, 48,  56,  64,  80,  96, 112, 128, 160, 192, 224, 256, 320 }
     },
-    {   {  0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256},
-        {  0,  8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160},
-        {  0,  8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}
+    {
+        { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 },
+        { 0,  8, 16, 24, 32, 40, 48,  56,  64,  80,  96, 112, 128, 144, 160 },
+        { 0,  8, 16, 24, 32, 40, 48,  56,  64,  80,  96, 112, 128, 144, 160 }
     },
 };
 
-static const int sSamplesPerFrame[2][3] =
-{
-    {  384,     1152,    1152 },
-    {  384,     1152,     576 }
+static const int sSamplesPerFrame[2][3] = {
+    { 384, 1152, 1152 },
+    { 384, 1152,  576 }
 };
 
-static const int sBitsPerSlot[3] = {
-    32,
-    8,
-    8
-};
+static const int sBitsPerSlot[3] = { 32, 8, 8 };
 
 static int mp3len(void *data, int *samplesPerFrame, int *sampleRate)
 {
-    uint32_t header = AV_RB32(data);
-    int layerID = 3 - ((header >> 17) & 0x03);
-    int bitRateID = ((header >> 12) & 0x0f);
+    uint32_t header  = AV_RB32(data);
+    int layerID      = 3 - ((header >> 17) & 0x03);
+    int bitRateID    = ((header >> 12) & 0x0f);
     int sampleRateID = ((header >> 10) & 0x03);
-    int bitsPerSlot = sBitsPerSlot[layerID];
-    int isPadded = ((header >> 9) & 0x01);
-    static int const mode_tab[4]= {2,3,1,0};
-    int mode= mode_tab[(header >> 19) & 0x03];
-    int mpeg_id= mode>0;
+    int bitsPerSlot  = sBitsPerSlot[layerID];
+    int isPadded     = ((header >> 9) & 0x01);
+    static int const mode_tab[4] = { 2, 3, 1, 0 };
+    int mode    = mode_tab[(header >> 19) & 0x03];
+    int mpeg_id = mode > 0;
     int temp0, temp1, bitRate;
 
-    if ( (( header >> 21 ) & 0x7ff) != 0x7ff || mode == 3 || layerID==3 || sampleRateID==3) {
+    if (((header >> 21) & 0x7ff) != 0x7ff || mode == 3 || layerID == 3 ||
+        sampleRateID == 3) {
         return -1;
     }
 
-    if(!samplesPerFrame) samplesPerFrame= &temp0;
-    if(!sampleRate     ) sampleRate     = &temp1;
+    if (!samplesPerFrame)
+        samplesPerFrame = &temp0;
+    if (!sampleRate)
+        sampleRate      = &temp1;
 
-//    *isMono = ((header >>  6) & 0x03) == 0x03;
+    //*isMono = ((header >>  6) & 0x03) == 0x03;
 
-    *sampleRate = sSampleRates[sampleRateID]>>mode;
-    bitRate = sBitRates[mpeg_id][layerID][bitRateID] * 1000;
+    *sampleRate      = sSampleRates[sampleRateID] >> mode;
+    bitRate          = sBitRates[mpeg_id][layerID][bitRateID] * 1000;
     *samplesPerFrame = sSamplesPerFrame[mpeg_id][layerID];
-//av_log(NULL, AV_LOG_DEBUG, "sr:%d br:%d spf:%d l:%d m:%d\n", *sampleRate, bitRate, *samplesPerFrame, layerID, mode);
+    //av_log(NULL, AV_LOG_DEBUG,
+    //       "sr:%d br:%d spf:%d l:%d m:%d\n",
+    //       *sampleRate, bitRate, *samplesPerFrame, layerID, mode);
 
     return *samplesPerFrame * bitRate / (bitsPerSlot * *sampleRate) + isPadded;
 }
 
-static int MP3lame_encode_frame(AVCodecContext *avctx,
-                                unsigned char *frame, int buf_size, void *data)
+static int MP3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame,
+                                int buf_size, void *data)
 {
     Mp3AudioContext *s = avctx->priv_data;
     int len;
@@ -178,7 +180,7 @@ static int MP3lame_encode_frame(AVCodecContext *avctx,
 
     /* lame 3.91 dies on '1-channel interleaved' data */
 
-    if(!data){
+    if (!data) {
         lame_result= lame_encode_flush(
                 s->gfp,
                 s->buffer + s->buffer_index,
@@ -237,32 +239,35 @@ static int MP3lame_encode_frame(AVCodecContext *avctx,
         }
     }
 
-    if(lame_result < 0){
-        if(lame_result==-1) {
+    if (lame_result < 0) {
+        if (lame_result == -1) {
             /* output buffer too small */
-            av_log(avctx, AV_LOG_ERROR, "lame: output buffer too small (buffer index: %d, free bytes: %d)\n", s->buffer_index, BUFFER_SIZE - s->buffer_index);
+            av_log(avctx, AV_LOG_ERROR,
+                   "lame: output buffer too small (buffer index: %d, free bytes: %d)\n",
+                   s->buffer_index, BUFFER_SIZE - s->buffer_index);
         }
         return -1;
     }
 
     s->buffer_index += lame_result;
 
-    if(s->buffer_index<4)
+    if (s->buffer_index < 4)
         return 0;
 
-    len= mp3len(s->buffer, NULL, NULL);
-//av_log(avctx, AV_LOG_DEBUG, "in:%d packet-len:%d index:%d\n", avctx->frame_size, len, s->buffer_index);
-    if(len <= s->buffer_index){
+    len = mp3len(s->buffer, NULL, NULL);
+    //av_log(avctx, AV_LOG_DEBUG, "in:%d packet-len:%d index:%d\n",
+    //       avctx->frame_size, len, s->buffer_index);
+    if (len <= s->buffer_index) {
         memcpy(frame, s->buffer, len);
         s->buffer_index -= len;
 
-        memmove(s->buffer, s->buffer+len, s->buffer_index);
-            //FIXME fix the audio codec API, so we do not need the memcpy()
-/*for(i=0; i<len; i++){
-    av_log(avctx, AV_LOG_DEBUG, "%2X ", frame[i]);
-}*/
+        memmove(s->buffer, s->buffer + len, s->buffer_index);
+        // FIXME fix the audio codec API, so we do not need the memcpy()
+        /*for(i=0; i<len; i++) {
+            av_log(avctx, AV_LOG_DEBUG, "%2X ", frame[i]);
+        }*/
         return len;
-    }else
+    } else
         return 0;
 }
 
@@ -280,7 +285,7 @@ static av_cold int MP3lame_encode_close(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(Mp3AudioContext, x)
 #define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
-    { "reservoir",      "Use bit reservoir.",   OFFSET(reservoir),  AV_OPT_TYPE_INT, { 1 }, 0, 1, AE },
+    { "reservoir", "Use bit reservoir.", OFFSET(reservoir), AV_OPT_TYPE_INT, { 1 }, 0, 1, AE },
     { NULL },
 };
 
@@ -292,20 +297,20 @@ static const AVClass libmp3lame_class = {
 };
 
 AVCodec ff_libmp3lame_encoder = {
-    .name           = "libmp3lame",
-    .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_MP3,
-    .priv_data_size = sizeof(Mp3AudioContext),
-    .init           = MP3lame_encode_init,
-    .encode         = MP3lame_encode_frame,
-    .close          = MP3lame_encode_close,
-    .capabilities= CODEC_CAP_DELAY,
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,
+    .name                  = "libmp3lame",
+    .type                  = AVMEDIA_TYPE_AUDIO,
+    .id                    = CODEC_ID_MP3,
+    .priv_data_size        = sizeof(Mp3AudioContext),
+    .init                  = MP3lame_encode_init,
+    .encode                = MP3lame_encode_frame,
+    .close                 = MP3lame_encode_close,
+    .capabilities          = CODEC_CAP_DELAY,
+    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
 #if 2147483647 == INT_MAX
     AV_SAMPLE_FMT_S32,
 #endif
-    AV_SAMPLE_FMT_NONE},
-    .supported_samplerates= sSampleRates,
-    .long_name= NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
-    .priv_class     = &libmp3lame_class,
+                                                             AV_SAMPLE_FMT_NONE },
+    .supported_samplerates = sSampleRates,
+    .long_name             = NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
+    .priv_class            = &libmp3lame_class,
 };
diff --git a/libavcodec/libvorbis.c b/libavcodec/libvorbis.c
index 53993e37198d87fb4cc36227e488abc3334f75cb..504447750e5c8dc79aea1e3199b08763bc3f1568 100644
--- a/libavcodec/libvorbis.c
+++ b/libavcodec/libvorbis.c
@@ -37,63 +37,65 @@
 
 #define OGGVORBIS_FRAME_SIZE 64
 
-#define BUFFER_SIZE (1024*64)
+#define BUFFER_SIZE (1024 * 64)
 
 typedef struct OggVorbisContext {
     AVClass *av_class;
-    vorbis_info vi ;
-    vorbis_dsp_state vd ;
-    vorbis_block vb ;
+    vorbis_info vi;
+    vorbis_dsp_state vd;
+    vorbis_block vb;
     uint8_t buffer[BUFFER_SIZE];
     int buffer_index;
     int eof;
 
     /* decoder */
-    vorbis_comment vc ;
+    vorbis_comment vc;
     ogg_packet op;
 
     double iblock;
-} OggVorbisContext ;
+} OggVorbisContext;
 
-static const AVOption options[]={
-{"iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, -15, 0, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM},
-{NULL}
+static const AVOption options[] = {
+    { "iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -15, 0, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
+    { NULL }
 };
 static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
 
-static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) {
-    OggVorbisContext *context = avccontext->priv_data ;
+static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext)
+{
+    OggVorbisContext *context = avccontext->priv_data;
     double cfreq;
 
-    if(avccontext->flags & CODEC_FLAG_QSCALE) {
+    if (avccontext->flags & CODEC_FLAG_QSCALE) {
         /* variable bitrate */
-        if(vorbis_encode_setup_vbr(vi, avccontext->channels,
-                avccontext->sample_rate,
-                avccontext->global_quality / (float)FF_QP2LAMBDA / 10.0))
+        if (vorbis_encode_setup_vbr(vi, avccontext->channels,
+                                    avccontext->sample_rate,
+                                    avccontext->global_quality / (float)FF_QP2LAMBDA / 10.0))
             return -1;
     } else {
         int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1;
         int maxrate = avccontext->rc_min_rate > 0 ? avccontext->rc_max_rate : -1;
 
         /* constant bitrate */
-        if(vorbis_encode_setup_managed(vi, avccontext->channels,
-                avccontext->sample_rate, minrate, avccontext->bit_rate, maxrate))
+        if (vorbis_encode_setup_managed(vi, avccontext->channels,
+                                        avccontext->sample_rate, minrate,
+                                        avccontext->bit_rate, maxrate))
             return -1;
 
         /* variable bitrate by estimate, disable slow rate management */
-        if(minrate == -1 && maxrate == -1)
-            if(vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL))
+        if (minrate == -1 && maxrate == -1)
+            if (vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL))
                 return -1;
     }
 
     /* cutoff frequency */
-    if(avccontext->cutoff > 0) {
+    if (avccontext->cutoff > 0) {
         cfreq = avccontext->cutoff / 1000.0;
-        if(vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq))
+        if (vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq))
             return -1;
     }
 
-    if(context->iblock){
+    if (context->iblock) {
         vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock);
     }
 
@@ -130,35 +132,39 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco
 }
 
 /* How many bytes are needed for a buffer of length 'l' */
-static int xiph_len(int l) { return (1 + l / 255 + l); }
+static int xiph_len(int l)
+{
+    return (1 + l / 255 + l);
+}
 
-static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext) {
-    OggVorbisContext *context = avccontext->priv_data ;
+static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext)
+{
+    OggVorbisContext *context = avccontext->priv_data;
     ogg_packet header, header_comm, header_code;
     uint8_t *p;
     unsigned int offset;
 
-    vorbis_info_init(&context->vi) ;
-    if(oggvorbis_init_encoder(&context->vi, avccontext) < 0) {
-        av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed\n") ;
-        return -1 ;
+    vorbis_info_init(&context->vi);
+    if (oggvorbis_init_encoder(&context->vi, avccontext) < 0) {
+        av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed\n");
+        return -1;
     }
-    vorbis_analysis_init(&context->vd, &context->vi) ;
-    vorbis_block_init(&context->vd, &context->vb) ;
+    vorbis_analysis_init(&context->vd, &context->vi);
+    vorbis_block_init(&context->vd, &context->vb);
 
     vorbis_comment_init(&context->vc);
-    vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT) ;
+    vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT);
 
     vorbis_analysis_headerout(&context->vd, &context->vc, &header,
-                                &header_comm, &header_code);
+                              &header_comm, &header_code);
 
-    avccontext->extradata_size=
+    avccontext->extradata_size =
         1 + xiph_len(header.bytes) + xiph_len(header_comm.bytes) +
         header_code.bytes;
     p = avccontext->extradata =
-      av_malloc(avccontext->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
-    p[0] = 2;
-    offset = 1;
+            av_malloc(avccontext->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+    p[0]    = 2;
+    offset  = 1;
     offset += av_xiphlacing(&p[offset], header.bytes);
     offset += av_xiphlacing(&p[offset], header_comm.bytes);
     memcpy(&p[offset], header.packet, header.bytes);
@@ -169,56 +175,57 @@ static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext) {
     offset += header_code.bytes;
     assert(offset == avccontext->extradata_size);
 
-/*    vorbis_block_clear(&context->vb);
+#if 0
+    vorbis_block_clear(&context->vb);
     vorbis_dsp_clear(&context->vd);
-    vorbis_info_clear(&context->vi);*/
+    vorbis_info_clear(&context->vi);
+#endif
     vorbis_comment_clear(&context->vc);
 
-    avccontext->frame_size = OGGVORBIS_FRAME_SIZE ;
+    avccontext->frame_size = OGGVORBIS_FRAME_SIZE;
 
-    avccontext->coded_frame= avcodec_alloc_frame();
-    avccontext->coded_frame->key_frame= 1;
+    avccontext->coded_frame = avcodec_alloc_frame();
+    avccontext->coded_frame->key_frame = 1;
 
-    return 0 ;
+    return 0;
 }
 
-
 static int oggvorbis_encode_frame(AVCodecContext *avccontext,
                                   unsigned char *packets,
-                           int buf_size, void *data)
+                                  int buf_size, void *data)
 {
-    OggVorbisContext *context = avccontext->priv_data ;
-    ogg_packet op ;
-    signed short *audio = data ;
+    OggVorbisContext *context = avccontext->priv_data;
+    ogg_packet op;
+    signed short *audio = data;
     int l;
 
-    if(data) {
+    if (data) {
         const int samples = avccontext->frame_size;
-        float **buffer ;
+        float **buffer;
         int c, channels = context->vi.channels;
 
-        buffer = vorbis_analysis_buffer(&context->vd, samples) ;
+        buffer = vorbis_analysis_buffer(&context->vd, samples);
         for (c = 0; c < channels; c++) {
             int co = (channels > 8) ? c :
-                ff_vorbis_encoding_channel_layout_offsets[channels-1][c];
-            for(l = 0 ; l < samples ; l++)
-                buffer[c][l]=audio[l*channels+co]/32768.f;
+                     ff_vorbis_encoding_channel_layout_offsets[channels - 1][c];
+            for (l = 0; l < samples; l++)
+                buffer[c][l] = audio[l * channels + co] / 32768.f;
         }
-        vorbis_analysis_wrote(&context->vd, samples) ;
+        vorbis_analysis_wrote(&context->vd, samples);
     } else {
-        if(!context->eof)
-            vorbis_analysis_wrote(&context->vd, 0) ;
+        if (!context->eof)
+            vorbis_analysis_wrote(&context->vd, 0);
         context->eof = 1;
     }
 
-    while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
+    while (vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
         vorbis_analysis(&context->vb, NULL);
-        vorbis_bitrate_addblock(&context->vb) ;
+        vorbis_bitrate_addblock(&context->vb);
 
-        while(vorbis_bitrate_flushpacket(&context->vd, &op)) {
+        while (vorbis_bitrate_flushpacket(&context->vd, &op)) {
             /* i'd love to say the following line is a hack, but sadly it's
              * not, apparently the end of stream decision is in libogg. */
-            if(op.bytes==1 && op.e_o_s)
+            if (op.bytes == 1 && op.e_o_s)
                 continue;
             if (context->buffer_index + sizeof(ogg_packet) + op.bytes > BUFFER_SIZE) {
                 av_log(avccontext, AV_LOG_ERROR, "libvorbis: buffer overflow.");
@@ -232,13 +239,13 @@ static int oggvorbis_encode_frame(AVCodecContext *avccontext,
         }
     }
 
-    l=0;
-    if(context->buffer_index){
-        ogg_packet *op2= (ogg_packet*)context->buffer;
+    l = 0;
+    if (context->buffer_index) {
+        ogg_packet *op2 = (ogg_packet *)context->buffer;
         op2->packet = context->buffer + sizeof(ogg_packet);
 
-        l=  op2->bytes;
-        avccontext->coded_frame->pts= av_rescale_q(op2->granulepos, (AVRational){1, avccontext->sample_rate}, avccontext->time_base);
+        l = op2->bytes;
+        avccontext->coded_frame->pts = av_rescale_q(op2->granulepos, (AVRational) { 1, avccontext->sample_rate }, avccontext->time_base);
         //FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate
 
         if (l > buf_size) {
@@ -255,12 +262,12 @@ static int oggvorbis_encode_frame(AVCodecContext *avccontext,
     return l;
 }
 
-
-static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) {
-    OggVorbisContext *context = avccontext->priv_data ;
+static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext)
+{
+    OggVorbisContext *context = avccontext->priv_data;
 /*  ogg_packet op ; */
 
-    vorbis_analysis_wrote(&context->vd, 0) ; /* notify vorbisenc this is EOF */
+    vorbis_analysis_wrote(&context->vd, 0);  /* notify vorbisenc this is EOF */
 
     vorbis_block_clear(&context->vb);
     vorbis_dsp_clear(&context->vd);
@@ -269,10 +276,9 @@ static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) {
     av_freep(&avccontext->coded_frame);
     av_freep(&avccontext->extradata);
 
-    return 0 ;
+    return 0;
 }
 
-
 AVCodec ff_libvorbis_encoder = {
     .name           = "libvorbis",
     .type           = AVMEDIA_TYPE_AUDIO,
@@ -282,7 +288,7 @@ AVCodec ff_libvorbis_encoder = {
     .encode         = oggvorbis_encode_frame,
     .close          = oggvorbis_encode_close,
     .capabilities   = CODEC_CAP_DELAY,
-    .sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
     .long_name      = NULL_IF_CONFIG_SMALL("libvorbis Vorbis"),
     .priv_class     = &class,
 };
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 7595822de44d3ba235c7462727b1378280b9a677..77e251e00c2a78ce0c1426e2d1c815b0a164654c 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -1106,7 +1106,7 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
 
 static int estimate_best_b_count(MpegEncContext *s)
 {
-    AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
+    AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
     AVCodecContext *c = avcodec_alloc_context3(NULL);
     AVFrame input[FF_MAX_B_FRAMES + 2];
     const int scale = s->avctx->brd_scale;
@@ -1144,21 +1144,22 @@ static int estimate_best_b_count(MpegEncContext *s)
     if (avcodec_open2(c, codec, NULL) < 0)
         return -1;
 
-    for(i=0; i<s->max_b_frames+2; i++){
-        int ysize= c->width*c->height;
-        int csize= (c->width/2)*(c->height/2);
-        Picture pre_input, *pre_input_ptr= i ? s->input_picture[i-1] : s->next_picture_ptr;
+    for (i = 0; i < s->max_b_frames + 2; i++) {
+        int ysize = c->width * c->height;
+        int csize = (c->width / 2) * (c->height / 2);
+        Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
+                                                s->next_picture_ptr;
 
         avcodec_get_frame_defaults(&input[i]);
-        input[i].data[0]= av_malloc(ysize + 2*csize);
-        input[i].data[1]= input[i].data[0] + ysize;
-        input[i].data[2]= input[i].data[1] + csize;
-        input[i].linesize[0]= c->width;
-        input[i].linesize[1]=
-        input[i].linesize[2]= c->width/2;
+        input[i].data[0]     = av_malloc(ysize + 2 * csize);
+        input[i].data[1]     = input[i].data[0] + ysize;
+        input[i].data[2]     = input[i].data[1] + csize;
+        input[i].linesize[0] = c->width;
+        input[i].linesize[1] =
+        input[i].linesize[2] = c->width / 2;
 
-        if(pre_input_ptr && (!i || s->input_picture[i-1])) {
-            pre_input= *pre_input_ptr;
+        if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
+            pre_input = *pre_input_ptr;
 
             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
                 pre_input.f.data[0] += INPLACE_OFFSET;
@@ -1166,45 +1167,54 @@ static int estimate_best_b_count(MpegEncContext *s)
                 pre_input.f.data[2] += INPLACE_OFFSET;
             }
 
-            s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width,      c->height);
-            s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1);
-            s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1);
+            s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
+                                 pre_input.f.data[0], pre_input.f.linesize[0],
+                                 c->width,      c->height);
+            s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
+                                 pre_input.f.data[1], pre_input.f.linesize[1],
+                                 c->width >> 1, c->height >> 1);
+            s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
+                                 pre_input.f.data[2], pre_input.f.linesize[2],
+                                 c->width >> 1, c->height >> 1);
         }
     }
 
-    for(j=0; j<s->max_b_frames+1; j++){
-        int64_t rd=0;
+    for (j = 0; j < s->max_b_frames + 1; j++) {
+        int64_t rd = 0;
 
-        if(!s->input_picture[j])
+        if (!s->input_picture[j])
             break;
 
-        c->error[0]= c->error[1]= c->error[2]= 0;
+        c->error[0] = c->error[1] = c->error[2] = 0;
 
-        input[0].pict_type= AV_PICTURE_TYPE_I;
-        input[0].quality= 1 * FF_QP2LAMBDA;
-        out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
-//        rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
+        input[0].pict_type = AV_PICTURE_TYPE_I;
+        input[0].quality   = 1 * FF_QP2LAMBDA;
+        out_size           = avcodec_encode_video(c, outbuf,
+                                                  outbuf_size, &input[0]);
+        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
 
-        for(i=0; i<s->max_b_frames+1; i++){
-            int is_p= i % (j+1) == j || i==s->max_b_frames;
+        for (i = 0; i < s->max_b_frames + 1; i++) {
+            int is_p = i % (j + 1) == j || i == s->max_b_frames;
 
-            input[i+1].pict_type= is_p ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
-            input[i+1].quality= is_p ? p_lambda : b_lambda;
-            out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
+            input[i + 1].pict_type = is_p ?
+                                     AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
+            input[i + 1].quality   = is_p ? p_lambda : b_lambda;
+            out_size = avcodec_encode_video(c, outbuf, outbuf_size,
+                                            &input[i + 1]);
             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
         }
 
         /* get the delayed frames */
-        while(out_size){
+        while (out_size) {
             out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
         }
 
         rd += c->error[0] + c->error[1] + c->error[2];
 
-        if(rd < best_rd){
-            best_rd= rd;
-            best_b_count= j;
+        if (rd < best_rd) {
+            best_rd = rd;
+            best_b_count = j;
         }
     }
 
@@ -1212,43 +1222,50 @@ static int estimate_best_b_count(MpegEncContext *s)
     avcodec_close(c);
     av_freep(&c);
 
-    for(i=0; i<s->max_b_frames+2; i++){
+    for (i = 0; i < s->max_b_frames + 2; i++) {
         av_freep(&input[i].data[0]);
     }
 
     return best_b_count;
 }
 
-static int select_input_picture(MpegEncContext *s){
+static int select_input_picture(MpegEncContext *s)
+{
     int i;
 
-    for(i=1; i<MAX_PICTURE_COUNT; i++)
-        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
-    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
+    for (i = 1; i < MAX_PICTURE_COUNT; i++)
+        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
+    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
 
     /* set next picture type & ordering */
-    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
-        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
-            s->reordered_input_picture[0]= s->input_picture[0];
+    if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
+        if (/*s->picture_in_gop_number >= s->gop_size ||*/
+            s->next_picture_ptr == NULL || s->intra_only) {
+            s->reordered_input_picture[0] = s->input_picture[0];
             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
-            s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
-        }else{
+            s->reordered_input_picture[0]->f.coded_picture_number =
+                s->coded_picture_number++;
+        } else {
             int b_frames;
 
-            if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
-                if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
-                //FIXME check that te gop check above is +-1 correct
-//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->f.data[0], s->input_picture[0]->pts);
+            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
+                if (s->picture_in_gop_number < s->gop_size &&
+                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
+                    // FIXME check that the gop check above is +-1 correct
+                    //av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n",
+                    //       s->input_picture[0]->f.data[0],
+                    //       s->input_picture[0]->pts);
 
                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
-                        for(i=0; i<4; i++)
+                        for (i = 0; i < 4; i++)
                             s->input_picture[0]->f.data[i] = NULL;
                         s->input_picture[0]->f.type = 0;
-                    }else{
-                        assert(   s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER
-                               || s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
+                    } else {
+                        assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
+                               s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
 
-                        s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]);
+                        s->avctx->release_buffer(s->avctx,
+                                                 (AVFrame *) s->input_picture[0]);
                     }
 
                     emms_c();
@@ -1258,14 +1275,14 @@ static int select_input_picture(MpegEncContext *s){
                 }
             }
 
-            if(s->flags&CODEC_FLAG_PASS2){
-                for(i=0; i<s->max_b_frames+1; i++){
+            if (s->flags & CODEC_FLAG_PASS2) {
+                for (i = 0; i < s->max_b_frames + 1; i++) {
                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
 
-                    if(pict_num >= s->rc_context.num_entries)
+                    if (pict_num >= s->rc_context.num_entries)
                         break;
-                    if(!s->input_picture[i]){
-                        s->rc_context.entry[pict_num-1].new_pict_type = AV_PICTURE_TYPE_P;
+                    if (!s->input_picture[i]) {
+                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                         break;
                     }
 
@@ -1274,121 +1291,141 @@ static int select_input_picture(MpegEncContext *s){
                 }
             }
 
-            if(s->avctx->b_frame_strategy==0){
-                b_frames= s->max_b_frames;
-                while(b_frames && !s->input_picture[b_frames]) b_frames--;
-            }else if(s->avctx->b_frame_strategy==1){
-                for(i=1; i<s->max_b_frames+1; i++){
-                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
-                        s->input_picture[i]->b_frame_score=
-                            get_intra_count(s, s->input_picture[i  ]->f.data[0],
-                                               s->input_picture[i-1]->f.data[0], s->linesize) + 1;
+            if (s->avctx->b_frame_strategy == 0) {
+                b_frames = s->max_b_frames;
+                while (b_frames && !s->input_picture[b_frames])
+                    b_frames--;
+            } else if (s->avctx->b_frame_strategy == 1) {
+                for (i = 1; i < s->max_b_frames + 1; i++) {
+                    if (s->input_picture[i] &&
+                        s->input_picture[i]->b_frame_score == 0) {
+                        s->input_picture[i]->b_frame_score =
+                            get_intra_count(s,
+                                            s->input_picture[i    ]->f.data[0],
+                                            s->input_picture[i - 1]->f.data[0],
+                                            s->linesize) + 1;
                     }
                 }
-                for(i=0; i<s->max_b_frames+1; i++){
-                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/s->avctx->b_sensitivity) break;
+                for (i = 0; i < s->max_b_frames + 1; i++) {
+                    if (s->input_picture[i] == NULL ||
+                        s->input_picture[i]->b_frame_score - 1 >
+                            s->mb_num / s->avctx->b_sensitivity)
+                        break;
                 }
 
-                b_frames= FFMAX(0, i-1);
+                b_frames = FFMAX(0, i - 1);
 
                 /* reset scores */
-                for(i=0; i<b_frames+1; i++){
-                    s->input_picture[i]->b_frame_score=0;
+                for (i = 0; i < b_frames + 1; i++) {
+                    s->input_picture[i]->b_frame_score = 0;
                 }
-            }else if(s->avctx->b_frame_strategy==2){
-                b_frames= estimate_best_b_count(s);
-            }else{
+            } else if (s->avctx->b_frame_strategy == 2) {
+                b_frames = estimate_best_b_count(s);
+            } else {
                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
-                b_frames=0;
+                b_frames = 0;
             }
 
             emms_c();
-//static int b_count=0;
-//b_count+= b_frames;
-//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
+            //static int b_count = 0;
+            //b_count += b_frames;
+            //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
 
-            for(i= b_frames - 1; i>=0; i--){
+            for (i = b_frames - 1; i >= 0; i--) {
                 int type = s->input_picture[i]->f.pict_type;
-                if(type && type != AV_PICTURE_TYPE_B)
-                    b_frames= i;
+                if (type && type != AV_PICTURE_TYPE_B)
+                    b_frames = i;
             }
-            if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
-                av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
+            if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
+                b_frames == s->max_b_frames) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "warning, too many b frames in a row\n");
             }
 
-            if(s->picture_in_gop_number + b_frames >= s->gop_size){
-              if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){
-                    b_frames= s->gop_size - s->picture_in_gop_number - 1;
-              }else{
-                if(s->flags & CODEC_FLAG_CLOSED_GOP)
-                    b_frames=0;
-                s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
-              }
+            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
+                if ((s->flags2 & CODEC_FLAG2_STRICT_GOP) &&
+                    s->gop_size > s->picture_in_gop_number) {
+                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
+                } else {
+                    if (s->flags & CODEC_FLAG_CLOSED_GOP)
+                        b_frames = 0;
+                    s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
+                }
             }
 
-            if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
-               && b_frames
-               && s->input_picture[b_frames]->f.pict_type== AV_PICTURE_TYPE_I)
+            if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
+                s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
                 b_frames--;
 
-            s->reordered_input_picture[0]= s->input_picture[b_frames];
+            s->reordered_input_picture[0] = s->input_picture[b_frames];
             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
-            s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
-            for(i=0; i<b_frames; i++){
+            s->reordered_input_picture[0]->f.coded_picture_number =
+                s->coded_picture_number++;
+            for (i = 0; i < b_frames; i++) {
                 s->reordered_input_picture[i + 1] = s->input_picture[i];
-                s->reordered_input_picture[i + 1]->f.pict_type = AV_PICTURE_TYPE_B;
-                s->reordered_input_picture[i + 1]->f.coded_picture_number = s->coded_picture_number++;
+                s->reordered_input_picture[i + 1]->f.pict_type =
+                    AV_PICTURE_TYPE_B;
+                s->reordered_input_picture[i + 1]->f.coded_picture_number =
+                    s->coded_picture_number++;
             }
         }
     }
 no_output_pic:
-    if(s->reordered_input_picture[0]){
-        s->reordered_input_picture[0]->f.reference = s->reordered_input_picture[0]->f.pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;
+    if (s->reordered_input_picture[0]) {
+        s->reordered_input_picture[0]->f.reference =
+           s->reordered_input_picture[0]->f.pict_type !=
+               AV_PICTURE_TYPE_B ? 3 : 0;
 
         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
 
-        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size) {
-            // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable
+        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
+            s->avctx->rc_buffer_size) {
+            // input is a shared pix, so we can't modify it -> alloc a new
+            // one & ensure that the shared one is reusable
 
             Picture *pic;
-            int i= ff_find_unused_picture(s, 0);
+            int i = ff_find_unused_picture(s, 0);
             if (i < 0)
                 return i;
             pic = &s->picture[i];
 
             pic->f.reference = s->reordered_input_picture[0]->f.reference;
-            if(ff_alloc_picture(s, pic, 0) < 0){
+            if (ff_alloc_picture(s, pic, 0) < 0) {
                 return -1;
             }
 
             /* mark us unused / free shared pic */
             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
-                s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
-            for(i=0; i<4; i++)
+                s->avctx->release_buffer(s->avctx,
+                                         (AVFrame *) s->reordered_input_picture[0]);
+            for (i = 0; i < 4; i++)
                 s->reordered_input_picture[0]->f.data[i] = NULL;
             s->reordered_input_picture[0]->f.type = 0;
 
-            copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
+            copy_picture_attributes(s, (AVFrame *) pic,
+                                    (AVFrame *) s->reordered_input_picture[0]);
 
-            s->current_picture_ptr= pic;
-        }else{
+            s->current_picture_ptr = pic;
+        } else {
             // input is not a shared pix -> reuse buffer for current_pix
 
-            assert(   s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_USER
-                   || s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
+            assert(s->reordered_input_picture[0]->f.type ==
+                       FF_BUFFER_TYPE_USER ||
+                   s->reordered_input_picture[0]->f.type ==
+                       FF_BUFFER_TYPE_INTERNAL);
 
-            s->current_picture_ptr= s->reordered_input_picture[0];
-            for(i=0; i<4; i++){
+            s->current_picture_ptr = s->reordered_input_picture[0];
+            for (i = 0; i < 4; i++) {
                 s->new_picture.f.data[i] += INPLACE_OFFSET;
             }
         }
         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
 
         s->picture_number = s->new_picture.f.display_picture_number;
-//printf("dpn:%d\n", s->picture_number);
-    }else{
-       memset(&s->new_picture, 0, sizeof(Picture));
+        //printf("dpn:%d\n", s->picture_number);
+    } else {
+        memset(&s->new_picture, 0, sizeof(Picture));
     }
     return 0;
 }
@@ -1397,33 +1434,34 @@ int MPV_encode_picture(AVCodecContext *avctx,
                        unsigned char *buf, int buf_size, void *data)
 {
     MpegEncContext *s = avctx->priv_data;
-    AVFrame *pic_arg = data;
+    AVFrame *pic_arg  = data;
     int i, stuffing_count, context_count = avctx->thread_count;
 
-    for(i=0; i<context_count; i++){
-        int start_y= s->thread_context[i]->start_mb_y;
-        int   end_y= s->thread_context[i]->  end_mb_y;
-        int h= s->mb_height;
-        uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h);
-        uint8_t *end  = buf + (size_t)(((int64_t) buf_size)*  end_y/h);
+    for (i = 0; i < context_count; i++) {
+        int start_y = s->thread_context[i]->start_mb_y;
+        int   end_y = s->thread_context[i]->  end_mb_y;
+        int h       = s->mb_height;
+        uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
+        uint8_t *end   = buf + (size_t)(((int64_t) buf_size) *   end_y / h);
 
         init_put_bits(&s->thread_context[i]->pb, start, end - start);
     }
 
     s->picture_in_gop_number++;
 
-    if(load_input_picture(s, pic_arg) < 0)
+    if (load_input_picture(s, pic_arg) < 0)
         return -1;
 
-    if(select_input_picture(s) < 0){
+    if (select_input_picture(s) < 0) {
         return -1;
     }
 
     /* output? */
     if (s->new_picture.f.data[0]) {
         s->pict_type = s->new_picture.f.pict_type;
-//emms_c();
-//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
+        //emms_c();
+        //printf("qs:%f %f %d\n", s->new_picture.quality,
+        //       s->current_picture.quality, s->qscale);
         MPV_frame_start(s, avctx);
 vbv_retry:
         if (encode_picture(s, s->picture_number) < 0)
@@ -1435,7 +1473,8 @@ vbv_retry:
         avctx->i_tex_bits  = s->i_tex_bits;
         avctx->p_tex_bits  = s->p_tex_bits;
         avctx->i_count     = s->i_count;
-        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
+        // FIXME f/b_count in avctx
+        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
         avctx->skip_count  = s->skip_count;
 
         MPV_frame_end(s);
@@ -1443,29 +1482,37 @@ vbv_retry:
         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
             ff_mjpeg_encode_picture_trailer(s);
 
-        if(avctx->rc_buffer_size){
-            RateControlContext *rcc= &s->rc_context;
-            int max_size= rcc->buffer_index * avctx->rc_max_available_vbv_use;
+        if (avctx->rc_buffer_size) {
+            RateControlContext *rcc = &s->rc_context;
+            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
 
-            if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){
-                s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale);
-                if(s->adaptive_quant){
+            if (put_bits_count(&s->pb) > max_size &&
+                s->lambda < s->avctx->lmax) {
+                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
+                                       (s->qscale + 1) / s->qscale);
+                if (s->adaptive_quant) {
                     int i;
-                    for(i=0; i<s->mb_height*s->mb_stride; i++)
-                        s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
+                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
+                        s->lambda_table[i] =
+                            FFMAX(s->lambda_table[i] + 1,
+                                  s->lambda_table[i] * (s->qscale + 1) /
+                                  s->qscale);
                 }
-                s->mb_skipped = 0;        //done in MPV_frame_start()
-                if(s->pict_type==AV_PICTURE_TYPE_P){ //done in encode_picture() so we must undo it
-                    if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
+                s->mb_skipped = 0;        // done in MPV_frame_start()
+                // done in encode_picture() so we must undo it
+                if (s->pict_type == AV_PICTURE_TYPE_P) {
+                    if (s->flipflop_rounding          ||
+                        s->codec_id == CODEC_ID_H263P ||
+                        s->codec_id == CODEC_ID_MPEG4)
                         s->no_rounding ^= 1;
                 }
-                if(s->pict_type!=AV_PICTURE_TYPE_B){
-                    s->time_base= s->last_time_base;
-                    s->last_non_b_time= s->time - s->pp_time;
+                if (s->pict_type != AV_PICTURE_TYPE_B) {
+                    s->time_base       = s->last_time_base;
+                    s->last_non_b_time = s->time - s->pp_time;
                 }
-//                av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
-                for(i=0; i<context_count; i++){
-                    PutBitContext *pb= &s->thread_context[i]->pb;
+                //av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
+                for (i = 0; i < context_count; i++) {
+                    PutBitContext *pb = &s->thread_context[i]->pb;
                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                 }
                 goto vbv_retry;
@@ -1474,30 +1521,33 @@ vbv_retry:
             assert(s->avctx->rc_max_rate);
         }
 
-        if(s->flags&CODEC_FLAG_PASS1)
+        if (s->flags & CODEC_FLAG_PASS1)
             ff_write_pass1_stats(s);
 
-        for(i=0; i<4; i++){
-            s->current_picture_ptr->f.error[i]  = s->current_picture.f.error[i];
-            avctx->error[i]                        += s->current_picture_ptr->f.error[i];
+        for (i = 0; i < 4; i++) {
+            s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
+            avctx->error[i] += s->current_picture_ptr->f.error[i];
         }
 
-        if(s->flags&CODEC_FLAG_PASS1)
-            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb));
+        if (s->flags & CODEC_FLAG_PASS1)
+            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
+                   avctx->i_tex_bits + avctx->p_tex_bits ==
+                       put_bits_count(&s->pb));
         flush_put_bits(&s->pb);
         s->frame_bits  = put_bits_count(&s->pb);
 
-        stuffing_count= ff_vbv_update(s, s->frame_bits);
-        if(stuffing_count){
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
+        stuffing_count = ff_vbv_update(s, s->frame_bits);
+        if (stuffing_count) {
+            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
+                    stuffing_count + 50) {
                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                 return -1;
             }
 
-            switch(s->codec_id){
+            switch (s->codec_id) {
             case CODEC_ID_MPEG1VIDEO:
             case CODEC_ID_MPEG2VIDEO:
-                while(stuffing_count--){
+                while (stuffing_count--) {
                     put_bits(&s->pb, 8, 0);
                 }
             break;
@@ -1505,7 +1555,7 @@ vbv_retry:
                 put_bits(&s->pb, 16, 0);
                 put_bits(&s->pb, 16, 0x1C3);
                 stuffing_count -= 4;
-                while(stuffing_count--){
+                while (stuffing_count--) {
                     put_bits(&s->pb, 8, 0xFF);
                 }
             break;
@@ -1517,245 +1567,281 @@ vbv_retry:
         }
 
         /* update mpeg1/2 vbv_delay for CBR */
-        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
-           && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
+        if (s->avctx->rc_max_rate                          &&
+            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
+            s->out_format == FMT_MPEG1                     &&
+            90000LL * (avctx->rc_buffer_size - 1) <=
+                s->avctx->rc_max_rate * 0xFFFFLL) {
             int vbv_delay, min_delay;
-            double inbits = s->avctx->rc_max_rate*av_q2d(s->avctx->time_base);
-            int    minbits= s->frame_bits - 8*(s->vbv_delay_ptr - s->pb.buf - 1);
-            double bits   = s->rc_context.buffer_index + minbits - inbits;
+            double inbits  = s->avctx->rc_max_rate *
+                             av_q2d(s->avctx->time_base);
+            int    minbits = s->frame_bits - 8 *
+                             (s->vbv_delay_ptr - s->pb.buf - 1);
+            double bits    = s->rc_context.buffer_index + minbits - inbits;
 
-            if(bits<0)
-                av_log(s->avctx, AV_LOG_ERROR, "Internal error, negative bits\n");
+            if (bits < 0)
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Internal error, negative bits\n");
 
-            assert(s->repeat_first_field==0);
+            assert(s->repeat_first_field == 0);
 
-            vbv_delay=     bits * 90000                               / s->avctx->rc_max_rate;
-            min_delay= (minbits * 90000LL + s->avctx->rc_max_rate - 1)/ s->avctx->rc_max_rate;
+            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
+            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
+                        s->avctx->rc_max_rate;
 
-            vbv_delay= FFMAX(vbv_delay, min_delay);
+            vbv_delay = FFMAX(vbv_delay, min_delay);
 
             assert(vbv_delay < 0xFFFF);
 
             s->vbv_delay_ptr[0] &= 0xF8;
-            s->vbv_delay_ptr[0] |= vbv_delay>>13;
-            s->vbv_delay_ptr[1]  = vbv_delay>>5;
+            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
+            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
             s->vbv_delay_ptr[2] &= 0x07;
-            s->vbv_delay_ptr[2] |= vbv_delay<<3;
-            avctx->vbv_delay = vbv_delay*300;
+            s->vbv_delay_ptr[2] |= vbv_delay << 3;
+            avctx->vbv_delay     = vbv_delay * 300;
         }
-        s->total_bits += s->frame_bits;
+        s->total_bits     += s->frame_bits;
         avctx->frame_bits  = s->frame_bits;
-    }else{
+    } else {
         assert((put_bits_ptr(&s->pb) == s->pb.buf));
-        s->frame_bits=0;
+        s->frame_bits = 0;
     }
-    assert((s->frame_bits&7)==0);
+    assert((s->frame_bits & 7) == 0);
 
-    return s->frame_bits/8;
+    return s->frame_bits / 8;
 }
 
-static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
+static inline void dct_single_coeff_elimination(MpegEncContext *s,
+                                                int n, int threshold)
 {
-    static const char tab[64]=
-        {3,2,2,1,1,1,1,1,
-         1,1,1,1,1,1,1,1,
-         1,1,1,1,1,1,1,1,
-         0,0,0,0,0,0,0,0,
-         0,0,0,0,0,0,0,0,
-         0,0,0,0,0,0,0,0,
-         0,0,0,0,0,0,0,0,
-         0,0,0,0,0,0,0,0};
-    int score=0;
-    int run=0;
+    static const char tab[64] = {
+        3, 2, 2, 1, 1, 1, 1, 1,
+        1, 1, 1, 1, 1, 1, 1, 1,
+        1, 1, 1, 1, 1, 1, 1, 1,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0
+    };
+    int score = 0;
+    int run = 0;
     int i;
-    DCTELEM *block= s->block[n];
-    const int last_index= s->block_last_index[n];
+    DCTELEM *block = s->block[n];
+    const int last_index = s->block_last_index[n];
     int skip_dc;
 
-    if(threshold<0){
-        skip_dc=0;
-        threshold= -threshold;
-    }else
-        skip_dc=1;
+    if (threshold < 0) {
+        skip_dc = 0;
+        threshold = -threshold;
+    } else
+        skip_dc = 1;
 
     /* Are all we could set to zero already zero? */
-    if(last_index<=skip_dc - 1) return;
+    if (last_index <= skip_dc - 1)
+        return;
 
-    for(i=0; i<=last_index; i++){
+    for (i = 0; i <= last_index; i++) {
         const int j = s->intra_scantable.permutated[i];
         const int level = FFABS(block[j]);
-        if(level==1){
-            if(skip_dc && i==0) continue;
-            score+= tab[run];
-            run=0;
-        }else if(level>1){
+        if (level == 1) {
+            if (skip_dc && i == 0)
+                continue;
+            score += tab[run];
+            run = 0;
+        } else if (level > 1) {
             return;
-        }else{
+        } else {
             run++;
         }
     }
-    if(score >= threshold) return;
-    for(i=skip_dc; i<=last_index; i++){
+    if (score >= threshold)
+        return;
+    for (i = skip_dc; i <= last_index; i++) {
         const int j = s->intra_scantable.permutated[i];
-        block[j]=0;
+        block[j] = 0;
     }
-    if(block[0]) s->block_last_index[n]= 0;
-    else         s->block_last_index[n]= -1;
+    if (block[0])
+        s->block_last_index[n] = 0;
+    else
+        s->block_last_index[n] = -1;
 }
 
-static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
+static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
+                               int last_index)
 {
     int i;
-    const int maxlevel= s->max_qcoeff;
-    const int minlevel= s->min_qcoeff;
-    int overflow=0;
+    const int maxlevel = s->max_qcoeff;
+    const int minlevel = s->min_qcoeff;
+    int overflow = 0;
 
-    if(s->mb_intra){
-        i=1; //skip clipping of intra dc
-    }else
-        i=0;
+    if (s->mb_intra) {
+        i = 1; // skip clipping of intra dc
+    } else
+        i = 0;
 
-    for(;i<=last_index; i++){
-        const int j= s->intra_scantable.permutated[i];
+    for (; i <= last_index; i++) {
+        const int j = s->intra_scantable.permutated[i];
         int level = block[j];
 
-        if     (level>maxlevel){
-            level=maxlevel;
+        if (level > maxlevel) {
+            level = maxlevel;
             overflow++;
-        }else if(level<minlevel){
-            level=minlevel;
+        } else if (level < minlevel) {
+            level = minlevel;
             overflow++;
         }
 
-        block[j]= level;
+        block[j] = level;
     }
 
-    if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
-        av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel);
+    if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
+        av_log(s->avctx, AV_LOG_INFO,
+               "warning, clipping %d dct coefficients to %d..%d\n",
+               overflow, minlevel, maxlevel);
 }
 
-static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride){
+static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
+{
     int x, y;
-//FIXME optimize
-    for(y=0; y<8; y++){
-        for(x=0; x<8; x++){
+    // FIXME optimize
+    for (y = 0; y < 8; y++) {
+        for (x = 0; x < 8; x++) {
             int x2, y2;
-            int sum=0;
-            int sqr=0;
-            int count=0;
+            int sum = 0;
+            int sqr = 0;
+            int count = 0;
 
-            for(y2= FFMAX(y-1, 0); y2 < FFMIN(8, y+2); y2++){
-                for(x2= FFMAX(x-1, 0); x2 < FFMIN(8, x+2); x2++){
-                    int v= ptr[x2 + y2*stride];
+            for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
+                for (x2 = FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
+                    int v = ptr[x2 + y2 * stride];
                     sum += v;
-                    sqr += v*v;
+                    sqr += v * v;
                     count++;
                 }
             }
-            weight[x + 8*y]= (36*ff_sqrt(count*sqr - sum*sum)) / count;
+            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
         }
     }
 }
 
-static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count)
+static av_always_inline void encode_mb_internal(MpegEncContext *s,
+                                                int motion_x, int motion_y,
+                                                int mb_block_height,
+                                                int mb_block_count)
 {
     int16_t weight[8][64];
     DCTELEM orig[8][64];
-    const int mb_x= s->mb_x;
-    const int mb_y= s->mb_y;
+    const int mb_x = s->mb_x;
+    const int mb_y = s->mb_y;
     int i;
     int skip_dct[8];
-    int dct_offset   = s->linesize*8; //default for progressive frames
+    int dct_offset = s->linesize * 8; // default for progressive frames
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
     int wrap_y, wrap_c;
 
-    for(i=0; i<mb_block_count; i++) skip_dct[i]=s->skipdct;
+    for (i = 0; i < mb_block_count; i++)
+        skip_dct[i] = s->skipdct;
 
-    if(s->adaptive_quant){
-        const int last_qp= s->qscale;
-        const int mb_xy= mb_x + mb_y*s->mb_stride;
+    if (s->adaptive_quant) {
+        const int last_qp = s->qscale;
+        const int mb_xy = mb_x + mb_y * s->mb_stride;
 
-        s->lambda= s->lambda_table[mb_xy];
+        s->lambda = s->lambda_table[mb_xy];
         update_qscale(s);
 
-        if(!(s->flags&CODEC_FLAG_QP_RD)){
+        if (!(s->flags & CODEC_FLAG_QP_RD)) {
             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
-            s->dquant= s->qscale - last_qp;
+            s->dquant = s->qscale - last_qp;
 
-            if(s->out_format==FMT_H263){
-                s->dquant= av_clip(s->dquant, -2, 2);
+            if (s->out_format == FMT_H263) {
+                s->dquant = av_clip(s->dquant, -2, 2);
 
-                if(s->codec_id==CODEC_ID_MPEG4){
-                    if(!s->mb_intra){
-                        if(s->pict_type == AV_PICTURE_TYPE_B){
-                            if(s->dquant&1 || s->mv_dir&MV_DIRECT)
-                                s->dquant= 0;
+                if (s->codec_id == CODEC_ID_MPEG4) {
+                    if (!s->mb_intra) {
+                        if (s->pict_type == AV_PICTURE_TYPE_B) {
+                            if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
+                                s->dquant = 0;
                         }
-                        if(s->mv_type==MV_TYPE_8X8)
-                            s->dquant=0;
+                        if (s->mv_type == MV_TYPE_8X8)
+                            s->dquant = 0;
                     }
                 }
             }
         }
         ff_set_qscale(s, last_qp + s->dquant);
-    }else if(s->flags&CODEC_FLAG_QP_RD)
+    } else if (s->flags & CODEC_FLAG_QP_RD)
         ff_set_qscale(s, s->qscale + s->dquant);
 
     wrap_y = s->linesize;
     wrap_c = s->uvlinesize;
-    ptr_y  = s->new_picture.f.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
-    ptr_cb = s->new_picture.f.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
-    ptr_cr = s->new_picture.f.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+    ptr_y  = s->new_picture.f.data[0] +
+             (mb_y * 16 * wrap_y)              + mb_x * 16;
+    ptr_cb = s->new_picture.f.data[1] +
+             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+    ptr_cr = s->new_picture.f.data[2] +
+             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
 
     if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != CODEC_ID_AMV){
-        uint8_t *ebuf= s->edge_emu_buffer + 32;
-        s->dsp.emulated_edge_mc(ebuf            , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width   , s->height);
-        ptr_y= ebuf;
-        s->dsp.emulated_edge_mc(ebuf+18*wrap_y  , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
-        ptr_cb= ebuf+18*wrap_y;
-        s->dsp.emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
-        ptr_cr= ebuf+18*wrap_y+8;
+        uint8_t *ebuf = s->edge_emu_buffer + 32;
+        s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
+                                mb_y * 16, s->width, s->height);
+        ptr_y = ebuf;
+        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
+                                mb_block_height, mb_x * 8, mb_y * 8,
+                                s->width >> 1, s->height >> 1);
+        ptr_cb = ebuf + 18 * wrap_y;
+        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
+                                mb_block_height, mb_x * 8, mb_y * 8,
+                                s->width >> 1, s->height >> 1);
+        ptr_cr = ebuf + 18 * wrap_y + 8;
     }
 
     if (s->mb_intra) {
-        if(s->flags&CODEC_FLAG_INTERLACED_DCT){
+        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
             int progressive_score, interlaced_score;
 
-            s->interlaced_dct=0;
-            progressive_score= s->dsp.ildct_cmp[4](s, ptr_y           , NULL, wrap_y, 8)
-                              +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y*8, NULL, wrap_y, 8) - 400;
-
-            if(progressive_score > 0){
-                interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y           , NULL, wrap_y*2, 8)
-                                  +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y  , NULL, wrap_y*2, 8);
-                if(progressive_score > interlaced_score){
-                    s->interlaced_dct=1;
-
-                    dct_offset= wrap_y;
-                    wrap_y<<=1;
+            s->interlaced_dct = 0;
+            progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
+                                                    NULL, wrap_y, 8) +
+                                s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
+                                                    NULL, wrap_y, 8) - 400;
+
+            if (progressive_score > 0) {
+                interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
+                                                       NULL, wrap_y * 2, 8) +
+                                   s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
+                                                       NULL, wrap_y * 2, 8);
+                if (progressive_score > interlaced_score) {
+                    s->interlaced_dct = 1;
+
+                    dct_offset = wrap_y;
+                    wrap_y <<= 1;
                     if (s->chroma_format == CHROMA_422)
-                        wrap_c<<=1;
+                        wrap_c <<= 1;
                 }
             }
         }
 
-        s->dsp.get_pixels(s->block[0], ptr_y                 , wrap_y);
-        s->dsp.get_pixels(s->block[1], ptr_y              + 8, wrap_y);
-        s->dsp.get_pixels(s->block[2], ptr_y + dct_offset    , wrap_y);
-        s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
+        s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
+        s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
+        s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
+        s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
 
-        if(s->flags&CODEC_FLAG_GRAY){
-            skip_dct[4]= 1;
-            skip_dct[5]= 1;
-        }else{
+        if (s->flags & CODEC_FLAG_GRAY) {
+            skip_dct[4] = 1;
+            skip_dct[5] = 1;
+        } else {
             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
-            if(!s->chroma_y_shift){ /* 422 */
-                s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c);
-                s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c);
+            if (!s->chroma_y_shift) { /* 422 */
+                s->dsp.get_pixels(s->block[6],
+                                  ptr_cb + (dct_offset >> 1), wrap_c);
+                s->dsp.get_pixels(s->block[7],
+                                  ptr_cr + (dct_offset >> 1), wrap_c);
             }
         }
-    }else{
+    } else {
         op_pixels_func (*op_pix)[4];
         qpel_mc_func (*op_qpix)[16];
         uint8_t *dest_y, *dest_cb, *dest_cr;
@@ -1764,146 +1850,197 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
         dest_cb = s->dest[1];
         dest_cr = s->dest[2];
 
-        if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
-            op_pix = s->dsp.put_pixels_tab;
-            op_qpix= s->dsp.put_qpel_pixels_tab;
-        }else{
-            op_pix = s->dsp.put_no_rnd_pixels_tab;
-            op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
+        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
+            op_pix  = s->dsp.put_pixels_tab;
+            op_qpix = s->dsp.put_qpel_pixels_tab;
+        } else {
+            op_pix  = s->dsp.put_no_rnd_pixels_tab;
+            op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
         }
 
         if (s->mv_dir & MV_DIR_FORWARD) {
-            MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
-            op_pix = s->dsp.avg_pixels_tab;
-            op_qpix= s->dsp.avg_qpel_pixels_tab;
+            MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data,
+                       op_pix, op_qpix);
+            op_pix  = s->dsp.avg_pixels_tab;
+            op_qpix = s->dsp.avg_qpel_pixels_tab;
         }
         if (s->mv_dir & MV_DIR_BACKWARD) {
-            MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
+            MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data,
+                       op_pix, op_qpix);
         }
 
-        if(s->flags&CODEC_FLAG_INTERLACED_DCT){
+        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
             int progressive_score, interlaced_score;
 
-            s->interlaced_dct=0;
-            progressive_score= s->dsp.ildct_cmp[0](s, dest_y           , ptr_y           , wrap_y, 8)
-                              +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400;
-
-            if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400;
-
-            if(progressive_score>0){
-                interlaced_score = s->dsp.ildct_cmp[0](s, dest_y           , ptr_y           , wrap_y*2, 8)
-                                  +s->dsp.ildct_cmp[0](s, dest_y + wrap_y  , ptr_y + wrap_y  , wrap_y*2, 8);
-
-                if(progressive_score > interlaced_score){
-                    s->interlaced_dct=1;
-
-                    dct_offset= wrap_y;
-                    wrap_y<<=1;
+            s->interlaced_dct = 0;
+            progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
+                                                    ptr_y,              wrap_y,
+                                                    8) +
+                                s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
+                                                    ptr_y + wrap_y * 8, wrap_y,
+                                                    8) - 400;
+
+            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
+                progressive_score -= 400;
+
+            if (progressive_score > 0) {
+                interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
+                                                       ptr_y,
+                                                       wrap_y * 2, 8) +
+                                   s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
+                                                       ptr_y + wrap_y,
+                                                       wrap_y * 2, 8);
+
+                if (progressive_score > interlaced_score) {
+                    s->interlaced_dct = 1;
+
+                    dct_offset = wrap_y;
+                    wrap_y <<= 1;
                     if (s->chroma_format == CHROMA_422)
-                        wrap_c<<=1;
+                        wrap_c <<= 1;
                 }
             }
         }
 
-        s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
-        s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
-        s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
-        s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
+        s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
+        s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
+        s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
+                           dest_y + dct_offset, wrap_y);
+        s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
+                           dest_y + dct_offset + 8, wrap_y);
 
-        if(s->flags&CODEC_FLAG_GRAY){
-            skip_dct[4]= 1;
-            skip_dct[5]= 1;
-        }else{
+        if (s->flags & CODEC_FLAG_GRAY) {
+            skip_dct[4] = 1;
+            skip_dct[5] = 1;
+        } else {
             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
-            if(!s->chroma_y_shift){ /* 422 */
-                s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c);
-                s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c);
+            if (!s->chroma_y_shift) { /* 422 */
+                s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
+                                   dest_cb + (dct_offset >> 1), wrap_c);
+                s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
+                                   dest_cr + (dct_offset >> 1), wrap_c);
             }
         }
         /* pre quantization */
-        if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
-            //FIXME optimize
-            if(s->dsp.sad[1](NULL, ptr_y               , dest_y               , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1;
-            if(s->dsp.sad[1](NULL, ptr_y            + 8, dest_y            + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1;
-            if(s->dsp.sad[1](NULL, ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1;
-            if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
-            if(s->dsp.sad[1](NULL, ptr_cb              , dest_cb              , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
-            if(s->dsp.sad[1](NULL, ptr_cr              , dest_cr              , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
-            if(!s->chroma_y_shift){ /* 422 */
-                if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1;
-                if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1;
+        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
+                2 * s->qscale * s->qscale) {
+            // FIXME optimize
+            if (s->dsp.sad[1](NULL, ptr_y, dest_y,
+                              wrap_y, 8) < 20 * s->qscale)
+                skip_dct[0] = 1;
+            if (s->dsp.sad[1](NULL, ptr_y + 8,
+                              dest_y + 8, wrap_y, 8) < 20 * s->qscale)
+                skip_dct[1] = 1;
+            if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
+                              dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
+                skip_dct[2] = 1;
+            if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
+                              dest_y + dct_offset + 8,
+                              wrap_y, 8) < 20 * s->qscale)
+                skip_dct[3] = 1;
+            if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
+                              wrap_c, 8) < 20 * s->qscale)
+                skip_dct[4] = 1;
+            if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
+                              wrap_c, 8) < 20 * s->qscale)
+                skip_dct[5] = 1;
+            if (!s->chroma_y_shift) { /* 422 */
+                if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
+                                  dest_cb + (dct_offset >> 1),
+                                  wrap_c, 8) < 20 * s->qscale)
+                    skip_dct[6] = 1;
+                if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
+                                  dest_cr + (dct_offset >> 1),
+                                  wrap_c, 8) < 20 * s->qscale)
+                    skip_dct[7] = 1;
             }
         }
     }
 
-    if(s->avctx->quantizer_noise_shaping){
-        if(!skip_dct[0]) get_visual_weight(weight[0], ptr_y                 , wrap_y);
-        if(!skip_dct[1]) get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
-        if(!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
-        if(!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
-        if(!skip_dct[4]) get_visual_weight(weight[4], ptr_cb                , wrap_c);
-        if(!skip_dct[5]) get_visual_weight(weight[5], ptr_cr                , wrap_c);
-        if(!s->chroma_y_shift){ /* 422 */
-            if(!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
-            if(!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
+    if (s->avctx->quantizer_noise_shaping) {
+        if (!skip_dct[0])
+            get_visual_weight(weight[0], ptr_y                 , wrap_y);
+        if (!skip_dct[1])
+            get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
+        if (!skip_dct[2])
+            get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
+        if (!skip_dct[3])
+            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
+        if (!skip_dct[4])
+            get_visual_weight(weight[4], ptr_cb                , wrap_c);
+        if (!skip_dct[5])
+            get_visual_weight(weight[5], ptr_cr                , wrap_c);
+        if (!s->chroma_y_shift) { /* 422 */
+            if (!skip_dct[6])
+                get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
+                                  wrap_c);
+            if (!skip_dct[7])
+                get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
+                                  wrap_c);
         }
-        memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count);
+        memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
     }
 
     /* DCT & quantize */
-    assert(s->out_format!=FMT_MJPEG || s->qscale==8);
+    assert(s->out_format != FMT_MJPEG || s->qscale == 8);
     {
-        for(i=0;i<mb_block_count;i++) {
-            if(!skip_dct[i]){
+        for (i = 0; i < mb_block_count; i++) {
+            if (!skip_dct[i]) {
                 int overflow;
                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
-            // FIXME we could decide to change to quantizer instead of clipping
-            // JS: I don't think that would be a good idea it could lower quality instead
-            //     of improve it. Just INTRADC clipping deserves changes in quantizer
-                if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
-            }else
-                s->block_last_index[i]= -1;
-        }
-        if(s->avctx->quantizer_noise_shaping){
-            for(i=0;i<mb_block_count;i++) {
-                if(!skip_dct[i]){
-                    s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale);
+                // FIXME we could decide to change the quantizer instead of
+                // clipping
+                // JS: I don't think that would be a good idea, it could lower
+                //     quality instead of improving it. Just INTRADC clipping
+                //     deserves changes in quantizer
+                if (overflow)
+                    clip_coeffs(s, s->block[i], s->block_last_index[i]);
+            } else
+                s->block_last_index[i] = -1;
+        }
+        if (s->avctx->quantizer_noise_shaping) {
+            for (i = 0; i < mb_block_count; i++) {
+                if (!skip_dct[i]) {
+                    s->block_last_index[i] =
+                        dct_quantize_refine(s, s->block[i], weight[i],
+                                            orig[i], i, s->qscale);
                 }
             }
         }
 
-        if(s->luma_elim_threshold && !s->mb_intra)
-            for(i=0; i<4; i++)
+        if (s->luma_elim_threshold && !s->mb_intra)
+            for (i = 0; i < 4; i++)
                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
-        if(s->chroma_elim_threshold && !s->mb_intra)
-            for(i=4; i<mb_block_count; i++)
+        if (s->chroma_elim_threshold && !s->mb_intra)
+            for (i = 4; i < mb_block_count; i++)
                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
 
-        if(s->flags & CODEC_FLAG_CBP_RD){
-            for(i=0;i<mb_block_count;i++) {
-                if(s->block_last_index[i] == -1)
-                    s->coded_score[i]= INT_MAX/256;
+        if (s->flags & CODEC_FLAG_CBP_RD) {
+            for (i = 0; i < mb_block_count; i++) {
+                if (s->block_last_index[i] == -1)
+                    s->coded_score[i] = INT_MAX / 256;
             }
         }
     }
 
-    if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
-        s->block_last_index[4]=
-        s->block_last_index[5]= 0;
-        s->block[4][0]=
-        s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
+    if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
+        s->block_last_index[4] =
+        s->block_last_index[5] = 0;
+        s->block[4][0] =
+        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
     }
 
-    //non c quantize code returns incorrect block_last_index FIXME
-    if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
-        for(i=0; i<mb_block_count; i++){
+    // non-C quantize code returns incorrect block_last_index FIXME
+    if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
+        for (i = 0; i < mb_block_count; i++) {
             int j;
-            if(s->block_last_index[i]>0){
-                for(j=63; j>0; j--){
-                    if(s->block[i][ s->intra_scantable.permutated[j] ]) break;
+            if (s->block_last_index[i] > 0) {
+                for (j = 63; j > 0; j--) {
+                    if (s->block[i][s->intra_scantable.permutated[j]])
+                        break;
                 }
-                s->block_last_index[i]= j;
+                s->block_last_index[i] = j;
             }
         }
     }
@@ -1965,7 +2102,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext
     /* mpeg1 */
     d->mb_skip_run= s->mb_skip_run;
     for(i=0; i<3; i++)
-        d->last_dc[i]= s->last_dc[i];
+        d->last_dc[i] = s->last_dc[i];
 
     /* statistics */
     d->mv_bits= s->mv_bits;
@@ -1994,7 +2131,7 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *
     /* mpeg1 */
     d->mb_skip_run= s->mb_skip_run;
     for(i=0; i<3; i++)
-        d->last_dc[i]= s->last_dc[i];
+        d->last_dc[i] = s->last_dc[i];
 
     /* statistics */
     d->mv_bits= s->mv_bits;
diff --git a/libavformat/applehttp.c b/libavformat/applehttp.c
index 99febc6d95aa36fa11cc8f129ea9f897889ed17d..5733f5fb27222fcd25032c14045472bf937711b8 100644
--- a/libavformat/applehttp.c
+++ b/libavformat/applehttp.c
@@ -376,13 +376,23 @@ static int read_data(void *opaque, uint8_t *buf, int buf_size)
 
 restart:
     if (!v->input) {
-reload:
-        /* If this is a live stream and target_duration has elapsed since
+        /* If this is a live stream and the reload interval has elapsed since
          * the last playlist reload, reload the variant playlists now. */
+        int64_t reload_interval = v->n_segments > 0 ?
+                                  v->segments[v->n_segments - 1]->duration :
+                                  v->target_duration;
+        reload_interval *= 1000000;
+
+reload:
         if (!v->finished &&
-            av_gettime() - v->last_load_time >= v->target_duration*1000000 &&
-            (ret = parse_playlist(c, v->url, v, NULL)) < 0)
+            av_gettime() - v->last_load_time >= reload_interval) {
+            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                 return ret;
+            /* If we need to reload the playlist again below (if
+             * there's still no more segments), switch to a reload
+             * interval of half the target duration. */
+            reload_interval = v->target_duration * 500000;
+        }
         if (v->cur_seq_no < v->start_seq_no) {
             av_log(NULL, AV_LOG_WARNING,
                    "skipping %d segments ahead, expired from playlists\n",
@@ -392,8 +402,7 @@ reload:
         if (v->cur_seq_no >= v->start_seq_no + v->n_segments) {
             if (v->finished)
                 return AVERROR_EOF;
-            while (av_gettime() - v->last_load_time <
-                   v->target_duration*1000000) {
+            while (av_gettime() - v->last_load_time < reload_interval) {
                 if (ff_check_interrupt(c->interrupt_callback))
                     return AVERROR_EXIT;
                 usleep(100*1000);
diff --git a/libavformat/applehttpproto.c b/libavformat/applehttpproto.c
index 8295ccc5f5ea6a0615512aa2d88ddd99270f59cf..be04131642457e80df3f042b4db7443dabe044cf 100644
--- a/libavformat/applehttpproto.c
+++ b/libavformat/applehttpproto.c
@@ -244,6 +244,7 @@ static int applehttp_read(URLContext *h, uint8_t *buf, int size)
     AppleHTTPContext *s = h->priv_data;
     const char *url;
     int ret;
+    int64_t reload_interval;
 
 start:
     if (s->seg_hd) {
@@ -256,12 +257,21 @@ start:
         s->seg_hd = NULL;
         s->cur_seq_no++;
     }
+    reload_interval = s->n_segments > 0 ?
+                      s->segments[s->n_segments - 1]->duration :
+                      s->target_duration;
+    reload_interval *= 1000000;
 retry:
     if (!s->finished) {
         int64_t now = av_gettime();
-        if (now - s->last_load_time >= s->target_duration*1000000)
+        if (now - s->last_load_time >= reload_interval) {
             if ((ret = parse_playlist(h, s->playlisturl)) < 0)
                 return ret;
+            /* If we need to reload the playlist again below (if
+             * there's still no more segments), switch to a reload
+             * interval of half the target duration. */
+            reload_interval = s->target_duration * 500000;
+        }
     }
     if (s->cur_seq_no < s->start_seq_no) {
         av_log(h, AV_LOG_WARNING,
@@ -272,7 +282,7 @@ retry:
     if (s->cur_seq_no - s->start_seq_no >= s->n_segments) {
         if (s->finished)
             return AVERROR_EOF;
-        while (av_gettime() - s->last_load_time < s->target_duration*1000000) {
+        while (av_gettime() - s->last_load_time < reload_interval) {
             if (ff_check_interrupt(&h->interrupt_callback))
                 return AVERROR_EXIT;
             usleep(100*1000);
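The two hunks above apply the same reload-interval policy in the HLS demuxer (applehttp.c) and the HLS protocol handler (applehttpproto.c): refresh a live playlist after the duration of the most recently loaded segment, and fall back to half the target duration once a refresh produces no new segments. A minimal standalone sketch of that policy follows; the function and parameter names are illustrative only and are not part of the libavformat API.

#include <stdint.h>

/* Sketch only: mirrors the reload-interval logic added above. */
static int64_t hls_reload_interval(int n_segments,
                                   int64_t last_segment_duration, /* seconds */
                                   int64_t target_duration,       /* seconds */
                                   int empty_reload)  /* last reload found no new segments */
{
    if (empty_reload)
        return target_duration * 500000;          /* half the target duration, in microseconds */
    if (n_segments > 0)
        return last_segment_duration * 1000000;   /* duration of the newest known segment */
    return target_duration * 1000000;             /* no segments known yet */
}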
diff --git a/tests/Makefile b/tests/Makefile
index 04a29968a7ccc95bf09ee6ee8877a34a2f7373f9..417a8fde719818ad463b1200046185e2a076afa3 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -50,6 +50,7 @@ include $(SRC_PATH)/tests/fate/amrnb.mak
 include $(SRC_PATH)/tests/fate/amrwb.mak
 include $(SRC_PATH)/tests/fate/atrac.mak
 include $(SRC_PATH)/tests/fate/dct.mak
+include $(SRC_PATH)/tests/fate/dpcm.mak
 include $(SRC_PATH)/tests/fate/fft.mak
 include $(SRC_PATH)/tests/fate/h264.mak
 include $(SRC_PATH)/tests/fate/image.mak
@@ -62,6 +63,7 @@ include $(SRC_PATH)/tests/fate/lossless-video.mak
 include $(SRC_PATH)/tests/fate/microsoft.mak
 include $(SRC_PATH)/tests/fate/mp3.mak
 include $(SRC_PATH)/tests/fate/mpc.mak
+include $(SRC_PATH)/tests/fate/pcm.mak
 include $(SRC_PATH)/tests/fate/prores.mak
 include $(SRC_PATH)/tests/fate/qtrle.mak
 include $(SRC_PATH)/tests/fate/real.mak
diff --git a/tests/fate.mak b/tests/fate.mak
index e9b8e5deb97b9be1fa8b89d9b68db2d30788569e..6d9d6a00bee582c16c2d4ff58f7966bfb853ae07 100644
--- a/tests/fate.mak
+++ b/tests/fate.mak
@@ -6,10 +6,6 @@ FATE_TESTS += fate-8bps
 fate-8bps: CMD = framecrc  -i $(SAMPLES)/8bps/full9iron-partial.mov -pix_fmt rgb24
 FATE_TESTS += fate-aasc
 fate-aasc: CMD = framecrc  -i $(SAMPLES)/aasc/AASC-1.5MB.AVI -pix_fmt rgb24
-FATE_TESTS += fate-adpcm-ea-r2
-fate-adpcm-ea-r2: CMD = crc  -i $(SAMPLES)/ea-mpc/THX_logo.mpc -vn
-FATE_TESTS += fate-adpcm-ea-r3
-fate-adpcm-ea-r3: CMD = crc  -i $(SAMPLES)/ea-vp6/THX_logo.vp6 -vn
 FATE_TESTS += fate-adts-demux
 fate-adts-demux: CMD = crc  -i $(SAMPLES)/aac/ct_faac-adts.aac -acodec copy
 FATE_TESTS += fate-aea-demux
@@ -40,14 +36,6 @@ FATE_TESTS += fate-cljr
 fate-cljr: CMD = framecrc  -i $(SAMPLES)/cljr/testcljr-partial.avi
 FATE_TESTS += fate-corepng
 fate-corepng: CMD = framecrc  -i $(SAMPLES)/png1/corepng-partial.avi
-FATE_TESTS += fate-creative-adpcm
-fate-creative-adpcm: CMD = md5  -i $(SAMPLES)/creative/intro-partial.wav -f s16le
-FATE_TESTS += fate-creative-adpcm-8-2.6bit
-fate-creative-adpcm-8-2.6bit: CMD = md5  -i $(SAMPLES)/creative/BBC_3BIT.VOC -f s16le
-FATE_TESTS += fate-creative-adpcm-8-2bit
-fate-creative-adpcm-8-2bit: CMD = md5  -i $(SAMPLES)/creative/BBC_2BIT.VOC -f s16le
-FATE_TESTS += fate-creative-adpcm-8-4bit
-fate-creative-adpcm-8-4bit: CMD = md5  -i $(SAMPLES)/creative/BBC_4BIT.VOC -f s16le
 FATE_TESTS += fate-creatureshock-avs
 fate-creatureshock-avs: CMD = framecrc  -i $(SAMPLES)/creatureshock-avs/OUTATIME.AVS -pix_fmt rgb24
 FATE_TESTS += fate-cryo-apc
@@ -66,10 +54,6 @@ FATE_TESTS += fate-delphine-cin
 fate-delphine-cin: CMD = framecrc  -i $(SAMPLES)/delphine-cin/LOGO-partial.CIN -pix_fmt rgb24 -vsync 0
 FATE_TESTS += fate-deluxepaint-anm
 fate-deluxepaint-anm: CMD = framecrc  -i $(SAMPLES)/deluxepaint-anm/INTRO1.ANM -pix_fmt rgb24
-FATE_TESTS += fate-duck-dk3
-fate-duck-dk3: CMD = md5  -i $(SAMPLES)/duck/sop-audio-only.avi -f s16le
-FATE_TESTS += fate-duck-dk4
-fate-duck-dk4: CMD = md5  -i $(SAMPLES)/duck/salsa-audio-only.avi -f s16le
 FATE_TESTS += fate-duck-tm2
 fate-duck-tm2: CMD = framecrc  -i $(SAMPLES)/duck/tm20.avi
 FATE_TESTS += fate-ea-cdata
@@ -78,22 +62,14 @@ FATE_TESTS += fate-ea-cmv
 fate-ea-cmv: CMD = framecrc  -i $(SAMPLES)/ea-cmv/TITLE.CMV -vsync 0 -pix_fmt rgb24
 FATE_TESTS += fate-ea-dct
 fate-ea-dct: CMD = framecrc  -idct simple -i $(SAMPLES)/ea-dct/NFS2Esprit-partial.dct
-FATE_TESTS += fate-ea-mad-adpcm-ea-r1
-fate-ea-mad-adpcm-ea-r1: CMD = framecrc  -i $(SAMPLES)/ea-mad/NFS6LogoE.mad
-FATE_TESTS += fate-ea-mad-pcm-planar
-fate-ea-mad-pcm-planar: CMD = framecrc  -i $(SAMPLES)/ea-mad/xeasport.mad
 FATE_TESTS += fate-ea-tgq
 fate-ea-tgq: CMD = framecrc  -i $(SAMPLES)/ea-tgq/v27.tgq -an
 FATE_TESTS += fate-ea-tgv-ima-ea-eacs
 fate-ea-tgv-ima-ea-eacs: CMD = framecrc  -i $(SAMPLES)/ea-tgv/INTRO8K-partial.TGV -pix_fmt rgb24
 FATE_TESTS += fate-ea-tgv-ima-ea-sead
 fate-ea-tgv-ima-ea-sead: CMD = framecrc  -i $(SAMPLES)/ea-tgv/INTEL_S.TGV -pix_fmt rgb24
-FATE_TESTS += fate-ea-tqi-adpcm
-fate-ea-tqi-adpcm: CMD = framecrc  -i $(SAMPLES)/ea-wve/networkBackbone-partial.wve -frames:v 26
 FATE_TESTS += fate-feeble-dxa
 fate-feeble-dxa: CMD = framecrc  -i $(SAMPLES)/dxa/meetsquid.dxa -t 2 -pix_fmt rgb24
-FATE_TESTS += fate-film-cvid-pcm-stereo-8bit
-fate-film-cvid-pcm-stereo-8bit: CMD = framecrc  -i $(SAMPLES)/film/logo-capcom.cpk
 FATE_TESTS += fate-flic-af11-palette-change
 fate-flic-af11-palette-change: CMD = framecrc  -i $(SAMPLES)/fli/fli-engines.fli -t 3.3 -pix_fmt rgb24
 FATE_TESTS += fate-flic-af12
@@ -110,8 +86,6 @@ FATE_TESTS += fate-g729-1
 fate-g729-1: CMD = framecrc  -i $(SAMPLES)/act/REC05.act -t 10
 FATE_TESTS += fate-id-cin-video
 fate-id-cin-video: CMD = framecrc  -i $(SAMPLES)/idcin/idlog-2MB.cin -pix_fmt rgb24
-FATE_TESTS += fate-idroq-video-dpcm
-fate-idroq-video-dpcm: CMD = framecrc  -i $(SAMPLES)/idroq/idlogo.roq
 FATE_TESTS-$(CONFIG_AVFILTER) += fate-idroq-video-encode
 fate-idroq-video-encode: CMD = md5  -f image2 -vcodec pgmyuv -i $(SAMPLES)/ffmpeg-synthetic/vsynth1/%02d.pgm -sws_flags +bitexact -vf pad=512:512:80:112 -f RoQ -t 0.2
 FATE_TESTS += fate-iff-byterun1
@@ -120,8 +94,6 @@ FATE_TESTS += fate-iff-fibonacci
 fate-iff-fibonacci: CMD = md5  -i $(SAMPLES)/iff/dasboot-in-compressed -f s16le
 FATE_TESTS += fate-iff-ilbm
 fate-iff-ilbm: CMD = framecrc  -i $(SAMPLES)/iff/lms-matriks.ilbm -pix_fmt rgb24
-FATE_TESTS += fate-iff-pcm
-fate-iff-pcm: CMD = md5  -i $(SAMPLES)/iff/Bells -f s16le
 FATE_TESTS += fate-interplay-mve-16bit
 fate-interplay-mve-16bit: CMD = framecrc  -i $(SAMPLES)/interplay-mve/descent3-level5-16bit-partial.mve -pix_fmt rgb24
 FATE_TESTS += fate-interplay-mve-8bit
@@ -150,14 +122,10 @@ FATE_TESTS += fate-nuv
 fate-nuv: CMD = framecrc  -idct simple -i $(SAMPLES)/nuv/Today.nuv -vsync 0
 FATE_TESTS += fate-oma-demux
 fate-oma-demux: CMD = crc  -i $(SAMPLES)/oma/01-Untitled-partial.oma -acodec copy
-FATE_TESTS += fate-pcm_dvd
-fate-pcm_dvd: CMD = framecrc  -i $(SAMPLES)/pcm-dvd/coolitnow-partial.vob -vn
 FATE_TESTS += fate-psx-str
 fate-psx-str: CMD = framecrc  -i $(SAMPLES)/psx-str/descent-partial.str
 FATE_TESTS += fate-psx-str-v3-mdec
 fate-psx-str-v3-mdec: CMD = framecrc  -i $(SAMPLES)/psx-str/abc000_cut.str -an
-FATE_TESTS += fate-psx-str-v3-adpcm_xa
-fate-psx-str-v3-adpcm_xa: CMD = framecrc  -i $(SAMPLES)/psx-str/abc000_cut.str -vn
 FATE_TESTS += fate-pva-demux
 fate-pva-demux: CMD = framecrc  -idct simple -i $(SAMPLES)/pva/PVA_test-partial.pva -t 0.6 -acodec copy
 FATE_TESTS += fate-qcp-demux
@@ -180,18 +148,6 @@ FATE_TESTS += fate-qt-mac6-mono
 fate-qt-mac6-mono: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-1-8-MAC6.mov -f s16le
 FATE_TESTS += fate-qt-mac6-stereo
 fate-qt-mac6-stereo: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-2-8-MAC6.mov -f s16le
-FATE_TESTS += fate-qt-msadpcm-stereo
-fate-qt-msadpcm-stereo: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-2-16-L-ms02.mov -f s16le
-FATE_TESTS += fate-qt-msimaadpcm-stereo
-fate-qt-msimaadpcm-stereo: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-2-16-L-ms11.mov -f s16le
-FATE_TESTS += fate-qt-rawpcm-16bit-stereo-signed-be
-fate-qt-rawpcm-16bit-stereo-signed-be: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-2-16-B-twos.mov -f s16le
-FATE_TESTS += fate-qt-rawpcm-16bit-stereo-signed-le
-fate-qt-rawpcm-16bit-stereo-signed-le: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-2-16-L-sowt.mov -f s16le
-FATE_TESTS += fate-qt-rawpcm-8bit-mono-unsigned
-fate-qt-rawpcm-8bit-mono-unsigned: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-1-8-raw.mov -f s16le
-FATE_TESTS += fate-qt-rawpcm-8bit-stereo-unsigned
-fate-qt-rawpcm-8bit-stereo-unsigned: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-2-8-raw.mov -f s16le
 FATE_TESTS += fate-qt-ulaw-mono
 fate-qt-ulaw-mono: CMD = md5  -i $(SAMPLES)/qt-surge-suite/surge-1-16-B-ulaw.mov -f s16le
 FATE_TESTS += fate-qt-ulaw-stereo
@@ -222,8 +178,6 @@ FATE_TESTS += fate-svq1
 fate-svq1: CMD = framecrc  -i $(SAMPLES)/svq1/marymary-shackles.mov -an -t 10
 FATE_TESTS += fate-svq3
 fate-svq3: CMD = framecrc  -i $(SAMPLES)/svq3/Vertical400kbit.sorenson3.mov -t 6 -an
-FATE_TESTS += fate-thp-mjpeg-adpcm
-fate-thp-mjpeg-adpcm: CMD = framecrc  -idct simple -i $(SAMPLES)/thp/pikmin2-opening1-partial.thp
 FATE_TESTS += fate-tiertex-seq
 fate-tiertex-seq: CMD = framecrc  -i $(SAMPLES)/tiertex-seq/Gameover.seq -pix_fmt rgb24
 FATE_TESTS += fate-tmv
@@ -242,13 +196,9 @@ FATE_TESTS += fate-video-xl
 fate-video-xl: CMD = framecrc  -i $(SAMPLES)/vixl/pig-vixl.avi
 FATE_TESTS += fate-vqa-cc
 fate-vqa-cc: CMD = framecrc  -i $(SAMPLES)/vqa/cc-demo1-partial.vqa -pix_fmt rgb24
-FATE_TESTS += fate-w64
-fate-w64: CMD = crc  -i $(SAMPLES)/w64/w64-pcm16.w64
 FATE_TESTS += fate-wc3movie-xan
 fate-wc3movie-xan: CMD = framecrc  -i $(SAMPLES)/wc3movie/SC_32-part.MVE -pix_fmt rgb24
 FATE_TESTS += fate-westwood-aud
 fate-westwood-aud: CMD = md5  -i $(SAMPLES)/westwood-aud/excellent.aud -f s16le
 FATE_TESTS += fate-wnv1
 fate-wnv1: CMD = framecrc  -i $(SAMPLES)/wnv1/wnv1-codec.avi -an
-FATE_TESTS += fate-xan-dpcm
-fate-xan-dpcm: CMD = md5  -i $(SAMPLES)/wc4-xan/wc4_2.avi -vn -f s16le
diff --git a/tests/fate/dpcm.mak b/tests/fate/dpcm.mak
new file mode 100644
index 0000000000000000000000000000000000000000..4b3305f63fd17c83fd2366941e7c837ae4699ed3
--- /dev/null
+++ b/tests/fate/dpcm.mak
@@ -0,0 +1,42 @@
+FATE_TESTS += fate-adpcm-ea-r2
+fate-adpcm-ea-r2: CMD = crc -i $(SAMPLES)/ea-mpc/THX_logo.mpc -vn
+
+FATE_TESTS += fate-adpcm-ea-r3
+fate-adpcm-ea-r3: CMD = crc -i $(SAMPLES)/ea-vp6/THX_logo.vp6 -vn
+
+FATE_TESTS += fate-creative-adpcm
+fate-creative-adpcm: CMD = md5 -i $(SAMPLES)/creative/intro-partial.wav -f s16le
+
+FATE_TESTS += fate-creative-adpcm-8-2bit
+fate-creative-adpcm-8-2bit: CMD = md5 -i $(SAMPLES)/creative/BBC_2BIT.VOC -f s16le
+
+FATE_TESTS += fate-creative-adpcm-8-2.6bit
+fate-creative-adpcm-8-2.6bit: CMD = md5 -i $(SAMPLES)/creative/BBC_3BIT.VOC -f s16le
+
+FATE_TESTS += fate-creative-adpcm-8-4bit
+fate-creative-adpcm-8-4bit: CMD = md5 -i $(SAMPLES)/creative/BBC_4BIT.VOC -f s16le
+
+FATE_TESTS += fate-ea-mad-adpcm-ea-r1
+fate-ea-mad-adpcm-ea-r1: CMD = framecrc -i $(SAMPLES)/ea-mad/NFS6LogoE.mad
+
+FATE_TESTS += fate-ea-tqi-adpcm
+fate-ea-tqi-adpcm: CMD = framecrc -i $(SAMPLES)/ea-wve/networkBackbone-partial.wve -frames:v 26
+
+FATE_TESTS += fate-idroq-video-dpcm
+fate-idroq-video-dpcm: CMD = framecrc -i $(SAMPLES)/idroq/idlogo.roq
+
+FATE_TESTS += fate-psx-str-v3-adpcm_xa
+fate-psx-str-v3-adpcm_xa: CMD = framecrc -i $(SAMPLES)/psx-str/abc000_cut.str -vn
+
+FATE_TESTS += fate-qt-msadpcm-stereo
+fate-qt-msadpcm-stereo: CMD = md5 -i $(SAMPLES)/qt-surge-suite/surge-2-16-L-ms02.mov -f s16le
+
+FATE_TESTS += fate-qt-msimaadpcm-stereo
+fate-qt-msimaadpcm-stereo: CMD = md5 -i $(SAMPLES)/qt-surge-suite/surge-2-16-L-ms11.mov -f s16le
+
+FATE_TESTS += fate-thp-mjpeg-adpcm
+fate-thp-mjpeg-adpcm: CMD = framecrc -idct simple -i $(SAMPLES)/thp/pikmin2-opening1-partial.thp
+
+FATE_TESTS += fate-dpcm_xan_audio
+fate-dpcm_xan_audio: CMD = md5 -i $(SAMPLES)/wc4-xan/wc4_2.avi -vn -f s16le
+
diff --git a/tests/fate/microsoft.mak b/tests/fate/microsoft.mak
index c6c1c62e6fa523fb7851db1d36cf19063a6fd740..bb7e7018d701ed7049dbe3c8178361cc5cdb7b2d 100644
--- a/tests/fate/microsoft.mak
+++ b/tests/fate/microsoft.mak
@@ -16,3 +16,12 @@ fate-wmv8-drm-nodec: CMD = framecrc -cryptokey 137381538c84c068111902a59c5cf6c34
 
 FATE_TESTS += fate-vc1
 fate-vc1: CMD = framecrc -i $(SAMPLES)/vc1/SA00040.vc1
+
+FATE_TESTS += fate-vc1_sa00050
+fate-vc1_sa00050: CMD = framecrc -i $(SAMPLES)/vc1/SA00050.vc1
+
+FATE_TESTS += fate-vc1_sa10091
+fate-vc1_sa10091: CMD = framecrc -i $(SAMPLES)/vc1/SA10091.vc1
+
+FATE_TESTS += fate-vc1_sa20021
+fate-vc1_sa20021: CMD = framecrc -i $(SAMPLES)/vc1/SA20021.vc1
diff --git a/tests/fate/pcm.mak b/tests/fate/pcm.mak
new file mode 100644
index 0000000000000000000000000000000000000000..f8ee34aeca8f41ef1dc937a32e7f9923f54a6d8b
--- /dev/null
+++ b/tests/fate/pcm.mak
@@ -0,0 +1,32 @@
+FATE_TESTS += fate-duck-dk3
+fate-duck-dk3: CMD = md5  -i $(SAMPLES)/duck/sop-audio-only.avi -f s16le
+
+FATE_TESTS += fate-duck-dk4
+fate-duck-dk4: CMD = md5  -i $(SAMPLES)/duck/salsa-audio-only.avi -f s16le
+
+FATE_TESTS += fate-ea-mad-pcm-planar
+fate-ea-mad-pcm-planar: CMD = framecrc -i $(SAMPLES)/ea-mad/xeasport.mad
+
+FATE_TESTS += fate-film-cvid-pcm-stereo-8bit
+fate-film-cvid-pcm-stereo-8bit: CMD = framecrc -i $(SAMPLES)/film/logo-capcom.cpk
+
+FATE_TESTS += fate-iff-pcm
+fate-iff-pcm: CMD = md5 -i $(SAMPLES)/iff/Bells -f s16le
+
+FATE_TESTS += fate-pcm_dvd
+fate-pcm_dvd: CMD = framecrc -i $(SAMPLES)/pcm-dvd/coolitnow-partial.vob -vn
+
+FATE_TESTS += fate-qt-rawpcm-8bit-mono-unsigned
+fate-qt-rawpcm-8bit-mono-unsigned: CMD = md5 -i $(SAMPLES)/qt-surge-suite/surge-1-8-raw.mov -f s16le
+
+FATE_TESTS += fate-qt-rawpcm-8bit-stereo-unsigned
+fate-qt-rawpcm-8bit-stereo-unsigned: CMD = md5 -i $(SAMPLES)/qt-surge-suite/surge-2-8-raw.mov -f s16le
+
+FATE_TESTS += fate-qt-rawpcm-16bit-stereo-signed-be
+fate-qt-rawpcm-16bit-stereo-signed-be: CMD = md5 -i $(SAMPLES)/qt-surge-suite/surge-2-16-B-twos.mov -f s16le
+
+FATE_TESTS += fate-qt-rawpcm-16bit-stereo-signed-le
+fate-qt-rawpcm-16bit-stereo-signed-le: CMD = md5 -i $(SAMPLES)/qt-surge-suite/surge-2-16-L-sowt.mov -f s16le
+
+FATE_TESTS += fate-w64
+fate-w64: CMD = crc -i $(SAMPLES)/w64/w64-pcm16.w64
diff --git a/tests/fate2.mak b/tests/fate2.mak
index e925278d9b255888faebc6d09e0105a2e0740dd3..479adf60b733e3d26ac186462c41a5a25e0b4f15 100644
--- a/tests/fate2.mak
+++ b/tests/fate2.mak
@@ -87,3 +87,6 @@ fate-v410enc: CMD = md5 -f image2 -vcodec pgmyuv -i $(TARGET_PATH)/tests/vsynth1
 
 FATE_TESTS += fate-r210
 fate-r210: CMD = framecrc -i $(SAMPLES)/r210/r210.avi -pix_fmt rgb48le
+
+FATE_TESTS += fate-xxan_wc4_video
+fate-xxan_wc4_video: CMD = framecrc -i $(SAMPLES)/wc4-xan/wc4_2.avi -an -vframes 10
diff --git a/tests/ref/fate/xan-dpcm b/tests/ref/fate/dpcm_xan_audio
similarity index 100%
rename from tests/ref/fate/xan-dpcm
rename to tests/ref/fate/dpcm_xan_audio
diff --git a/tests/ref/fate/vc1_sa00050 b/tests/ref/fate/vc1_sa00050
new file mode 100644
index 0000000000000000000000000000000000000000..3eb27bd3c5aec0cc15198e6340b01f6673864277
--- /dev/null
+++ b/tests/ref/fate/vc1_sa00050
@@ -0,0 +1,30 @@
+0, 0, 115200, 0xb8830eef
+0, 3600, 115200, 0xb8830eef
+0, 7200, 115200, 0xb8830eef
+0, 10800, 115200, 0x952ff5e1
+0, 14400, 115200, 0xa4362b14
+0, 18000, 115200, 0x32bacbe7
+0, 21600, 115200, 0x509eb814
+0, 25200, 115200, 0x509eb814
+0, 28800, 115200, 0x11a76c3e
+0, 32400, 115200, 0x11a76c3e
+0, 36000, 115200, 0x00cf734a
+0, 39600, 115200, 0x00cf734a
+0, 43200, 115200, 0x00cf734a
+0, 46800, 115200, 0x00cf734a
+0, 50400, 115200, 0x00cf734a
+0, 54000, 115200, 0x00cf734a
+0, 57600, 115200, 0x00cf734a
+0, 61200, 115200, 0x00cf734a
+0, 64800, 115200, 0xfddf48e6
+0, 68400, 115200, 0xfddf48e6
+0, 72000, 115200, 0x1eccebbf
+0, 75600, 115200, 0x3da2f77e
+0, 79200, 115200, 0x7c232572
+0, 82800, 115200, 0xedf426e5
+0, 86400, 115200, 0x5324ab20
+0, 90000, 115200, 0x5324ab20
+0, 93600, 115200, 0xa23e66bb
+0, 97200, 115200, 0x680a50ff
+0, 100800, 115200, 0x680a50ff
+0, 104400, 115200, 0x680a50ff
diff --git a/tests/ref/fate/vc1_sa10091 b/tests/ref/fate/vc1_sa10091
new file mode 100644
index 0000000000000000000000000000000000000000..c12109009776dd7d8077040984b44d6f2e7f3dad
--- /dev/null
+++ b/tests/ref/fate/vc1_sa10091
@@ -0,0 +1,30 @@
+0, 0, 518400, 0xae20b4fa
+0, 3600, 518400, 0x2b4ccdf9
+0, 7200, 518400, 0x2b4ccdf9
+0, 10800, 518400, 0x2b4ccdf9
+0, 14400, 518400, 0x2b4ccdf9
+0, 18000, 518400, 0x2b4ccdf9
+0, 21600, 518400, 0x70d9a891
+0, 25200, 518400, 0x70d9a891
+0, 28800, 518400, 0x70d9a891
+0, 32400, 518400, 0xa461ee86
+0, 36000, 518400, 0x722bc6e8
+0, 39600, 518400, 0x722bc6e8
+0, 43200, 518400, 0x722bc6e8
+0, 46800, 518400, 0xf752fd2c
+0, 50400, 518400, 0xf752fd2c
+0, 54000, 518400, 0x91abcaca
+0, 57600, 518400, 0x572727c3
+0, 61200, 518400, 0x572727c3
+0, 64800, 518400, 0x24c12382
+0, 68400, 518400, 0x24c12382
+0, 72000, 518400, 0x9aa39fe8
+0, 75600, 518400, 0x9aa39fe8
+0, 79200, 518400, 0x5cb6bd19
+0, 82800, 518400, 0x704d9300
+0, 86400, 518400, 0x590fad49
+0, 90000, 518400, 0x590fad49
+0, 93600, 518400, 0x590fad49
+0, 97200, 518400, 0x46bea10b
+0, 100800, 518400, 0x46bea10b
+0, 104400, 518400, 0x46bea10b
diff --git a/tests/ref/fate/vc1_sa20021 b/tests/ref/fate/vc1_sa20021
new file mode 100644
index 0000000000000000000000000000000000000000..aae607bbbc0c772492895f1804e0abd56a9f931a
--- /dev/null
+++ b/tests/ref/fate/vc1_sa20021
@@ -0,0 +1,60 @@
+0, 0, 506880, 0x884bc093
+0, 3600, 506880, 0x4b09548f
+0, 7200, 506880, 0x195cbee1
+0, 10800, 506880, 0xc8141e28
+0, 14400, 506880, 0xb170c49b
+0, 18000, 506880, 0x2782268a
+0, 21600, 506880, 0x2782268a
+0, 25200, 506880, 0x2782268a
+0, 28800, 506880, 0x2782268a
+0, 32400, 506880, 0xe6803b32
+0, 36000, 506880, 0xe6803b32
+0, 39600, 506880, 0xa5ef9baf
+0, 43200, 506880, 0xa5ef9baf
+0, 46800, 506880, 0x46e8cbcb
+0, 50400, 506880, 0x28a2239b
+0, 54000, 506880, 0x7667af2f
+0, 57600, 506880, 0x7667af2f
+0, 61200, 506880, 0x8011bcaf
+0, 64800, 506880, 0xd422115b
+0, 68400, 506880, 0xd422115b
+0, 72000, 506880, 0xd422115b
+0, 75600, 506880, 0xbcee0b5b
+0, 79200, 506880, 0x08fe9ec8
+0, 82800, 506880, 0xc8fb8b37
+0, 86400, 506880, 0xc8fb8b37
+0, 90000, 506880, 0x2c698b52
+0, 93600, 506880, 0x2c698b52
+0, 97200, 506880, 0x2c698b52
+0, 100800, 506880, 0x2b4ad9bc
+0, 104400, 506880, 0x2b4ad9bc
+0, 108000, 506880, 0x2b4ad9bc
+0, 111600, 506880, 0x2b4ad9bc
+0, 115200, 506880, 0x92e84ebb
+0, 118800, 506880, 0x92e84ebb
+0, 122400, 506880, 0xdb877da3
+0, 126000, 506880, 0xdb877da3
+0, 129600, 506880, 0xdb877da3
+0, 133200, 506880, 0x44610654
+0, 136800, 506880, 0x44610654
+0, 140400, 506880, 0xe254ce67
+0, 144000, 506880, 0xa6085385
+0, 147600, 506880, 0x2d45d744
+0, 151200, 506880, 0x2d45d744
+0, 154800, 506880, 0x6e684f51
+0, 158400, 506880, 0xe96186cf
+0, 162000, 506880, 0xb535d369
+0, 165600, 506880, 0xb535d369
+0, 169200, 506880, 0xb535d369
+0, 172800, 506880, 0xeed0b7e0
+0, 176400, 506880, 0xeed0b7e0
+0, 180000, 506880, 0xeed0b7e0
+0, 183600, 506880, 0xeed0b7e0
+0, 187200, 506880, 0x8789b20b
+0, 190800, 506880, 0x0a0f42fb
+0, 194400, 506880, 0x09bbac2d
+0, 198000, 506880, 0x09bbac2d
+0, 201600, 506880, 0x09bbac2d
+0, 205200, 506880, 0x09bbac2d
+0, 208800, 506880, 0x09bbac2d
+0, 212400, 506880, 0xda77f0df
diff --git a/tests/ref/fate/xxan_wc4_video b/tests/ref/fate/xxan_wc4_video
new file mode 100644
index 0000000000000000000000000000000000000000..d31fbb697cc7a90acad2fade574821858a5e6e4f
--- /dev/null
+++ b/tests/ref/fate/xxan_wc4_video
@@ -0,0 +1,10 @@
+0, 0, 79360, 0x877eb3ed
+0, 6000, 79360, 0x9ff8707c
+0, 12000, 79360, 0x144dec86
+0, 18000, 79360, 0x56d59588
+0, 24000, 79360, 0x2d20f8ce
+0, 30000, 79360, 0x1a752c42
+0, 36000, 79360, 0x85705730
+0, 42000, 79360, 0xddea3741
+0, 48000, 79360, 0x46448efd
+0, 54000, 79360, 0x27186e2b
diff --git a/tools/qt-faststart.c b/tools/qt-faststart.c
index ace4c113c88850613aff2f9158a08a6164fc6bac..dbd2c141c654541f998ac19205bfe132ac7b63fc 100644
--- a/tools/qt-faststart.c
+++ b/tools/qt-faststart.c
@@ -137,17 +137,17 @@ int main(int argc, char *argv[])
             start_offset = ftello(infile);
         } else {
 
-        /* 64-bit special case */
-        if (atom_size == 1) {
-            if (fread(atom_bytes, ATOM_PREAMBLE_SIZE, 1, infile) != 1) {
-                break;
+            /* 64-bit special case */
+            if (atom_size == 1) {
+                if (fread(atom_bytes, ATOM_PREAMBLE_SIZE, 1, infile) != 1) {
+                    break;
+                }
+                atom_size = BE_64(&atom_bytes[0]);
+                fseeko(infile, atom_size - ATOM_PREAMBLE_SIZE * 2, SEEK_CUR);
+            } else {
+                fseeko(infile, atom_size - ATOM_PREAMBLE_SIZE, SEEK_CUR);
             }
-            atom_size = BE_64(&atom_bytes[0]);
-            fseeko(infile, atom_size - ATOM_PREAMBLE_SIZE * 2, SEEK_CUR);
-        } else {
-            fseeko(infile, atom_size - ATOM_PREAMBLE_SIZE, SEEK_CUR);
         }
-    }
         printf("%c%c%c%c %10"PRIu64" %"PRIu64"\n",
                (atom_type >> 24) & 255,
                (atom_type >> 16) & 255,