diff --git a/libavcodec/alpha/mpegvideo_alpha.c b/libavcodec/alpha/mpegvideo_alpha.c
index 7ba0f2e68f14730171448eaad9cb8b2cf6990654..126fe264a17456834f79210a849c06d398f130e4 100644
--- a/libavcodec/alpha/mpegvideo_alpha.c
+++ b/libavcodec/alpha/mpegvideo_alpha.c
@@ -103,7 +103,7 @@ static void dct_unquantize_h263_inter_axp(MpegEncContext *s, int16_t *block,
     dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
 }
 
-av_cold void ff_MPV_common_init_axp(MpegEncContext *s)
+av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;
diff --git a/libavcodec/arm/mpegvideo_arm.c b/libavcodec/arm/mpegvideo_arm.c
index 5f5473d28c83b9c007d73ccd5e6c0f8af471daee..918be16d038696f7df83d7b2e8811590732dae7f 100644
--- a/libavcodec/arm/mpegvideo_arm.c
+++ b/libavcodec/arm/mpegvideo_arm.c
@@ -40,12 +40,12 @@ void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, int16_t *block,
 void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
                                        int n, int qscale);
 
-av_cold void ff_MPV_common_init_arm(MpegEncContext *s)
+av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
 {
     int cpu_flags = av_get_cpu_flags();
 
     if (have_armv5te(cpu_flags))
-        ff_MPV_common_init_armv5te(s);
+        ff_mpv_common_init_armv5te(s);
 
     if (have_neon(cpu_flags)) {
         s->dct_unquantize_h263_intra = ff_dct_unquantize_h263_intra_neon;
diff --git a/libavcodec/arm/mpegvideo_arm.h b/libavcodec/arm/mpegvideo_arm.h
index 78e07e15c50efafa6d7b023725009e50e8758f1a..709ae6b24772a8e7a6028186d14ba414345d5b60 100644
--- a/libavcodec/arm/mpegvideo_arm.h
+++ b/libavcodec/arm/mpegvideo_arm.h
@@ -21,6 +21,6 @@
 
 #include "libavcodec/mpegvideo.h"
 
-void ff_MPV_common_init_armv5te(MpegEncContext *s);
+void ff_mpv_common_init_armv5te(MpegEncContext *s);
 
 #endif /* AVCODEC_ARM_MPEGVIDEO_ARM_H */
diff --git a/libavcodec/arm/mpegvideo_armv5te.c b/libavcodec/arm/mpegvideo_armv5te.c
index a5722900a824b972db32e30d32a7c52512ccf080..88c5f4fdadac717a656ff741128274062684365a 100644
--- a/libavcodec/arm/mpegvideo_armv5te.c
+++ b/libavcodec/arm/mpegvideo_armv5te.c
@@ -95,7 +95,7 @@ static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
     ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 }
 
-av_cold void ff_MPV_common_init_armv5te(MpegEncContext *s)
+av_cold void ff_mpv_common_init_armv5te(MpegEncContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 3b6a75093e19b8ce79933d193b1751bf20d2b331..4f53359d65a57b21cd081e1ea30ffdd866c75d84 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -3307,7 +3307,7 @@ typedef struct AVHWAccel {
     /**
      * Called for every Macroblock in a slice.
      *
-     * XvMC uses it to replace the ff_MPV_decode_mb().
+     * XvMC uses it to replace the ff_mpv_decode_mb().
      * Instead of decoding to raw picture, MB parameters are
      * stored in an array provided by the video driver.
      *
diff --git a/libavcodec/flvenc.c b/libavcodec/flvenc.c
index 9421955b5ed34ed4cedb75a210b35b0687888da2..4c045f8be6c4bb4850374bd415736736fef24e0f 100644
--- a/libavcodec/flvenc.c
+++ b/libavcodec/flvenc.c
@@ -92,9 +92,9 @@ AVCodec ff_flv_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_FLV1,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &flv_class,
 };
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index 5bc685e2c018659c56fde5e96704c4474cf5d387..301ecc111f9b75438cac313230a3f5a88dc2f2e9 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -75,7 +75,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx)
     MpegEncContext *const s = &h->s;
 
     // set defaults
-    ff_MPV_decode_defaults(s);
+    ff_mpv_decode_defaults(s);
     s->avctx       = avctx;
     s->width       = s->avctx->coded_width;
     s->height      = s->avctx->coded_height;
@@ -219,7 +219,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2)
         s->mb_skipped                  = 1;
         h->mtype                      &= ~MB_TYPE_H261_FIL;
 
-        ff_MPV_decode_mb(s, s->block);
+        ff_mpv_decode_mb(s, s->block);
     }
 
     return 0;
@@ -454,7 +454,7 @@ intra:
             s->block_last_index[i] = -1;
     }
 
-    ff_MPV_decode_mb(s, s->block);
+    ff_mpv_decode_mb(s, s->block);
 
     return SLICE_OK;
 }
@@ -605,12 +605,12 @@ retry:
     if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
         ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat
         s->parse_context.buffer = 0;
-        ff_MPV_common_end(s);
+        ff_mpv_common_end(s);
         s->parse_context = pc;
     }
 
     if (!s->context_initialized) {
-        if ((ret = ff_MPV_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(s)) < 0)
             return ret;
 
         ret = ff_set_dimensions(avctx, s->width, s->height);
@@ -629,7 +629,7 @@ retry:
          avctx->skip_frame >= AVDISCARD_ALL)
         return get_consumed_bytes(s, buf_size);
 
-    if (ff_MPV_frame_start(s, avctx) < 0)
+    if (ff_mpv_frame_start(s, avctx) < 0)
         return -1;
 
     ff_mpeg_er_frame_start(s);
@@ -643,7 +643,7 @@ retry:
             break;
         h261_decode_gob(h);
     }
-    ff_MPV_frame_end(s);
+    ff_mpv_frame_end(s);
 
     av_assert0(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
     av_assert0(s->current_picture.f->pict_type == s->pict_type);
@@ -662,7 +662,7 @@ static av_cold int h261_decode_end(AVCodecContext *avctx)
     H261Context *h    = avctx->priv_data;
     MpegEncContext *s = &h->s;
 
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return 0;
 }
 
diff --git a/libavcodec/h261enc.c b/libavcodec/h261enc.c
index 47696e583f9c9e34654c1eff396dacae98fed669..75211a0712d185a4c588d09fade65ed6987965d4 100644
--- a/libavcodec/h261enc.c
+++ b/libavcodec/h261enc.c
@@ -382,9 +382,9 @@ AVCodec ff_h261_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H261,
     .priv_data_size = sizeof(H261Context),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                      AV_PIX_FMT_NONE },
     .priv_class     = &h261_class,
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 40d98142c49496ae33e4087e756fd02067896509..0e1161453b1a10c8eddf928bb4432e55d583c628 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -55,7 +55,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
     s->workaround_bugs = avctx->workaround_bugs;
 
     // set defaults
-    ff_MPV_decode_defaults(s);
+    ff_mpv_decode_defaults(s);
     s->quant_precision = 5;
     s->decode_mb       = ff_h263_decode_mb;
     s->low_delay       = 1;
@@ -123,7 +123,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
         avctx->codec->id != AV_CODEC_ID_H263P &&
         avctx->codec->id != AV_CODEC_ID_MPEG4) {
         ff_mpv_idct_init(s);
-        if ((ret = ff_MPV_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(s)) < 0)
             return ret;
     }
 
@@ -138,7 +138,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
 
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return 0;
 }
 
@@ -249,7 +249,7 @@ static int decode_slice(MpegEncContext *s)
             if (ret < 0) {
                 const int xy = s->mb_x + s->mb_y * s->mb_stride;
                 if (ret == SLICE_END) {
-                    ff_MPV_decode_mb(s, s->block);
+                    ff_mpv_decode_mb(s, s->block);
                     if (s->loop_filter)
                         ff_h263_loop_filter(s);
 
@@ -261,7 +261,7 @@ static int decode_slice(MpegEncContext *s)
                     if (++s->mb_x >= s->mb_width) {
                         s->mb_x = 0;
                         ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size);
-                        ff_MPV_report_decode_progress(s);
+                        ff_mpv_report_decode_progress(s);
                         s->mb_y++;
                     }
                     return 0;
@@ -282,13 +282,13 @@ static int decode_slice(MpegEncContext *s)
                 return AVERROR_INVALIDDATA;
             }
 
-            ff_MPV_decode_mb(s, s->block);
+            ff_mpv_decode_mb(s, s->block);
             if (s->loop_filter)
                 ff_h263_loop_filter(s);
         }
 
         ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size);
-        ff_MPV_report_decode_progress(s);
+        ff_mpv_report_decode_progress(s);
 
         s->mb_x = 0;
     }
@@ -499,7 +499,7 @@ retry:
     }
 
     if (!s->context_initialized)
-        if ((ret = ff_MPV_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(s)) < 0)
             return ret;
 
     if (s->current_picture_ptr == NULL || s->current_picture_ptr->f->data[0]) {
@@ -532,7 +532,7 @@ retry:
 
         ff_set_sar(avctx, avctx->sample_aspect_ratio);
 
-        if ((ret = ff_MPV_common_frame_size_change(s)))
+        if ((ret = ff_mpv_common_frame_size_change(s)))
             return ret;
     }
 
@@ -571,7 +571,7 @@ retry:
         s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
     }
 
-    if ((ret = ff_MPV_frame_start(s, avctx)) < 0)
+    if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
         return ret;
 
     if (!s->divx_packed && !avctx->hwaccel)
@@ -593,7 +593,7 @@ retry:
 
     /* the second part of the wmv2 header contains the MB skip bits which
      * are stored in current_picture->mb_type which is not available before
-     * ff_MPV_frame_start() */
+     * ff_mpv_frame_start() */
     if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) {
         ret = ff_wmv2_decode_secondary_picture_header(s);
         if (ret < 0)
@@ -643,7 +643,7 @@ frame_end:
             return ret;
     }
 
-    ff_MPV_frame_end(s);
+    ff_mpv_frame_end(s);
 
     if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4)
         ff_mpeg4_frame_end(avctx, buf, buf_size);
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index f1fbbdcbee9f885d7dac0236dd83d782d6eb9770..fc744f2f1a0fcced284b99d9775fb2d6e7eddd9a 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -549,7 +549,7 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
 #endif
     }
     /* update linesize on resize for h264. The h264 decoder doesn't
-     * necessarily call ff_MPV_frame_start in the new thread */
+     * necessarily call ff_mpv_frame_start in the new thread */
     h->linesize   = h1->linesize;
     h->uvlinesize = h1->uvlinesize;
 
@@ -783,7 +783,7 @@ static int h264_frame_start(H264Context *h)
 
     /* We mark the current picture as non-reference after allocating it, so
      * that if we break out due to an error it can be released automatically
-     * in the next ff_MPV_frame_start().
+     * in the next ff_mpv_frame_start().
      */
     h->cur_pic_ptr->reference = 0;
 
diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c
index c6df8cf2e92c13b17147d970894991a1845befcd..017536d644ebb4e03abf9467ef7471d467d711e0 100644
--- a/libavcodec/intrax8.c
+++ b/libavcodec/intrax8.c
@@ -719,9 +719,9 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w)
 /**
  * Decode single IntraX8 frame.
  * The parent codec must fill s->loopfilter and s->gb (bitstream).
- * The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function.
- * The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function.
- * This function does not use MPV_decode_mb().
+ * The parent codec must call ff_mpv_frame_start(), ff_er_frame_start() before calling this function.
+ * The parent codec must call ff_er_frame_end(), ff_mpv_frame_end() after calling this function.
+ * This function does not use ff_mpv_decode_mb().
  * lowres decoding is theoretically impossible.
  * @param w pointer to IntraX8Context
  * @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1.
diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c
index ce230ade274d00036f5293d5045b730a9ba376d9..6fbc3b48f8ceab23e73d6d66d20adf6993be7b06 100644
--- a/libavcodec/mjpegenc.c
+++ b/libavcodec/mjpegenc.c
@@ -209,7 +209,7 @@ static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
         pic->data[i] += pic->linesize[i] * (vsample * s->height / V_MAX - 1);
         pic->linesize[i] *= -1;
     }
-    ret = ff_MPV_encode_picture(avctx, pkt, pic, got_packet);
+    ret = ff_mpv_encode_picture(avctx, pkt, pic, got_packet);
     av_frame_free(&pic);
     return ret;
 }
@@ -221,9 +221,9 @@ AVCodec ff_mjpeg_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MJPEG,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .capabilities   = CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
     .pix_fmts       = (const enum AVPixelFormat[]){
         AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE
@@ -237,9 +237,9 @@ AVCodec ff_amv_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_AMV,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
+    .init           = ff_mpv_encode_init,
     .encode2        = amv_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){
         AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE
     },
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index 1f2bf50a5b1bb82e2c3a2d8f371d9945bd33ee49..49a6ab3b416ded6a2822e75400fd86833d5290f0 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -1119,7 +1119,7 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
     Mpeg1Context *s    = avctx->priv_data;
     MpegEncContext *s2 = &s->mpeg_enc_ctx;
 
-    ff_MPV_decode_defaults(s2);
+    ff_mpv_decode_defaults(s2);
 
     s->mpeg_enc_ctx.avctx  = avctx;
     s->mpeg_enc_ctx.flags  = avctx->flags;
@@ -1268,7 +1268,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
         if (s1->mpeg_enc_ctx_allocated) {
             ParseContext pc = s->parse_context;
             s->parse_context.buffer = 0;
-            ff_MPV_common_end(s);
+            ff_mpv_common_end(s);
             s->parse_context = pc;
             s1->mpeg_enc_ctx_allocated = 0;
         }
@@ -1358,7 +1358,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
         memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
 
         ff_mpv_idct_init(s);
-        if (ff_MPV_common_init(s) < 0)
+        if (ff_mpv_common_init(s) < 0)
             return -2;
 
         quant_matrix_rebuild(s->intra_matrix,        old_permutation, s->idsp.idct_permutation);
@@ -1618,7 +1618,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
     if (s->first_field || s->picture_structure == PICT_FRAME) {
         AVFrameSideData *pan_scan;
 
-        if (ff_MPV_frame_start(s, avctx) < 0)
+        if (ff_mpv_frame_start(s, avctx) < 0)
             return -1;
 
         ff_mpeg_er_frame_start(s);
@@ -1858,13 +1858,13 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
         s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
         s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
 
-        ff_MPV_decode_mb(s, s->block);
+        ff_mpv_decode_mb(s, s->block);
 
         if (++s->mb_x >= s->mb_width) {
             const int mb_size = 16 >> s->avctx->lowres;
 
             ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
-            ff_MPV_report_decode_progress(s);
+            ff_mpv_report_decode_progress(s);
 
             s->mb_x  = 0;
             s->mb_y += 1 << field_pic;
@@ -2054,7 +2054,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
 
         ff_er_frame_end(&s->er);
 
-        ff_MPV_frame_end(s);
+        ff_mpv_frame_end(s);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
             int ret = av_frame_ref(pict, s->current_picture_ptr->f);
@@ -2175,7 +2175,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
     /* start new MPEG-1 context decoding */
     s->out_format = FMT_MPEG1;
     if (s1->mpeg_enc_ctx_allocated) {
-        ff_MPV_common_end(s);
+        ff_mpv_common_end(s);
         s1->mpeg_enc_ctx_allocated = 0;
     }
     s->width            = avctx->coded_width;
@@ -2187,7 +2187,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
     setup_hwaccel_for_pixfmt(avctx);
 
     ff_mpv_idct_init(s);
-    if (ff_MPV_common_init(s) < 0)
+    if (ff_mpv_common_init(s) < 0)
         return -1;
     s1->mpeg_enc_ctx_allocated = 1;
 
@@ -2780,7 +2780,7 @@ static av_cold int mpeg_decode_end(AVCodecContext *avctx)
     Mpeg1Context *s = avctx->priv_data;
 
     if (s->mpeg_enc_ctx_allocated)
-        ff_MPV_common_end(&s->mpeg_enc_ctx);
+        ff_mpv_common_end(&s->mpeg_enc_ctx);
     av_freep(&s->a53_caption);
     return 0;
 }
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index 0dbcda86bfd4e5ea7458833e7c628b2fe83ab822..6275a83667d4b7a3f092d9599565e7e92ade39c3 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -147,7 +147,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && avctx->height > 2800)
         avctx->thread_count = 1;
 
-    if (ff_MPV_encode_init(avctx) < 0)
+    if (ff_mpv_encode_init(avctx) < 0)
         return -1;
 
     if (find_frame_rate_index(s) < 0) {
@@ -1149,8 +1149,8 @@ AVCodec ff_mpeg1video_encoder = {
     .id                   = AV_CODEC_ID_MPEG1VIDEO,
     .priv_data_size       = sizeof(MpegEncContext),
     .init                 = encode_init,
-    .encode2              = ff_MPV_encode_picture,
-    .close                = ff_MPV_encode_end,
+    .encode2              = ff_mpv_encode_picture,
+    .close                = ff_mpv_encode_end,
     .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
     .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                            AV_PIX_FMT_NONE },
@@ -1165,8 +1165,8 @@ AVCodec ff_mpeg2video_encoder = {
     .id                   = AV_CODEC_ID_MPEG2VIDEO,
     .priv_data_size       = sizeof(MpegEncContext),
     .init                 = encode_init,
-    .encode2              = ff_MPV_encode_picture,
-    .close                = ff_MPV_encode_end,
+    .encode2              = ff_mpv_encode_picture,
+    .close                = ff_mpv_encode_end,
     .supported_framerates = ff_mpeg2_frame_rate_tab,
     .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                            AV_PIX_FMT_YUV422P,
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index 6a3d7e73310df4138cdb260857deae194fd8cf7f..5751432a3164c39bf2d1ceeec90584afe3e87da2 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -1300,7 +1300,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
         return AVERROR(EINVAL);
     }
 
-    if ((ret = ff_MPV_encode_init(avctx)) < 0)
+    if ((ret = ff_mpv_encode_init(avctx)) < 0)
         return ret;
 
     if (!done) {
@@ -1414,8 +1414,8 @@ AVCodec ff_mpeg4_encoder = {
     .id             = AV_CODEC_ID_MPEG4,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
     .priv_class     = &mpeg4enc_class,
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index f6b9a0409499d0b8df36f7b3838ecf03217d2d83..119aae2e82be31c6a1b6302d903650864f378146 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -361,7 +361,7 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
 
     if (ref)
         av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
-    ff_MPV_decode_mb(s, s->block);
+    ff_mpv_decode_mb(s, s->block);
 }
 
 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
@@ -409,16 +409,16 @@ static av_cold int dct_init(MpegEncContext *s)
     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
 
     if (HAVE_INTRINSICS_NEON)
-        ff_MPV_common_init_neon(s);
+        ff_mpv_common_init_neon(s);
 
     if (ARCH_ALPHA)
-        ff_MPV_common_init_axp(s);
+        ff_mpv_common_init_axp(s);
     if (ARCH_ARM)
-        ff_MPV_common_init_arm(s);
+        ff_mpv_common_init_arm(s);
     if (ARCH_PPC)
-        ff_MPV_common_init_ppc(s);
+        ff_mpv_common_init_ppc(s);
     if (ARCH_X86)
-        ff_MPV_common_init_x86(s);
+        ff_mpv_common_init_x86(s);
 
     return 0;
 }
@@ -865,7 +865,7 @@ static int init_duplicate_context(MpegEncContext *s)
 
     return 0;
 fail:
-    return -1; // free() through ff_MPV_common_end()
+    return -1; // free() through ff_mpv_common_end()
 }
 
 static void free_duplicate_context(MpegEncContext *s)
@@ -964,7 +964,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 //             s->picture_range_start  += MAX_PICTURE_COUNT;
 //             s->picture_range_end    += MAX_PICTURE_COUNT;
             ff_mpv_idct_init(s);
-            if((ret = ff_MPV_common_init(s)) < 0){
+            if((ret = ff_mpv_common_init(s)) < 0){
                 memset(s, 0, sizeof(MpegEncContext));
                 s->avctx = dst;
                 return ret;
@@ -976,7 +976,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
         s->context_reinit = 0;
         s->height = s1->height;
         s->width  = s1->width;
-        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
+        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
             return ret;
     }
 
@@ -1079,7 +1079,7 @@ do {\
  * The changed fields will not depend upon the
  * prior state of the MpegEncContext.
  */
-void ff_MPV_common_defaults(MpegEncContext *s)
+void ff_mpv_common_defaults(MpegEncContext *s)
 {
     s->y_dc_scale_table      =
     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
@@ -1102,9 +1102,9 @@ void ff_MPV_common_defaults(MpegEncContext *s)
  * the changed fields will not depend upon
  * the prior state of the MpegEncContext.
  */
-void ff_MPV_decode_defaults(MpegEncContext *s)
+void ff_mpv_decode_defaults(MpegEncContext *s)
 {
-    ff_MPV_common_defaults(s);
+    ff_mpv_common_defaults(s);
 }
 
 static int init_er(MpegEncContext *s)
@@ -1271,7 +1271,7 @@ fail:
  * init common structure for both encoder and decoder.
  * this assumes that some variables like width/height are already set
  */
-av_cold int ff_MPV_common_init(MpegEncContext *s)
+av_cold int ff_mpv_common_init(MpegEncContext *s)
 {
     int i;
     int nb_slices = (HAVE_THREADS &&
@@ -1380,7 +1380,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
 
     return 0;
  fail:
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return -1;
 }
 
@@ -1440,7 +1440,7 @@ static int free_context_frame(MpegEncContext *s)
     return 0;
 }
 
-int ff_MPV_common_frame_size_change(MpegEncContext *s)
+int ff_mpv_common_frame_size_change(MpegEncContext *s)
 {
     int i, err = 0;
 
@@ -1509,12 +1509,12 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 
     return 0;
  fail:
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return err;
 }
 
 /* init common structure for both encoder and decoder */
-void ff_MPV_common_end(MpegEncContext *s)
+void ff_mpv_common_end(MpegEncContext *s)
 {
     int i;
 
@@ -1745,7 +1745,7 @@ static void gray_frame(AVFrame *frame)
  * generic function called after decoding
  * the header and before a frame is decoded.
  */
-int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
     int i, ret;
     Picture *pic;
@@ -1975,7 +1975,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 }
 
 /* called after a frame has been decoded. */
-void ff_MPV_frame_end(MpegEncContext *s)
+void ff_mpv_frame_end(MpegEncContext *s)
 {
     emms_c();
 
@@ -2804,7 +2804,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
 /**
  * find the lowest MB row referenced in the MVs
  */
-int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
+int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
 {
     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
     int my, off, i, mvs;
@@ -2910,7 +2910,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
    s->interlaced_dct : true if interlaced dct used (mpeg2)
  */
 static av_always_inline
-void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
+void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                             int lowres_flag, int is_mpeg12)
 {
     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
@@ -2999,12 +2999,12 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                     if (s->mv_dir & MV_DIR_FORWARD) {
                         ff_thread_await_progress(&s->last_picture_ptr->tf,
-                                                 ff_MPV_lowest_referenced_row(s, 0),
+                                                 ff_mpv_lowest_referenced_row(s, 0),
                                                  0);
                     }
                     if (s->mv_dir & MV_DIR_BACKWARD) {
                         ff_thread_await_progress(&s->next_picture_ptr->tf,
-                                                 ff_MPV_lowest_referenced_row(s, 1),
+                                                 ff_mpv_lowest_referenced_row(s, 1),
                                                  0);
                     }
                 }
@@ -3027,12 +3027,12 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                         op_pix = s->hdsp.put_no_rnd_pixels_tab;
                     }
                     if (s->mv_dir & MV_DIR_FORWARD) {
-                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
+                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                         op_pix = s->hdsp.avg_pixels_tab;
                         op_qpix= s->me.qpel_avg;
                     }
                     if (s->mv_dir & MV_DIR_BACKWARD) {
-                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
+                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                     }
                 }
             }
@@ -3156,15 +3156,16 @@ skip_idct:
     }
 }
 
-void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
+void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
+{
 #if !CONFIG_SMALL
     if(s->out_format == FMT_MPEG1) {
-        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
-        else                 MPV_decode_mb_internal(s, block, 0, 1);
+        if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
+        else                 mpv_decode_mb_internal(s, block, 0, 1);
     } else
 #endif
-    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
-    else                  MPV_decode_mb_internal(s, block, 0, 0);
+    if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
+    else                 mpv_decode_mb_internal(s, block, 0, 0);
 }
 
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
@@ -3280,7 +3281,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
 }
 
-void ff_MPV_report_decode_progress(MpegEncContext *s)
+void ff_mpv_report_decode_progress(MpegEncContext *s)
 {
     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 6a27a557238201c683bb37a8d5f5417dd24d2b05..fa20665f8935c903e7ea4b9a1312a5099442a5a6 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -711,25 +711,36 @@ static const AVClass name ## _class = {\
  * and decoding).  The changed fields will not depend upon the prior
  * state of the MpegEncContext.
  */
-void ff_MPV_common_defaults(MpegEncContext *s);
-
-void ff_MPV_decode_defaults(MpegEncContext *s);
-int ff_MPV_common_init(MpegEncContext *s);
-int ff_MPV_common_frame_size_change(MpegEncContext *s);
-void ff_MPV_common_end(MpegEncContext *s);
-void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]);
-int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
-void ff_MPV_frame_end(MpegEncContext *s);
-int ff_MPV_encode_init(AVCodecContext *avctx);
-int ff_MPV_encode_end(AVCodecContext *avctx);
-int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
-                          const AVFrame *frame, int *got_packet);
+void ff_mpv_common_defaults(MpegEncContext *s);
+
 void ff_dct_encode_init_x86(MpegEncContext *s);
-void ff_MPV_common_init_x86(MpegEncContext *s);
-void ff_MPV_common_init_axp(MpegEncContext *s);
-void ff_MPV_common_init_arm(MpegEncContext *s);
-void ff_MPV_common_init_neon(MpegEncContext *s);
-void ff_MPV_common_init_ppc(MpegEncContext *s);
+
+int ff_mpv_common_init(MpegEncContext *s);
+void ff_mpv_common_init_arm(MpegEncContext *s);
+void ff_mpv_common_init_axp(MpegEncContext *s);
+void ff_mpv_common_init_neon(MpegEncContext *s);
+void ff_mpv_common_init_ppc(MpegEncContext *s);
+void ff_mpv_common_init_x86(MpegEncContext *s);
+
+int ff_mpv_common_frame_size_change(MpegEncContext *s);
+void ff_mpv_common_end(MpegEncContext *s);
+
+void ff_mpv_decode_defaults(MpegEncContext *s);
+void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64]);
+void ff_mpv_report_decode_progress(MpegEncContext *s);
+
+int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+void ff_mpv_frame_end(MpegEncContext *s);
+
+int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir);
+
+int ff_mpv_encode_init(AVCodecContext *avctx);
+void ff_mpv_encode_init_x86(MpegEncContext *s);
+
+int ff_mpv_encode_end(AVCodecContext *avctx);
+int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                          const AVFrame *frame, int *got_packet);
+
 void ff_clean_intra_table_entries(MpegEncContext *s);
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h);
 void ff_mpeg_flush(AVCodecContext *avctx);
@@ -746,8 +757,6 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
 int ff_find_unused_picture(MpegEncContext *s, int shared);
 void ff_denoise_dct(MpegEncContext *s, int16_t *block);
 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
-int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir);
-void ff_MPV_report_decode_progress(MpegEncContext *s);
 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
 void ff_set_qscale(MpegEncContext * s, int qscale);
 
@@ -759,7 +768,7 @@ int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int
 
 void ff_init_block_index(MpegEncContext *s);
 
-void ff_MPV_motion(MpegEncContext *s,
+void ff_mpv_motion(MpegEncContext *s,
                    uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int dir,
                    uint8_t **ref_picture,
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 774e235dd414b2b88b8ac8fab959c9b109ec095a..ffcd9da36ca089d3d3e3dfcafe13f7fbba093bfe 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -218,10 +218,10 @@ static void update_duplicate_context_after_me(MpegEncContext *dst,
  * Set the given MpegEncContext to defaults for encoding.
  * the changed fields will not depend upon the prior state of the MpegEncContext.
  */
-static void MPV_encode_defaults(MpegEncContext *s)
+static void mpv_encode_defaults(MpegEncContext *s)
 {
     int i;
-    ff_MPV_common_defaults(s);
+    ff_mpv_common_defaults(s);
 
     for (i = -16; i < 16; i++) {
         default_fcode_tab[i + MAX_MV] = 1;
@@ -251,12 +251,12 @@ av_cold int ff_dct_encode_init(MpegEncContext *s) {
 }
 
 /* init video encoder */
-av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
+av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
     int i, ret, format_supported;
 
-    MPV_encode_defaults(s);
+    mpv_encode_defaults(s);
 
     switch (avctx->codec_id) {
     case AV_CODEC_ID_MPEG2VIDEO:
@@ -838,7 +838,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 
     /* init */
     ff_mpv_idct_init(s);
-    if (ff_MPV_common_init(s) < 0)
+    if (ff_mpv_common_init(s) < 0)
         return -1;
 
     ff_fdctdsp_init(&s->fdsp, avctx);
@@ -966,18 +966,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 
     return 0;
 fail:
-    ff_MPV_encode_end(avctx);
+    ff_mpv_encode_end(avctx);
     return AVERROR_UNKNOWN;
 }
 
-av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
+av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
     int i;
 
     ff_rate_control_uninit(s);
 
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     if (CONFIG_MJPEG_ENCODER &&
         s->out_format == FMT_MJPEG)
         ff_mjpeg_encode_close(s);
@@ -1681,7 +1681,7 @@ static int frame_start(MpegEncContext *s)
     return 0;
 }
 
-int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *pic_arg, int *got_packet)
 {
     MpegEncContext *s = avctx->priv_data;
@@ -2159,14 +2159,14 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
         }
 
         if (s->mv_dir & MV_DIR_FORWARD) {
-            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
+            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
                           s->last_picture.f->data,
                           op_pix, op_qpix);
             op_pix  = s->hdsp.avg_pixels_tab;
             op_qpix = s->qdsp.avg_qpel_pixels_tab;
         }
         if (s->mv_dir & MV_DIR_BACKWARD) {
-            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
+            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
                           s->next_picture.f->data,
                           op_pix, op_qpix);
         }
@@ -2496,7 +2496,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
     }
 
     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
-        ff_MPV_decode_mb(s, s->block);
+        ff_mpv_decode_mb(s, s->block);
 
         score *= s->lambda2;
         score += sse_mb(s) << FF_LAMBDA_SHIFT;
@@ -3156,7 +3156,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
                 }
 
                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
-                    ff_MPV_decode_mb(s, s->block);
+                    ff_mpv_decode_mb(s, s->block);
             } else {
                 int motion_x = 0, motion_y = 0;
                 s->mv_type=MV_TYPE_16X16;
@@ -3275,7 +3275,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                     ff_h263_update_motion_val(s);
 
-                ff_MPV_decode_mb(s, s->block);
+                ff_mpv_decode_mb(s, s->block);
             }
 
             /* clean the MV table in IPS frames for direct mode in B frames */
@@ -4499,9 +4499,9 @@ AVCodec ff_h263_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H263,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
     .priv_class     = &h263_class,
 };
@@ -4527,9 +4527,9 @@ AVCodec ff_h263p_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H263P,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .capabilities   = CODEC_CAP_SLICE_THREADS,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &h263p_class,
@@ -4543,9 +4543,9 @@ AVCodec ff_msmpeg4v2_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MSMPEG4V2,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &msmpeg4v2_class,
 };
@@ -4558,9 +4558,9 @@ AVCodec ff_msmpeg4v3_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MSMPEG4V3,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &msmpeg4v3_class,
 };
@@ -4573,9 +4573,9 @@ AVCodec ff_wmv1_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_WMV1,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &wmv1_class,
 };
diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c
index 0d3ba08deed3f49969e81f49250c7bb1f8a75093..e7a585dd5f9a1d523ede324dd9ba8f9c8f1a8bc4 100644
--- a/libavcodec/mpegvideo_motion.c
+++ b/libavcodec/mpegvideo_motion.c
@@ -827,7 +827,7 @@ static inline void apply_8x8(MpegEncContext *s,
  * @param qpix_op qpel motion compensation function (average or put normally)
  * the motion vectors are taken from s->mv and the MV type from s->mv_type
  */
-static av_always_inline void MPV_motion_internal(MpegEncContext *s,
+static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                  uint8_t *dest_y,
                                                  uint8_t *dest_cb,
                                                  uint8_t *dest_cr,
@@ -969,7 +969,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
     }
 }
 
-void ff_MPV_motion(MpegEncContext *s,
+void ff_mpv_motion(MpegEncContext *s,
                    uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int dir,
                    uint8_t **ref_picture,
@@ -978,10 +978,10 @@ void ff_MPV_motion(MpegEncContext *s,
 {
 #if !CONFIG_SMALL
     if (s->out_format == FMT_MPEG1)
-        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
+        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                             ref_picture, pix_op, qpix_op, 1);
     else
 #endif
-        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
+        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                             ref_picture, pix_op, qpix_op, 0);
 }
diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c
index 7d7f7ecb9fd2d28d9fde96523fb3efdb4d62a5c0..d4631cba1e09a851ed53c723d28cba2d10187b45 100644
--- a/libavcodec/mpegvideo_xvmc.c
+++ b/libavcodec/mpegvideo_xvmc.c
@@ -182,7 +182,7 @@ static void ff_xvmc_decode_mb(struct MpegEncContext *s)
         return;
     }
 
-    // from MPV_decode_mb(), update DC predictors for P macroblocks
+    // from ff_mpv_decode_mb(), update DC predictors for P macroblocks
     if (!s->mb_intra) {
         s->last_dc[0] =
         s->last_dc[1] =
diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index 35c5d2722df62ff28e5c0c05f941f028b3bb9790..4d53f8ab8f47c6c4894984240344416f8ad53f9c 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -397,8 +397,8 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
-    if ((ret = ff_MPV_frame_start(s, avctx)) < 0) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "ff_MPV_frame_start error\n");
+    if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
         avctx->pix_fmt = AV_PIX_FMT_RGB24;
         return ret;
     }
@@ -418,7 +418,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 
     ff_er_frame_end(&s->er);
 
-    ff_MPV_frame_end(s);
+    ff_mpv_frame_end(s);
 
     f = s->current_picture.f;
 
diff --git a/libavcodec/neon/mpegvideo.c b/libavcodec/neon/mpegvideo.c
index f56972509fede86e6b8bba97be9d5b2027861476..a96ae519f533d36a998f8ad8bcf7678f0996df48 100644
--- a/libavcodec/neon/mpegvideo.c
+++ b/libavcodec/neon/mpegvideo.c
@@ -123,7 +123,7 @@ static void dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
 }
 
 
-av_cold void ff_MPV_common_init_neon(MpegEncContext *s)
+av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
 {
     int cpu_flags = av_get_cpu_flags();
 
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index 06eed2f200feee8c51c89fc07635818b21eaf0cd..ce53ae4b613b5ac001e003ccb67cde33b8f6fbfc 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -115,7 +115,7 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
 
 #endif /* HAVE_ALTIVEC */
 
-av_cold void ff_MPV_common_init_ppc(MpegEncContext *s)
+av_cold void ff_mpv_common_init_ppc(MpegEncContext *s)
 {
 #if HAVE_ALTIVEC
     if (!PPC_ALTIVEC(av_get_cpu_flags()))
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index 985644d7f7b4060568fe0c7eed791efcd4bc1a0a..7bec038a9bb2f10869c2959c173de93086e3635d 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -379,7 +379,7 @@ static int rv20_decode_picture_header(RVDecContext *rv)
                    "attempting to change resolution to %dx%d\n", new_w, new_h);
             if (av_image_check_size(new_w, new_h, 0, s->avctx) < 0)
                 return AVERROR_INVALIDDATA;
-            ff_MPV_common_end(s);
+            ff_mpv_common_end(s);
 
             // attempt to keep aspect during typical resolution switches
             if (!old_aspect.num)
@@ -395,7 +395,7 @@ static int rv20_decode_picture_header(RVDecContext *rv)
 
             s->width  = new_w;
             s->height = new_h;
-            if ((ret = ff_MPV_common_init(s)) < 0)
+            if ((ret = ff_mpv_common_init(s)) < 0)
                 return ret;
         }
 
@@ -474,7 +474,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
                                    avctx->coded_height, 0, avctx)) < 0)
         return ret;
 
-    ff_MPV_decode_defaults(s);
+    ff_mpv_decode_defaults(s);
 
     s->avctx       = avctx;
     s->out_format  = FMT_H263;
@@ -518,7 +518,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
     ff_mpv_idct_init(s);
-    if ((ret = ff_MPV_common_init(s)) < 0)
+    if ((ret = ff_mpv_common_init(s)) < 0)
         return ret;
 
     ff_h263dsp_init(&s->h263dsp);
@@ -542,7 +542,7 @@ static av_cold int rv10_decode_end(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
 
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return 0;
 }
 
@@ -581,10 +581,10 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
         // FIXME write parser so we always have complete frames?
         if (s->current_picture_ptr) {
             ff_er_frame_end(&s->er);
-            ff_MPV_frame_end(s);
+            ff_mpv_frame_end(s);
             s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0;
         }
-        if ((ret = ff_MPV_frame_start(s, avctx)) < 0)
+        if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
             return ret;
         ff_mpeg_er_frame_start(s);
     } else {
@@ -667,7 +667,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
         }
         if (s->pict_type != AV_PICTURE_TYPE_B)
             ff_h263_update_motion_val(s);
-        ff_MPV_decode_mb(s, s->block);
+        ff_mpv_decode_mb(s, s->block);
         if (s->loop_filter)
             ff_h263_loop_filter(s);
 
@@ -762,7 +762,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     if (s->current_picture_ptr != NULL && s->mb_y >= s->mb_height) {
         ff_er_frame_end(&s->er);
-        ff_MPV_frame_end(s);
+        ff_mpv_frame_end(s);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
             if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
diff --git a/libavcodec/rv10enc.c b/libavcodec/rv10enc.c
index 1f85743b7bbcd84a4a808d582a024db3092ba891..25411322a6632073c1a8ef13b766252a316494be 100644
--- a/libavcodec/rv10enc.c
+++ b/libavcodec/rv10enc.c
@@ -64,9 +64,9 @@ AVCodec ff_rv10_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_RV10,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &rv10_class,
 };
diff --git a/libavcodec/rv20enc.c b/libavcodec/rv20enc.c
index b9431166c7447f08ec1ab20b1971b8ef4b8f62fb..03275c7654369b5357b6196b01676adfeefdccfd 100644
--- a/libavcodec/rv20enc.c
+++ b/libavcodec/rv20enc.c
@@ -65,9 +65,9 @@ AVCodec ff_rv20_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_RV20,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &rv20_class,
 };
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index 19e4ce024b92bdd98e8ed36a470650de7d993053..d8c5510602435fd89bb6fd7cf6bb46fbbccc83d3 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -1489,7 +1489,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
     MpegEncContext *s = &r->s;
     int ret;
 
-    ff_MPV_decode_defaults(s);
+    ff_mpv_decode_defaults(s);
     s->avctx      = avctx;
     s->out_format = FMT_H263;
     s->codec_id   = avctx->codec_id;
@@ -1503,7 +1503,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
     s->low_delay = 0;
 
     ff_mpv_idct_init(s);
-    if ((ret = ff_MPV_common_init(s)) < 0)
+    if ((ret = ff_mpv_common_init(s)) < 0)
         return ret;
 
     ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
@@ -1518,7 +1518,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 #endif
 
     if ((ret = rv34_decoder_alloc(r)) < 0) {
-        ff_MPV_common_end(&r->s);
+        ff_mpv_common_end(&r->s);
         return ret;
     }
 
@@ -1540,10 +1540,10 @@ int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
     if (avctx->internal->is_copy) {
         r->tmp_b_block_base = NULL;
         ff_mpv_idct_init(&r->s);
-        if ((err = ff_MPV_common_init(&r->s)) < 0)
+        if ((err = ff_mpv_common_init(&r->s)) < 0)
             return err;
         if ((err = rv34_decoder_alloc(r)) < 0) {
-            ff_MPV_common_end(&r->s);
+            ff_mpv_common_end(&r->s);
             return err;
         }
     }
@@ -1563,7 +1563,7 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
     if (s->height != s1->height || s->width != s1->width) {
         s->height = s1->height;
         s->width  = s1->width;
-        if ((err = ff_MPV_common_frame_size_change(s)) < 0)
+        if ((err = ff_mpv_common_frame_size_change(s)) < 0)
             return err;
         if ((err = rv34_decoder_realloc(r)) < 0)
             return err;
@@ -1594,7 +1594,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
     int got_picture = 0, ret;
 
     ff_er_frame_end(&s->er);
-    ff_MPV_frame_end(s);
+    ff_mpv_frame_end(s);
     s->mb_num_left = 0;
 
     if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
@@ -1691,7 +1691,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
             av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
                    s->mb_num_left);
             ff_er_frame_end(&s->er);
-            ff_MPV_frame_end(s);
+            ff_mpv_frame_end(s);
         }
 
         if (s->width != si.width || s->height != si.height) {
@@ -1713,13 +1713,13 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
             if (err < 0)
                 return err;
 
-            if ((err = ff_MPV_common_frame_size_change(s)) < 0)
+            if ((err = ff_mpv_common_frame_size_change(s)) < 0)
                 return err;
             if ((err = rv34_decoder_realloc(r)) < 0)
                 return err;
         }
         s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
-        if (ff_MPV_frame_start(s, s->avctx) < 0)
+        if (ff_mpv_frame_start(s, s->avctx) < 0)
             return -1;
         ff_mpeg_er_frame_start(s);
         if (!r->tmp_b_block_base) {
@@ -1824,7 +1824,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
             /* always mark the current frame as finished, frame-mt supports
              * only complete frames */
             ff_er_frame_end(&s->er);
-            ff_MPV_frame_end(s);
+            ff_mpv_frame_end(s);
             s->mb_num_left = 0;
             ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
             return AVERROR_INVALIDDATA;
@@ -1838,7 +1838,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
 {
     RV34DecContext *r = avctx->priv_data;
 
-    ff_MPV_common_end(&r->s);
+    ff_mpv_common_end(&r->s);
     rv34_decoder_free(r);
 
     return 0;
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index 95dcee44661ca49c58b944c525b1e9dc9efe4650..af18d38862530ec68addf77244dc3d76e47e3a98 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -489,7 +489,7 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx)
                                   avctx->frame_number));
 
     s->m.mb_type = NULL;
-    ff_MPV_common_end(&s->m);
+    ff_mpv_common_end(&s->m);
 
     av_freep(&s->m.me.scratchpad);
     av_freep(&s->m.me.map);
@@ -539,7 +539,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
     s->avctx               = avctx;
     s->m.avctx             = avctx;
 
-    if ((ret = ff_MPV_common_init(&s->m)) < 0) {
+    if ((ret = ff_mpv_common_init(&s->m)) < 0) {
         svq1_encode_end(avctx);
         return ret;
     }
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index a73d6150b46dd47128029fd1f77c37e4cef81e45..e1d8ca6ed55c5f545d6510bbf229be4ff577606a 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -5777,7 +5777,7 @@ av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
         av_freep(&v->sr_rows[i >> 1][i & 1]);
     av_freep(&v->hrd_rate);
     av_freep(&v->hrd_buffer);
-    ff_MPV_common_end(&v->s);
+    ff_mpv_common_end(&v->s);
     av_freep(&v->mv_type_mb_plane);
     av_freep(&v->direct_mb_plane);
     av_freep(&v->forward_mb_plane);
@@ -5976,7 +5976,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         if (ff_msmpeg4_decode_init(avctx) < 0)
             goto err;
         if (ff_vc1_decode_init_alloc_tables(v) < 0) {
-            ff_MPV_common_end(s);
+            ff_mpv_common_end(s);
             goto err;
         }
 
@@ -6040,7 +6040,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
             s->next_p_frame_damaged = 0;
     }
 
-    if (ff_MPV_frame_start(s, avctx) < 0) {
+    if (ff_mpv_frame_start(s, avctx) < 0) {
         goto err;
     }
 
@@ -6208,7 +6208,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
             ff_er_frame_end(&s->er);
     }
 
-    ff_MPV_frame_end(s);
+    ff_mpv_frame_end(s);
 
     if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
 image:
diff --git a/libavcodec/wmv2enc.c b/libavcodec/wmv2enc.c
index 869b3c7fe41d766d498a019cb26b51e9382b329e..5470cd06cb4448487fe6e81976911b2d94b24391 100644
--- a/libavcodec/wmv2enc.c
+++ b/libavcodec/wmv2enc.c
@@ -54,7 +54,7 @@ static int encode_ext_header(Wmv2Context *w){
 static av_cold int wmv2_encode_init(AVCodecContext *avctx){
     Wmv2Context * const w= avctx->priv_data;
 
-    if(ff_MPV_encode_init(avctx) < 0)
+    if (ff_mpv_encode_init(avctx) < 0)
         return -1;
 
     ff_wmv2_common_init(w);
@@ -226,7 +226,7 @@ AVCodec ff_wmv2_encoder = {
     .id             = AV_CODEC_ID_WMV2,
     .priv_data_size = sizeof(Wmv2Context),
     .init           = wmv2_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 };
diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c
index 9353a82933882ec7ee0c6d44324b0094d714b3d9..b0028ce27618ba19f246a83a78401cf8f0b4a497 100644
--- a/libavcodec/x86/mpegvideo.c
+++ b/libavcodec/x86/mpegvideo.c
@@ -444,7 +444,7 @@ __asm__ volatile(
 
 #endif /* HAVE_MMX_INLINE */
 
-av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
+av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
 {
 #if HAVE_MMX_INLINE
     int cpu_flags = av_get_cpu_flags();