diff --git a/libavcodec/i386/dsputil_mmx.c b/libavcodec/i386/dsputil_mmx.c
index 3b4446a226d31a424fd48ff0cf8d5d2d5d07e1dd..5675828a4173336df499ff130791702561c9321d 100644
--- a/libavcodec/i386/dsputil_mmx.c
+++ b/libavcodec/i386/dsputil_mmx.c
@@ -2667,7 +2667,7 @@ static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
 #endif /* CONFIG_ENCODERS */
 
 #define PREFETCH(name, op) \
-void name(void *mem, int stride, int h){\
+static void name(void *mem, int stride, int h){\
     const uint8_t *p= mem;\
     do{\
         asm volatile(#op" %0" :: "m"(*p));\
@@ -3007,7 +3007,7 @@ static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *
         ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
 }
 
-void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
+static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
     // not bit-exact: pf2id uses different rounding than C and SSE
     int i;
     for(i=0; i<len; i+=4) {
@@ -3022,7 +3022,7 @@ void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
     }
     asm volatile("femms");
 }
-void float_to_int16_sse(int16_t *dst, const float *src, int len){
+static void float_to_int16_sse(int16_t *dst, const float *src, int len){
     int i;
     for(i=0; i<len; i+=4) {
         asm volatile(
diff --git a/libavcodec/i386/fdct_mmx.c b/libavcodec/i386/fdct_mmx.c
index ad7272bf5f535b036236a6c793a2b7b4799f3036..2ffbfecf64df62aa2053c398f78d4fa38bd2bcab 100644
--- a/libavcodec/i386/fdct_mmx.c
+++ b/libavcodec/i386/fdct_mmx.c
@@ -67,7 +67,7 @@ static const int64_t fdct_one_corr ATTR_ALIGN(8) = 0x0001000100010001LL;
 
 static const int32_t fdct_r_row[2] ATTR_ALIGN(8) = {RND_FRW_ROW, RND_FRW_ROW };
 
-struct
+static struct
 {
  const int32_t fdct_r_row_sse2[4] ATTR_ALIGN(16);
 } fdct_r_row_sse2 ATTR_ALIGN(16)=
@@ -150,7 +150,7 @@ static const int16_t tab_frw_01234567[] ATTR_ALIGN(8) = { // forward_dct coeff
  29692, -12299, 26722, -31521,
 };
 
-struct
+static struct
 {
  const int16_t tab_frw_01234567_sse2[256] ATTR_ALIGN(16);
 } tab_frw_01234567_sse2 ATTR_ALIGN(16) =
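
Note: every hunk above makes the same kind of change, adding `static` to functions and tables that are referenced only inside their own translation unit. At file scope, `static` gives a symbol internal linkage: the name stays out of the object file's exported symbol table, so it cannot collide with an identically named symbol in another file, and the compiler is free to inline or discard it more aggressively. A minimal sketch of the difference, using hypothetical names that are not part of this patch:

    /* linkage_demo.c -- illustrative only, not from the patch */
    #include <stdio.h>

    /* External linkage: exported from this object file; a second
       definition of global_twice() in any other file linked into the
       same binary is a "multiple definition" link error. */
    int global_twice(int x){ return 2*x; }

    /* Internal linkage: private to this file; other files may define
       their own local_twice() without conflict, and an unused static
       function can be dropped entirely at compile time. */
    static int local_twice(int x){ return 2*x; }

    int main(void){
        printf("%d %d\n", global_twice(21), local_twice(21));
        return 0;
    }

The same reasoning covers the two anonymous-struct constant tables in fdct_mmx.c: marking them `static` keeps the table objects private to fdct_mmx.o instead of exporting fdct_r_row_sse2 and tab_frw_01234567_sse2 to the whole link.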