"pcmpgtw %%mm0, %%mm6 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm1, %%mm6 \n\t"
"pxor %%mm6, %%mm1 \n\t"
"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm2, %%mm6 \n\t"
"pxor %%mm6, %%mm2 \n\t"
"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm3, %%mm6 \n\t"
"pxor %%mm6, %%mm3 \n\t"
"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#ifdef HAVE_MMX2
"pminsw %%mm2, %%mm0 \n\t"
"pminsw %%mm3, %%mm1 \n\t"
#else
"movq %%mm0, %%mm6 \n\t"
"psubusw %%mm2, %%mm6 \n\t"
"psubw %%mm6, %%mm0 \n\t"
"movq %%mm1, %%mm6 \n\t"
"psubusw %%mm3, %%mm6 \n\t"
"psubw %%mm6, %%mm1 \n\t"
#endif
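// Without pminsw (MMX2) the minimum of two non-negative words is built with the
// saturating trick above: mm6 = max(mm0 - mm2, 0), then mm0 -= mm6 leaves
// min(mm0, mm2); the same is done for mm1/mm3.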
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
"pxor %%mm6, %%mm4 \n\t"
"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
"movd %2, %%mm2 \n\t" // QP
"psllw $3, %%mm2 \n\t" // 8QP
"movq %%mm2, %%mm3 \n\t" // 8QP
"pcmpgtw %%mm4, %%mm2 \n\t"
"pcmpgtw %%mm5, %%mm3 \n\t"
"pand %%mm2, %%mm4 \n\t"
"pand %%mm3, %%mm5 \n\t"
"psubusw %%mm0, %%mm4 \n\t" // hd
"psubusw %%mm1, %%mm5 \n\t" // ld
"movq "MANGLE(w05)", %%mm2 \n\t" // 5
"pmullw %%mm2, %%mm4 \n\t"
"pmullw %%mm2, %%mm5 \n\t"
"movq "MANGLE(w20)", %%mm2 \n\t" // 32
"paddw %%mm2, %%mm4 \n\t"
"paddw %%mm2, %%mm5 \n\t"
"psrlw $6, %%mm4 \n\t"
"psrlw $6, %%mm5 \n\t"
"movq 16(%%ecx), %%mm0 \n\t" // L3 - L4
"movq 24(%%ecx), %%mm1 \n\t" // H3 - H4
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
"pxor %%mm6, %%mm2 \n\t"
"pxor %%mm7, %%mm3 \n\t"
"pand %%mm2, %%mm4 \n\t"
"pand %%mm3, %%mm5 \n\t"
#ifdef HAVE_MMX2
"pminsw %%mm0, %%mm4 \n\t"
"pminsw %%mm1, %%mm5 \n\t"
#else
"movq %%mm4, %%mm2 \n\t"
"psubusw %%mm0, %%mm2 \n\t"
"psubw %%mm2, %%mm4 \n\t"
"movq %%mm5, %%mm2 \n\t"
"psubusw %%mm1, %%mm2 \n\t"
"psubw %%mm2, %%mm5 \n\t"
#endif
"pxor %%mm6, %%mm4 \n\t"
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm6, %%mm4 \n\t"
"psubw %%mm7, %%mm5 \n\t"
"packsswb %%mm5, %%mm4 \n\t"
"movq (%0), %%mm0 \n\t"
"paddb %%mm4, %%mm0 \n\t"
"movq %%mm0, (%0) \n\t"
"movq (%0, %1), %%mm0 \n\t"
"psubb %%mm4, %%mm0 \n\t"
"movq %%mm0, (%0, %1) \n\t"
: "+r" (src)
: "r" (stride), "m" (c->pQPb)
: "%eax", "%ecx"
);
#else
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
// const int l9= stride + l8;
for(x=0; x<BLOCK_SIZE; x++)
{
const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
{
const int q=(src[l4] - src[l5])/2;
const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
d= MAX(d, 0);
d= (5*d + 32) >> 6;
d*= SIGN(-middleEnergy);
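// d now has the sign opposite to middleEnergy; the if/else below clips it to
// [0, q] (or [q, 0] when q is negative) so the correction applied to the two
// center lines never overshoots half their difference.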
if(q>0)
{
d= d<0 ? 0 : d;
d= d>q ? q : d;
}
else
{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
src[l4]-= d;
src[l5]+= d;
}
src++;
}
#endif
}
static inline void RENAME(dering)(uint8_t src[], int stride, PPContext *c)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"pxor %%mm6, %%mm6 \n\t"
"pcmpeqb %%mm7, %%mm7 \n\t"
"movq %2, %%mm0 \n\t"
"punpcklbw %%mm6, %%mm0 \n\t"
"psrlw $1, %%mm0 \n\t"
"psubw %%mm7, %%mm0 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"movq %%mm0, %3 \n\t"
"leal (%0, %1), %%eax \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
#ifdef HAVE_MMX2
#define FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"pminub %%mm0, %%mm7 \n\t"\
"pmaxub %%mm0, %%mm6 \n\t"
#else
#define FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"movq %%mm7, %%mm1 \n\t"\
"psubusb %%mm0, %%mm6 \n\t"\
"paddb %%mm0, %%mm6 \n\t"\
"psubusb %%mm0, %%mm1 \n\t"\
"psubb %%mm1, %%mm7 \n\t"
#endif
FIND_MIN_MAX((%%eax))
FIND_MIN_MAX((%%eax, %1))
FIND_MIN_MAX((%%eax, %1, 2))
FIND_MIN_MAX((%0, %1, 4))
FIND_MIN_MAX((%%edx))
FIND_MIN_MAX((%%edx, %1))
FIND_MIN_MAX((%%edx, %1, 2))
FIND_MIN_MAX((%0, %1, 8))
"movq %%mm7, %%mm4 \n\t"
Michael Niedermayer
committed
"psrlq $8, %%mm7 \n\t"
#ifdef HAVE_MMX2
Michael Niedermayer
committed
"pshufw $0xF9, %%mm7, %%mm4 \n\t"
Michael Niedermayer
committed
"pshufw $0xFE, %%mm7, %%mm4 \n\t"
Michael Niedermayer
committed
#else
"movq %%mm7, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $16, %%mm7 \n\t"
"movq %%mm7, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $32, %%mm7 \n\t"
"movq %%mm7, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm7 \n\t"
#endif
"movq %%mm6, %%mm4 \n\t"
"psrlq $8, %%mm6 \n\t"
#ifdef HAVE_MMX2
"pmaxub %%mm4, %%mm6 \n\t" // max of pixels
"pshufw $0xF9, %%mm6, %%mm4 \n\t"
"pmaxub %%mm4, %%mm6 \n\t"
"pshufw $0xFE, %%mm6, %%mm4 \n\t"
"pmaxub %%mm4, %%mm6 \n\t"
#else
"psubusb %%mm4, %%mm6 \n\t"
"paddb %%mm4, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $16, %%mm6 \n\t"
"psubusb %%mm4, %%mm6 \n\t"
"paddb %%mm4, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $32, %%mm6 \n\t"
"psubusb %%mm4, %%mm6 \n\t"
"paddb %%mm4, %%mm6 \n\t"
#endif
"movq %%mm6, %%mm0 \n\t" // max
"psubb %%mm7, %%mm6 \n\t" // max - min
"movd %%mm6, %%ecx \n\t"
"cmpb "MANGLE(deringThreshold)", %%cl \n\t"
"leal -24(%%esp), %%ecx \n\t"
"andl $0xFFFFFFF8, %%ecx \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"movq (%0), %%mm0 \n\t" // L10
"movq %%mm0, %%mm1 \n\t" // L10
"movq %%mm0, %%mm2 \n\t" // L10
"psllq $8, %%mm1 \n\t"
"psrlq $8, %%mm2 \n\t"
"movd -4(%0), %%mm3 \n\t"
"movd 8(%0), %%mm4 \n\t"
"psrlq $24, %%mm3 \n\t"
"psllq $56, %%mm4 \n\t"
"por %%mm3, %%mm1 \n\t" // L00
"por %%mm4, %%mm2 \n\t" // L20
"movq %%mm1, %%mm3 \n\t" // L00
PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
"psubusb %%mm7, %%mm0 \n\t"
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm3 \n\t"
"pcmpeqb "MANGLE(b00)", %%mm0 \n\t" // L10 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L20 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm3 \n\t" // L00 > a ? 0 : -1
"paddb %%mm2, %%mm0 \n\t"
"paddb %%mm3, %%mm0 \n\t"
"movq (%%eax), %%mm2 \n\t" // L11
"movq %%mm2, %%mm3 \n\t" // L11
"movq %%mm2, %%mm4 \n\t" // L11
"psllq $8, %%mm3 \n\t"
"psrlq $8, %%mm4 \n\t"
"movd -4(%%eax), %%mm5 \n\t"
"movd 8(%%eax), %%mm6 \n\t"
"psrlq $24, %%mm5 \n\t"
"psllq $56, %%mm6 \n\t"
"por %%mm5, %%mm3 \n\t" // L01
"por %%mm6, %%mm4 \n\t" // L21
"movq %%mm3, %%mm5 \n\t" // L01
PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm4 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L11 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm4 \n\t" // L21 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm5 \n\t" // L01 > a ? 0 : -1
"paddb %%mm4, %%mm2 \n\t"
"paddb %%mm5, %%mm2 \n\t"
// 0, 2, 3, 1
#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
"movq " #src ", " #sx " \n\t" /* src[0] */\
"movq " #sx ", " #lx " \n\t" /* src[0] */\
"movq " #sx ", " #t0 " \n\t" /* src[0] */\
"psllq $8, " #lx " \n\t"\
"psrlq $8, " #t0 " \n\t"\
"movd -4" #src ", " #t1 " \n\t"\
"psrlq $24, " #t1 " \n\t"\
"por " #t1 ", " #lx " \n\t" /* src[-1] */\
"movd 8" #src ", " #t1 " \n\t"\
"psllq $56, " #t1 " \n\t"\
"por " #t1 ", " #t0 " \n\t" /* src[+1] */\
"movq " #lx ", " #t1 " \n\t" /* src[-1] */\
PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
"movq " #lx ", 8(%%ecx) \n\t"\
"movq (%%ecx), " #lx " \n\t"\
"psubusb " #lx ", " #t1 " \n\t"\
"psubusb " #lx ", " #t0 " \n\t"\
"psubusb " #lx ", " #sx " \n\t"\
"pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
"paddb " #t1 ", " #t0 " \n\t"\
"paddb " #t0 ", " #sx " \n\t"\
\
PAVGB(plx, pplx) /* filtered */\
"movq " #dst ", " #t0 " \n\t" /* dst */\
"psubusb %3, " #t0 " \n\t"\
"paddusb %3, " #t1 " \n\t"\
"paddb " #sx ", " #ppsx " \n\t"\
"paddb " #psx ", " #ppsx " \n\t"\
"#paddb "MANGLE(b02)", " #ppsx " \n\t"\
"pand "MANGLE(b08)", " #ppsx " \n\t"\

/*
1111110
1111101
1111100
1111011
1111010
1111001
*/
//DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
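/* Each DERING_CORE invocation handles one row: it builds the row shifted left and
   right by one pixel (src[-1], src[+1]), forms their PAVGB smoothing, and turns each
   of the three values into a per-byte flag (0 or -1) telling whether it is at or
   below the threshold kept at (%%ecx). Adding the flags of three consecutive rows
   into ppsx gives minus the count of such pixels in each 3x3 neighbourhood, and
   "pand b08" keeps a nonzero mask only where that count is between 1 and 8, i.e.
   where the neighbourhood is mixed rather than uniformly above or below the
   threshold. The register arguments rotate from call to call (see the parameter
   comment above) so each row's values are reused for the two following rows. */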
DERING_CORE((%%eax),(%%eax, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%eax, %1),(%%eax, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%eax, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%0, %1, 4),(%%edx) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%edx),(%%edx, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%edx, %1), (%%edx, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%%edx, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%0, %1, 8),(%%edx, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
: : "r" (src), "r" (stride), "m" (c->pQPb), "m"(c->pQPb2)
: "%eax", "%edx", "%ecx"
int y;
int min=255;
int max=0;
int avg;
uint8_t *p;
int s[10];
for(y=1; y<9; y++)
{
int x;
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
if(*p > max) max= *p;
if(*p < min) min= *p;
}
}

avg= (min + max + 1)>>1;

for(y=0; y<10; y++)
{
int t = 0;

if(src[stride*y + 0] > avg) t+= 1;
if(src[stride*y + 1] > avg) t+= 2;
if(src[stride*y + 2] > avg) t+= 4;
if(src[stride*y + 3] > avg) t+= 8;
if(src[stride*y + 4] > avg) t+= 16;
if(src[stride*y + 5] > avg) t+= 32;
if(src[stride*y + 6] > avg) t+= 64;
if(src[stride*y + 7] > avg) t+= 128;
if(src[stride*y + 8] > avg) t+= 256;
if(src[stride*y + 9] > avg) t+= 512;
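// t is a bitmask of the pixels in this row that lie above the block average;
// the next two lines keep a bit only where the pixel and both horizontal
// neighbours are on the same side of avg (the complemented copy in the upper
// 16 bits handles the "all below" case). s[] is later ANDed across three rows,
// so only pixels in a locally uniform 3x3 area get filtered.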
t |= (~t)<<16;
t &= (t<<1) & (t>>1);
s[y] = t;
}
for(y=1; y<9; y++)
{
int t = s[y-1] & s[y] & s[y+1];
t|= t>>16;
s[y-1]= t;
}
for(y=1; y<9; y++)
{
int x;
int t = s[y-1];
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
if(t & (1<<x))
{
int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
+2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
+(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
f= (f + 8)>>4;
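// f is the 3x3 binomial smoothing (1 2 1; 2 4 2; 1 2 1)/16 of *p, rounded to nearest.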
#ifdef DEBUG_DERING_THRESHOLD
asm volatile("emms\n\t":);
{
static long long numPixels=0;
if(x!=1 && x!=8 && y!=1 && y!=8) numPixels++;
// if((max-min)<20 || (max-min)*QP<200)
// if((max-min)*QP < 500)
// if(max-min<QP/2)
if(max-min < 20)
{
static int numSkiped=0;
static int errorSum=0;
static int worstQP=0;
static int worstRange=0;
static int worstDiff=0;
int diff= (f - *p);
int absDiff= ABS(diff);
int error= diff*diff;
if(x==1 || x==8 || y==1 || y==8) continue;
numSkiped++;
if(absDiff > worstDiff)
{
worstDiff= absDiff;
worstQP= QP;
worstRange= max-min;
}
errorSum+= error;
if(1024LL*1024LL*1024LL % numSkiped == 0)
{
printf( "sum:%1.3f, skip:%d, wQP:%d, "
"wRange:%d, wDiff:%d, relSkip:%1.3f\n",
(float)errorSum/numSkiped, numSkiped, worstQP, worstRange,
worstDiff, (float)numSkiped/numPixels);
}
}
}
#endif
if (*p + QP2 < f) *p= *p + QP2;
else if(*p - QP2 > f) *p= *p - QP2;
else *p=f;
#ifdef DEBUG_DERING_THRESHOLD
if(max-min < 20)
{
for(y=1; y<9; y++)
{
int x;
int t = 0;
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
*p = MIN(*p + 20, 255);
}
}
// src[0] = src[7]=src[stride*7]=src[stride*7 + 7]=255;
}
#endif
#endif
}
/**
* Deinterlaces the given block by linearly interpolating every second line.
* Will be called for every 8x8 block and can read & write lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can be read too.
* Lines 4-12 will be read into the deblocking filter and should be deinterlaced.
*/
static inline void RENAME(deInterlaceInterpolateLinear)(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ecx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
"movq (%0), %%mm0 \n\t"
"movq (%%eax, %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
"movq %%mm0, (%%eax) \n\t"
"movq (%0, %1, 4), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
"movq %%mm1, (%%eax, %1, 2) \n\t"
"movq (%%ecx, %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
"movq %%mm0, (%%ecx) \n\t"
"movq (%0, %1, 8), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
"movq %%mm1, (%%ecx, %1, 2) \n\t"
: : "r" (src), "r" (stride)
: "%eax", "%ecx"
);
#else
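/* The scalar fallback interpolates four packed bytes at a time: for unsigned
   bytes, (x | y) - (((x ^ y) & 0xFE..FE) >> 1) is the per-byte rounding-up
   average (x + y + 1) >> 1, the same rounding PAVGB uses; the 0xFEFEFEFE mask
   keeps the shifted XOR bits from crossing byte lanes. A standalone sketch of
   the idea (illustrative only, not part of this filter):

       static inline uint32_t packed_avg(uint32_t x, uint32_t y)
       {
           return (x | y) - (((x ^ y) & 0xFEFEFEFEUL) >> 1);
       }
*/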
int x;
uint32_t a, b;
for(x=0; x<2; x++){
a= *(uint32_t*)&src[stride*0];
b= *(uint32_t*)&src[stride*2];
*(uint32_t*)&src[stride*1]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
a= *(uint32_t*)&src[stride*4];
*(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
b= *(uint32_t*)&src[stride*6];
*(uint32_t*)&src[stride*5]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
a= *(uint32_t*)&src[stride*8];
*(uint32_t*)&src[stride*7]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
src += 4;
}
#endif
}
/**
* Deinterlaces the given block by cubically interpolating every second line.
* Will be called for every 8x8 block and can read & write lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can be read too.
* Lines 4-12 will be read into the deblocking filter and should be deinterlaced.
* This filter will read lines 3-15 and write lines 7-13.
*/
static inline void RENAME(deInterlaceInterpolateCubic)(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%edx \n\t"
"leal (%%edx, %1, 4), %%ecx \n\t"
"addl %1, %%ecx \n\t"
"pxor %%mm7, %%mm7 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
#define DEINT_CUBIC(a,b,c,d,e)\
"movq " #a ", %%mm0 \n\t"\
"movq " #b ", %%mm1 \n\t"\
"movq " #d ", %%mm2 \n\t"\
"movq " #e ", %%mm3 \n\t"\
PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
PAVGB(%%mm3, %%mm0) /* (a+e) /2 */\
"movq %%mm0, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm2 \n\t"\
"movq %%mm1, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
"psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
"psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
"psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
"psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
"psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
"packuswb %%mm3, %%mm1 \n\t"\
"movq %%mm1, " #c " \n\t"
DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%edx, %1))
DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%edx), (%%edx, %1), (%0, %1, 8))
DEINT_CUBIC((%0, %1, 4), (%%edx, %1), (%%edx, %1, 2), (%0, %1, 8), (%%ecx))
DEINT_CUBIC((%%edx, %1), (%0, %1, 8), (%%edx, %1, 4), (%%ecx), (%%ecx, %1, 2))
: : "r" (src), "r" (stride)
: "%eax", "%edx", "%ecx"
);
#else
int x;
for(x=0; x<8; x++)
{
src[stride*3] = CLIP((-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4);
src[stride*5] = CLIP((-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4);
src[stride*7] = CLIP((-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4);
src[stride*9] = CLIP((-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4);
src++;
}
#endif
}
/**
* Deinterlaces the given block by filtering every second line with a (-1 4 2 4 -1) filter.
* Will be called for every 8x8 block and can read & write lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can be read too.
* Lines 4-12 will be read into the deblocking filter and should be deinterlaced.
* This filter will read lines 4-13 and write lines 5-11.
*/
static inline void RENAME(deInterlaceFF)(uint8_t src[], int stride, uint8_t *tmp)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*4;
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%edx \n\t"
"pxor %%mm7, %%mm7 \n\t"
"movq (%2), %%mm0 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
#define DEINT_FF(a,b,c,d)\
"movq " #a ", %%mm1 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq " #c ", %%mm3 \n\t"\
"movq " #d ", %%mm4 \n\t"\
PAVGB(%%mm3, %%mm1) \
PAVGB(%%mm4, %%mm0) \
"movq %%mm0, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"movq %%mm1, %%mm4 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"psllw $2, %%mm1 \n\t"\
"psllw $2, %%mm4 \n\t"\
"psubw %%mm0, %%mm1 \n\t"\
"psubw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm5 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"paddw %%mm2, %%mm1 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"psraw $2, %%mm1 \n\t"\
"psraw $2, %%mm4 \n\t"\
"packuswb %%mm4, %%mm1 \n\t"\
"movq %%mm1, " #b " \n\t"\
DEINT_FF((%0) , (%%eax) , (%%eax, %1), (%%eax, %1, 2))
DEINT_FF((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%edx) )
DEINT_FF((%0, %1, 4), (%%edx) , (%%edx, %1), (%%edx, %1, 2))
DEINT_FF((%%edx, %1), (%%edx, %1, 2), (%0, %1, 8), (%%edx, %1, 4))
"movq %%mm0, (%2) \n\t"
: : "r" (src), "r" (stride), "r"(tmp)
: "%eax", "%edx"
);
#else
int x;
src+= stride*4;
for(x=0; x<8; x++)
{
int t1= tmp[x];
int t2= src[stride*1];
src[stride*1]= CLIP((-t1 + 4*src[stride*0] + 2*t2 + 4*src[stride*2] - src[stride*3] + 4)>>3);
t1= src[stride*3];
src[stride*3]= CLIP((-t2 + 4*src[stride*2] + 2*t1 + 4*src[stride*4] - src[stride*5] + 4)>>3);
t2= src[stride*5];
src[stride*5]= CLIP((-t1 + 4*src[stride*4] + 2*t2 + 4*src[stride*6] - src[stride*7] + 4)>>3);
t1= src[stride*7];
src[stride*7]= CLIP((-t2 + 4*src[stride*6] + 2*t1 + 4*src[stride*8] - src[stride*9] + 4)>>3);
tmp[x]= t1;
src++;
}
#endif
}
/**
* Deinterlaces the given block by filtering every line with a (-1 2 6 2 -1) filter.
* Will be called for every 8x8 block and can read & write lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can be read too.
* Lines 4-12 will be read into the deblocking filter and should be deinterlaced.
* This filter will read lines 4-13 and write lines 4-11.
*/
static inline void RENAME(deInterlaceL5)(uint8_t src[], int stride, uint8_t *tmp, uint8_t *tmp2)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*4;
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%edx \n\t"
"pxor %%mm7, %%mm7 \n\t"
"movq (%2), %%mm0 \n\t"
"movq (%3), %%mm1 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
#define DEINT_L5(t1,t2,a,b,c)\
"movq " #a ", %%mm2 \n\t"\
"movq " #b ", %%mm3 \n\t"\
"movq " #c ", %%mm4 \n\t"\
PAVGB(t2, %%mm3) \
PAVGB(t1, %%mm4) \
"movq %%mm2, %%mm5 \n\t"\
"movq %%mm2, " #t1 " \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"movq %%mm2, %%mm6 \n\t"\
"paddw %%mm2, %%mm2 \n\t"\
"paddw %%mm6, %%mm2 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"paddw %%mm5, %%mm5 \n\t"\
"paddw %%mm6, %%mm5 \n\t"\
"movq %%mm3, %%mm6 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpckhbw %%mm7, %%mm6 \n\t"\
"paddw %%mm3, %%mm3 \n\t"\
"paddw %%mm6, %%mm6 \n\t"\
"paddw %%mm3, %%mm2 \n\t"\
"paddw %%mm6, %%mm5 \n\t"\
"movq %%mm4, %%mm6 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm6 \n\t"\
"psubw %%mm4, %%mm2 \n\t"\
"psubw %%mm6, %%mm5 \n\t"\
"psraw $2, %%mm2 \n\t"\
"psraw $2, %%mm5 \n\t"\
"packuswb %%mm5, %%mm2 \n\t"\
"movq %%mm2, " #a " \n\t"\
DEINT_L5(%%mm0, %%mm1, (%0) , (%%eax) , (%%eax, %1) )
DEINT_L5(%%mm1, %%mm0, (%%eax) , (%%eax, %1) , (%%eax, %1, 2))
DEINT_L5(%%mm0, %%mm1, (%%eax, %1) , (%%eax, %1, 2), (%0, %1, 4) )
DEINT_L5(%%mm1, %%mm0, (%%eax, %1, 2), (%0, %1, 4) , (%%edx) )
DEINT_L5(%%mm0, %%mm1, (%0, %1, 4) , (%%edx) , (%%edx, %1) )
DEINT_L5(%%mm1, %%mm0, (%%edx) , (%%edx, %1) , (%%edx, %1, 2))
DEINT_L5(%%mm0, %%mm1, (%%edx, %1) , (%%edx, %1, 2), (%0, %1, 8) )
DEINT_L5(%%mm1, %%mm0, (%%edx, %1, 2), (%0, %1, 8) , (%%edx, %1, 4))
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%3) \n\t"
: : "r" (src), "r" (stride), "r"(tmp), "r"(tmp2)
: "%eax", "%edx"
);
#else
int x;
src+= stride*4;
for(x=0; x<8; x++)
{
int t1= tmp[x];
int t2= tmp2[x];
int t3= src[0];
src[stride*0]= CLIP((-(t1 + src[stride*2]) + 2*(t2 + src[stride*1]) + 6*t3 + 4)>>3);
t1= src[stride*1];
src[stride*1]= CLIP((-(t2 + src[stride*3]) + 2*(t3 + src[stride*2]) + 6*t1 + 4)>>3);
t2= src[stride*2];
src[stride*2]= CLIP((-(t3 + src[stride*4]) + 2*(t1 + src[stride*3]) + 6*t2 + 4)>>3);
t3= src[stride*3];
src[stride*3]= CLIP((-(t1 + src[stride*5]) + 2*(t2 + src[stride*4]) + 6*t3 + 4)>>3);
t1= src[stride*4];
src[stride*4]= CLIP((-(t2 + src[stride*6]) + 2*(t3 + src[stride*5]) + 6*t1 + 4)>>3);
t2= src[stride*5];
src[stride*5]= CLIP((-(t3 + src[stride*7]) + 2*(t1 + src[stride*6]) + 6*t2 + 4)>>3);
t3= src[stride*6];
src[stride*6]= CLIP((-(t1 + src[stride*8]) + 2*(t2 + src[stride*7]) + 6*t3 + 4)>>3);
t1= src[stride*7];
src[stride*7]= CLIP((-(t2 + src[stride*9]) + 2*(t3 + src[stride*8]) + 6*t1 + 4)>>3);
tmp[x]= t3;
tmp2[x]= t1;
src++;
}
#endif
}
/**
* Deinterlaces the given block by filtering all lines with a (1 2 1) filter.
* Will be called for every 8x8 block and can read & write lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can be read too.
* Lines 4-12 will be read into the deblocking filter and should be deinterlaced.
* This filter will read lines 4-13 and write lines 4-11.
*/
static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride, uint8_t *tmp)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%edx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%2), %%mm0 \n\t" // L0
"movq (%%eax), %%mm1 \n\t" // L2
PAVGB(%%mm1, %%mm0) // L0+L2
"movq (%0), %%mm2 \n\t" // L1
PAVGB(%%mm2, %%mm0)
"movq %%mm0, (%0) \n\t"
"movq (%%eax, %1), %%mm0 \n\t" // L3
PAVGB(%%mm0, %%mm2) // L1+L3
PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
"movq %%mm2, (%%eax) \n\t"
"movq (%%eax, %1, 2), %%mm2 \n\t" // L4
PAVGB(%%mm2, %%mm1) // L2+L4
PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
"movq %%mm1, (%%eax, %1) \n\t"
"movq (%0, %1, 4), %%mm1 \n\t" // L5
PAVGB(%%mm1, %%mm0) // L3+L5
PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
"movq %%mm0, (%%eax, %1, 2) \n\t"
"movq (%%edx), %%mm0 \n\t" // L6
PAVGB(%%mm0, %%mm2) // L4+L6
PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
"movq %%mm2, (%0, %1, 4) \n\t"
"movq (%%edx, %1), %%mm2 \n\t" // L7
PAVGB(%%mm2, %%mm1) // L5+L7
PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
"movq (%%edx, %1, 2), %%mm1 \n\t" // L8
Michael Niedermayer
committed
PAVGB(%%mm1, %%mm0) // L6+L8
PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
"movq (%0, %1, 8), %%mm0 \n\t" // L9
PAVGB(%%mm0, %%mm2) // L7+L9
PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
"movq %%mm2, (%%edx, %1, 2) \n\t"
"movq %%mm1, (%2) \n\t"
: : "r" (src), "r" (stride), "r" (tmp)
: "%eax", "%edx"
);
#else
int a, b, c, x;
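/* Scalar fallback: a starts as the line above the block (taken from tmp), b as the
   current line and c as the line below; each output line is approximately
   (above + 2*current + below)/4, built from two packed byte averages (see the
   packed_avg note in deInterlaceInterpolateLinear above), with the roles of
   a/b/c rotating as the loop walks down the column. */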
for(x=0; x<2; x++){
a= *(uint32_t*)&tmp[stride*0];
b= *(uint32_t*)&src[stride*0];
c= *(uint32_t*)&src[stride*1];
a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*0]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*1]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*2]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1);
a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*4]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*5]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1);
a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*6]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*7]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
}
#endif
}
/**
* Deinterlaces the given block by applying a median filter to every second line.
* Will be called for every 8x8 block and can read & write lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can be read too.
* Lines 4-12 will be read into the deblocking filter and should be deinterlaced.
*/
static inline void RENAME(deInterlaceMedian)(uint8_t src[], int stride)
{
#ifdef HAVE_MMX2
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%edx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%0), %%mm0 \n\t" //
"movq (%%eax, %1), %%mm2 \n\t" //
"movq (%%eax), %%mm1 \n\t" //
"movq %%mm0, %%mm3 \n\t"
"pmaxub %%mm1, %%mm0 \n\t" //
"pminub %%mm3, %%mm1 \n\t" //
"pmaxub %%mm2, %%mm1 \n\t" //
"pminub %%mm1, %%mm0 \n\t"
"movq %%mm0, (%%eax) \n\t"
"movq (%0, %1, 4), %%mm0 \n\t" //
"movq (%%eax, %1, 2), %%mm1 \n\t" //
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm1, %%mm2 \n\t" //
"pminub %%mm3, %%mm1 \n\t" //
"pmaxub %%mm0, %%mm1 \n\t" //
"pminub %%mm1, %%mm2 \n\t"
"movq %%mm2, (%%eax, %1, 2) \n\t"
"movq (%%edx), %%mm2 \n\t" //
"movq (%%edx, %1), %%mm1 \n\t" //
Michael Niedermayer
committed
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm0, %%mm2 \n\t" //
"pminub %%mm3, %%mm0 \n\t" //
"pmaxub %%mm1, %%mm0 \n\t" //
"pminub %%mm0, %%mm2 \n\t"
Michael Niedermayer
committed
Michael Niedermayer
committed
"movq (%0, %1, 8), %%mm0 \n\t" //
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm0, %%mm2 \n\t" //
"pminub %%mm3, %%mm0 \n\t" //
"pmaxub %%mm1, %%mm0 \n\t" //
"pminub %%mm0, %%mm2 \n\t"
Michael Niedermayer
committed
: : "r" (src), "r" (stride)
: "%eax", "%edx"
);
#else // MMX without MMX2
asm volatile(
"leal (%0, %1), %%eax \n\t"
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"pxor %%mm7, %%mm7 \n\t"
#define MEDIAN(a,b,c)\
"movq " #a ", %%mm0 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq " #c ", %%mm1 \n\t"\
"movq %%mm0, %%mm3 \n\t"\
"movq %%mm1, %%mm4 \n\t"\
"movq %%mm2, %%mm5 \n\t"\
"psubusb %%mm1, %%mm3 \n\t"\
"psubusb %%mm2, %%mm4 \n\t"\
"psubusb %%mm0, %%mm5 \n\t"\
"pcmpeqb %%mm7, %%mm3 \n\t"\
"pcmpeqb %%mm7, %%mm4 \n\t"\
"pcmpeqb %%mm7, %%mm5 \n\t"\
"movq %%mm3, %%mm6 \n\t"\
"pxor %%mm4, %%mm3 \n\t"\
"pxor %%mm5, %%mm4 \n\t"\
"pxor %%mm6, %%mm5 \n\t"\
"por %%mm3, %%mm1 \n\t"\
"por %%mm4, %%mm2 \n\t"\
"por %%mm5, %%mm0 \n\t"\
"pand %%mm2, %%mm0 \n\t"\
"pand %%mm1, %%mm0 \n\t"\
"movq %%mm0, " #b " \n\t"
MEDIAN((%0), (%%eax), (%%eax, %1))
MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
MEDIAN((%0, %1, 4), (%%edx), (%%edx, %1))
MEDIAN((%%edx, %1), (%%edx, %1, 2), (%0, %1, 8))