/*
Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
                     C    MMX  MMX2 3DNow
isVertDC             Ec   Ec
isVertMinMaxOk       Ec   Ec
doVertLowPass        E         e    e
doVertDefFilter      Ec   Ec   Ec
isHorizDC            Ec   Ec
isHorizMinMaxOk      a    E
doHorizLowPass       E         e    e
doHorizDefFilter     Ec   Ec   Ec
Vertical RKAlgo1     E         a    a
Horizontal RKAlgo1             a    a
Vertical X1          a         E    E
Horizontal X1        a         E    E
LinIpolDeinterlace   e         E    E*
CubicIpolDeinterlace a         e    e*
LinBlendDeinterlace  e         E    E*
* I don't have a 3DNow CPU -> it's untested
E = Exact implementation
e = almost exact implementation (slightly different rounding, ...)
a = alternative / approximate implementation
c = checked against the other implementations (-vo md5)
*/
/*
TODO:
verify that everything works as it should (how?)
reduce the time wasted on the mem transfer
implement dering
implement everything in C at least (done at the moment but ...)
unroll stuff if instructions depend too much on the prior one
we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
move YScale thing to the end instead of fixing QP
write a faster and higher quality deblocking filter :)
do something about the speed of the horizontal filters
make the main loop more flexible (variable number of blocks at once;
the if/else stuff per block is slowing things down)
compare the quality & speed of all filters
split this huge file
fix warnings (unused vars, ...)
border remover
*/
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include "../config.h"
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define SIGN(a) ((a) > 0 ? 1 : -1)
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
#ifdef HAVE_MMX2
#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMINUB(b,a,t) \
"movq " #a ", " #t " \n\t"\
"psubusb " #b ", " #t " \n\t"\
"psubb " #t ", " #a " \n\t"
#endif
#ifdef HAVE_MMX2
#define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMAXUB(a,b) \
"psubusb " #a ", " #b " \n\t"\
"paddb " #a ", " #b " \n\t"
#endif
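/*
 * Plain-C sketch (illustration only, not used by the filters below) of the
 * saturated-arithmetic trick behind the plain-MMX fallbacks of PMINUB and
 * PMAXUB: psubusb yields max(a-b,0) per byte, so no branches are needed.
 */
static inline uint8_t pminubC(uint8_t a, uint8_t b)
{
uint8_t t= a > b ? a - b : 0; // psubusb: saturated a - b
return a - t;                 // psubb: a - max(a-b,0) == MIN(a,b)
}
static inline uint8_t pmaxubC(uint8_t a, uint8_t b)
{
uint8_t t= b > a ? b - a : 0; // psubusb: saturated b - a
return a + t;                 // paddb: a + max(b-a,0) == MAX(a,b)
}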
#define GET_MODE_BUFFER_SIZE 500
#define OPTIONS_ARRAY_SIZE 10
static uint64_t packedYOffset= 0x0000000000000000LL;
static uint64_t packedYScale= 0x0100010001000100LL;
static uint64_t w05= 0x0005000500050005LL;
static uint64_t w20= 0x0020002000200020LL;
static uint64_t w1400= 0x1400140014001400LL;
static uint64_t bm00000001= 0x00000000000000FFLL;
static uint64_t bm00010000= 0x000000FF00000000LL;
static uint64_t bm00001000= 0x00000000FF000000LL;
static uint64_t bm10000000= 0xFF00000000000000LL;
static uint64_t bm10000001= 0xFF000000000000FFLL;
static uint64_t bm11000011= 0xFFFF00000000FFFFLL;
static uint64_t bm00000011= 0x000000000000FFFFLL;
static uint64_t bm11111110= 0xFFFFFFFFFFFFFF00LL;
static uint64_t bm11000000= 0xFFFF000000000000LL;
static uint64_t bm00011000= 0x000000FFFF000000LL;
static uint64_t bm00110011= 0x0000FFFF0000FFFFLL;
static uint64_t bm11001100= 0xFFFF0000FFFF0000LL;
static uint64_t b00= 0x0000000000000000LL;
static uint64_t b01= 0x0101010101010101LL;
static uint64_t b02= 0x0202020202020202LL;
static uint64_t b0F= 0x0F0F0F0F0F0F0F0FLL;
static uint64_t b04= 0x0404040404040404LL;
static uint64_t b08= 0x0808080808080808LL;
static uint64_t bFF= 0xFFFFFFFFFFFFFFFFLL;
static uint64_t b20= 0x2020202020202020LL;
static uint64_t b80= 0x8080808080808080LL;
static uint64_t b7E= 0x7E7E7E7E7E7E7E7ELL;
static uint64_t b7C= 0x7C7C7C7C7C7C7C7CLL;
static uint64_t b3F= 0x3F3F3F3F3F3F3F3FLL;
static uint64_t temp0=0;
static uint64_t temp1=0;
static uint64_t temp2=0;
static uint64_t temp3=0;
static uint64_t temp4=0;
static uint64_t temp5=0;
static uint64_t pQPb=0;
static uint8_t tempBlocks[8*16*2]; //used for the horizontal code
int hFlatnessThreshold= 56 - 16;
int vFlatnessThreshold= 56 - 16;
// amount of "black" you are willing to lose to get a brightness-corrected picture
double maxClippedThreshold= 0.01;
static struct PPFilter filters[]=
{
{"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
{"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
{"vr", "rkvdeblock", 1, 2, 4, H_RK1_FILTER},
{"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
{"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
{"dr", "dering", 1, 5, 6, DERING},
{"al", "autolevels", 0, 1, 2, LEVEL_FIX},
{"lb", "linblenddeint", 0, 1, 6, LINEAR_BLEND_DEINT_FILTER},
{"li", "linipoldeint", 0, 1, 6, LINEAR_IPOL_DEINT_FILTER},
{"ci", "cubicipoldeint", 0, 1, 6, CUBIC_IPOL_DEINT_FILTER},
{"md", "mediandeint", 0, 1, 6, MEDIAN_DEINT_FILTER},
{NULL, NULL,0,0,0,0} //End Marker
};
static char *replaceTable[]=
{
"default", "hdeblock:a,vdeblock:a,dering:a,autolevels",
"de", "hdeblock:a,vdeblock:a,dering:a,autolevels",
"fast", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
"fa", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
NULL //End Marker
};
static inline void unusedVariableWarningFixer()
{
if(
packedYOffset + packedYScale + w05 + w20 + w1400 + bm00000001 + bm00010000
+ bm00001000 + bm10000000 + bm10000001 + bm11000011 + bm00000011 + bm11111110
+ bm11000000 + bm00011000 + bm00110011 + bm11001100 + b00 + b01 + b02 + b0F
+ bFF + b20 + b80 + b7E + b7C + b3F + temp0 + temp1 + temp2 + temp3 + temp4
+ temp5 + pQPb == 0) b00=0;
}
static inline long long rdtsc()
{
long long l;
asm volatile( "rdtsc\n\t"
: "=A" (l)
);
// printf("%d\n", int(l/1000));
return l;
}
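/*
 * Typical use of rdtsc() for the crude benchmarking done in this file
 * (sketch only; counts are raw cycles and only comparable on the same CPU):
 *
 *	long long t0= rdtsc();
 *	doVertLowPass(src, stride, QP);
 *	printf("%lld cycles\n", rdtsc() - t0);
 */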
static inline void prefetchnta(void *p)
{
asm volatile( "prefetchnta (%0)\n\t"
: : "r" (p)
);
}
static inline void prefetcht0(void *p)
{
asm volatile( "prefetcht0 (%0)\n\t"
: : "r" (p)
);
}
static inline void prefetcht1(void *p)
{
asm volatile( "prefetcht1 (%0)\n\t"
: : "r" (p)
);
}
static inline void prefetcht2(void *p)
{
asm volatile( "prefetcht2 (%0)\n\t"
: : "r" (p)
);
}
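/*
 * These helpers are meant to be dropped into the block loop to hide memory
 * latency, e.g. (sketch; "nextBlock" is a hypothetical pointer one block
 * ahead of the one currently being filtered):
 *
 *	prefetchnta(nextBlock);          // stream source data past the cache
 *	prefetcht0(nextBlock + stride);  // or keep it in all cache levels
 */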
//FIXME? |255-0| = 1 (shouldn't be a problem ...)
/**
 * Check if the middle 8x8 block in the given 8x16 block is flat
 */
static inline int isVertDC(uint8_t src[], int stride)
{
int numEq= 0;
#ifndef HAVE_MMX
int y;
#endif
src+= stride*4; // src points to begin of the 8x8 Block
#ifdef HAVE_MMX
asm volatile(
"leal (%1, %2), %%eax \n\t"
"leal (%%eax, %2, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 eax eax+%2 eax+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
"movq b7E, %%mm7 \n\t" // mm7 = 0x7F
"movq b7C, %%mm6 \n\t" // mm6 = 0x7D
"movq (%1), %%mm0 \n\t"
"psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
"paddb %%mm7, %%mm0 \n\t"
"pcmpgtb %%mm6, %%mm0 \n\t"
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
" \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
#ifdef HAVE_MMX2
"pshufw $0xF9, %%mm0, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"pshufw $0xFE, %%mm0, %%mm1 \n\t"
#else
"movq %%mm0, %%mm1 \n\t"
"psrlq $16, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlq $32, %%mm0 \n\t"
#endif
"paddb %%mm1, %%mm0 \n\t"
"movd %%mm0, %0 \n\t"
: "=r" (numEq)
: "r" (src), "r" (stride)
: "%eax", "%ebx"
for(y=0; y<BLOCK_SIZE-1; y++)
{
if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
src+= stride;
}
#endif
/* if(abs(numEq - asmEq) > 0)
{
printf("\nasm:%d c:%d\n", asmEq, numEq);
for(int y=0; y<8; y++)
{
for(int x=0; x<8; x++)
{
printf("%d ", temp[x + y*stride]);
}
printf("\n");
}
}
*/
// for(int i=0; i<numEq/8; i++) src[i]=255;
return (numEq > vFlatnessThreshold) ? 1 : 0;
}
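/*
 * The "((a - b + 1)&0xFFFF) < 3" tests above are a branch-friendly way of
 * writing |a - b| <= 1 for 8-bit samples: a-b lies in [-255,255], adding 1
 * maps the differences -1,0,+1 into [0,2], while negative results wrap to
 * large values under the 0xFFFF mask. Scalar sketch:
 */
static inline int isNearlyEq(uint8_t a, uint8_t b)
{
return ((a - b + 1)&0xFFFF) < 3; // same as ABS(a-b) <= 1
}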
static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
int isOk;
asm volatile(
// "int $3 \n\t"
"movq (%1, %2), %%mm0 \n\t"
"movq (%1, %2, 8), %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"psubusb %%mm1, %%mm0 \n\t"
"psubusb %%mm2, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t" // ABS Diff
"movq pQPb, %%mm7 \n\t" // QP,..., QP
"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
"pcmpeqd b00, %%mm0 \n\t"
"psrlq $16, %%mm0 \n\t"
"pcmpeqd bFF, %%mm0 \n\t"
// "movd %%mm0, (%1, %2, 4)\n\t"
"movd %%mm0, %0 \n\t"
: "=r" (isOk)
: "r" (src), "r" (stride)
);
return isOk;
#else
int isOk2= 1;
int x;
for(x=0; x<BLOCK_SIZE; x++)
{
if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
}
/* if(isOk && !isOk2 || !isOk && isOk2)
{
printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
for(int y=0; y<9; y++)
{
for(int x=0; x<8; x++)
{
printf("%d ", src[x + y*stride]);
}
printf("\n");
}
} */
return isOk2;
#endif
}
/**
* Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
*/
static inline void doVertLowPass(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
asm volatile( //"movv %0 %1 %2\n\t"
"movq pQPb, %%mm0 \n\t" // QP,..., QP
"movq (%0), %%mm6 \n\t"
"movq (%0, %1), %%mm5 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psubusb %%mm6, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
"pand %%mm2, %%mm6 \n\t"
"pandn %%mm1, %%mm2 \n\t"
"por %%mm2, %%mm6 \n\t"// First Line to Filter
"movq (%0, %1, 8), %%mm5 \n\t"
"leal (%0, %1, 4), %%eax \n\t"
"leal (%0, %1, 8), %%ebx \n\t"
"subl %1, %%ebx \n\t"
"addl %1, %0 \n\t" // %0 points to line 1 not 0
"movq (%0, %1, 8), %%mm7 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
"pand %%mm2, %%mm7 \n\t"
"pandn %%mm1, %%mm2 \n\t"
"por %%mm2, %%mm7 \n\t" // First Line to Filter
// 1 2 3 4 5 6 7 8
// %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ebx eax+4%1
// 6 4 2 2 1 1
// 6 4 4 2
// 6 8 2
"movq (%0, %1), %%mm0 \n\t" // 1
"movq %%mm0, %%mm1 \n\t" // 1
PAVGB(%%mm6, %%mm0) //1 1 /2
PAVGB(%%mm6, %%mm0) //3 1 /4
"movq (%0, %1, 4), %%mm2 \n\t" // 1
"movq %%mm2, %%mm5 \n\t" // 1
PAVGB((%%eax), %%mm2) // 11 /2
PAVGB((%0, %1, 2), %%mm2) // 211 /4
"movq %%mm2, %%mm3 \n\t" // 211 /4
"movq (%0), %%mm4 \n\t" // 1
PAVGB(%%mm4, %%mm3) // 4 211 /8
PAVGB(%%mm0, %%mm3) //642211 /16
"movq %%mm3, (%0) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
"movq %%mm1, %%mm0 \n\t" // 1
"movq %%mm4, %%mm3 \n\t" // 1
PAVGB((%0,%1,2), %%mm3) // 1 1 /2
PAVGB((%%eax,%1,2), %%mm5) // 11 /2
PAVGB((%%eax), %%mm5) // 211 /4
PAVGB(%%mm5, %%mm3) // 2 2211 /8
PAVGB(%%mm0, %%mm3) //4242211 /16
"movq %%mm3, (%0,%1) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
"movq (%%ebx), %%mm0 \n\t" // 1
PAVGB((%%eax, %1, 2), %%mm0) // 11/2
"movq %%mm0, %%mm3 \n\t" // 11/2
PAVGB(%%mm1, %%mm0) // 2 11/4
PAVGB(%%mm6, %%mm0) //222 11/8
PAVGB(%%mm2, %%mm0) //22242211/16
"movq (%0, %1, 2), %%mm2 \n\t" // 1
"movq %%mm0, (%0, %1, 2) \n\t" // X
// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
PAVGB((%%ebx), %%mm0) // 11 /2
PAVGB(%%mm0, %%mm6) //11 11 /4
PAVGB(%%mm1, %%mm4) // 11 /2
PAVGB(%%mm2, %%mm1) // 11 /2
PAVGB(%%mm1, %%mm6) //1122 11 /8
PAVGB(%%mm5, %%mm6) //112242211 /16
"movq (%%eax), %%mm5 \n\t" // 1
"movq %%mm6, (%%eax) \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
"movq (%%eax, %1, 4), %%mm6 \n\t" // 1
PAVGB(%%mm7, %%mm6) // 11 /2
PAVGB(%%mm4, %%mm6) // 11 11 /4
PAVGB(%%mm3, %%mm6) // 11 2211 /8
PAVGB(%%mm5, %%mm2) // 11 /2
"movq (%0, %1, 4), %%mm4 \n\t" // 1
PAVGB(%%mm4, %%mm2) // 112 /4
PAVGB(%%mm2, %%mm6) // 112242211 /16
"movq %%mm6, (%0, %1, 4) \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
PAVGB(%%mm7, %%mm1) // 11 2 /4
PAVGB(%%mm4, %%mm5) // 11 /2
PAVGB(%%mm5, %%mm0) // 11 11 /4
"movq (%%eax, %1, 2), %%mm6 \n\t" // 1
PAVGB(%%mm6, %%mm1) // 11 4 2 /8
PAVGB(%%mm0, %%mm1) // 11224222 /16
"movq %%mm1, (%%eax, %1, 2) \n\t" // X
// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
PAVGB(%%mm0, %%mm6) // 1 1 /2
PAVGB(%%mm7, %%mm6) // 1 12 /4
PAVGB(%%mm2, %%mm6) // 1122424 /4
"movq %%mm6, (%%ebx) \n\t" // X
// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
PAVGB(%%mm7, %%mm5) // 11 2 /4
PAVGB(%%mm7, %%mm5) // 11 6 /8
PAVGB(%%mm3, %%mm0) // 112 /4
PAVGB(%%mm0, %%mm5) // 112246 /16
"movq %%mm5, (%%eax, %1, 4) \n\t" // X
:
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
const int l9= stride + l8;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++)
{
const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
int sums[9];
sums[0] = first + src[l1];
sums[1] = src[l1] + src[l2];
sums[2] = src[l2] + src[l3];
sums[3] = src[l3] + src[l4];
sums[4] = src[l4] + src[l5];
sums[5] = src[l5] + src[l6];
sums[6] = src[l6] + src[l7];
sums[7] = src[l7] + src[l8];
sums[8] = src[l8] + last;
src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
src++;
}
#endif
}
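/*
 * Reference for the arithmetic above (sketch): for the middle lines this is
 * the normalized (1,1,2,2,4,2,2,1,1)/16 9-tap kernel, e.g. for l4:
 *
 *	src[l4]= (4*l4 + 2*(l2+l3+l5+l6) + (first+l1+l7+l8) + 8) >> 4
 *
 * which is exactly (src[l4]<<2) + ((sums[2]+sums[5])<<1) + sums[0] + sums[7]
 * with sums[2]=l2+l3, sums[5]=l5+l6, sums[0]=first+l1, sums[7]=l7+l8.
 */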
/**
 * Experimental implementation of the filter (Algorithm 1) described in a paper by Ramkishor & Karandikar
 * values are correctly clipped (MMX2)
 * values wrap around (C)
 * conclusion: it's fast, but introduces ugly horizontal patterns if there is a continuous gradient
 * worked example (l3..l6 = 0 8 16 24, x = l5-l4):
 *	0 8 16 24
 *	x = 8
 *	x/2 = 4
 *	x/8 = 1
 *	1 12 12 23
 */
static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
// FIXME rounding
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
"movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
"movq pQPb, %%mm0 \n\t" // QP,..., QP
"movq %%mm0, %%mm1 \n\t" // QP,..., QP
"paddusb b02, %%mm0 \n\t"
"psrlw $2, %%mm0 \n\t"
"pand b3F, %%mm0 \n\t" // QP/4,..., QP/4
"paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
"movq (%0, %1, 4), %%mm2 \n\t" // line 4
"movq (%%ebx), %%mm3 \n\t" // line 5
"movq %%mm2, %%mm4 \n\t" // line 4
"pcmpeqb %%mm5, %%mm5 \n\t" // -1
"pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
PAVGB(%%mm3, %%mm5)
"paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
"psubusb %%mm3, %%mm4 \n\t"
"psubusb %%mm2, %%mm3 \n\t"
"por %%mm3, %%mm4 \n\t" // |l4 - l5|
"psubusb %%mm0, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm4 \n\t"
"pand %%mm4, %%mm5 \n\t" // d/2
// "paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
"paddb %%mm5, %%mm2 \n\t"
// "psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%0,%1, 4) \n\t"
"movq (%%ebx), %%mm2 \n\t"
// "paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
"psubb %%mm5, %%mm2 \n\t"
// "psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%%ebx) \n\t"
"paddb %%mm6, %%mm5 \n\t"
"psrlw $2, %%mm5 \n\t"
"pand b3F, %%mm5 \n\t"
"psubb b20, %%mm5 \n\t" // (l5-l4)/8
"movq (%%eax, %1, 2), %%mm2 \n\t"
"paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
"paddsb %%mm5, %%mm2 \n\t"
"psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%%eax, %1, 2) \n\t"
"movq (%%ebx, %1), %%mm2 \n\t"
"paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
"psubsb %%mm5, %%mm2 \n\t"
"psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%%ebx, %1) \n\t"
:
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
int x;
src+= stride*3;
// const int l7= stride + l6;
// const int l8= stride + l7;
// const int l9= stride + l8;
for(x=0; x<BLOCK_SIZE; x++)
{
if(ABS(src[l4]-src[l5]) < QP + QP/4)
{
int v = (src[l5] - src[l4]);
src[l3] +=v/8;
src[l4] +=v/2;
src[l5] -=v/2;
src[l6] -=v/8;
}
src++;
}
#endif
}
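/*
 * Note on the threshold (sketch): the MMX2/3DNow path builds QP*1.25 with
 * byte-wise ops only; the psrlw/pand pair emulates a per-byte shift:
 *
 *	threshold= QP + (((QP + 2) >> 2)&0x3F);  // ~ QP + QP/4, as in the C path
 */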
/**
 * Experimental Filter 1
 * will not damage linear gradients
 * flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-tap filter
 * can only smooth blocks at the expected locations (it can't smooth them if they moved)
 * MMX2 version does correct clipping, C version doesn't
 */
static inline void vertX1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
// "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
"movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
"movq (%0, %1, 4), %%mm1 \n\t" // line 4
"movq %%mm1, %%mm2 \n\t" // line 4
"psubusb %%mm0, %%mm1 \n\t"
"psubusb %%mm2, %%mm0 \n\t"
"por %%mm1, %%mm0 \n\t" // |l2 - l3|
"movq (%%ebx), %%mm3 \n\t" // line 5
"movq (%%ebx, %1), %%mm4 \n\t" // line 6
"movq %%mm3, %%mm5 \n\t" // line 5
"psubusb %%mm4, %%mm3 \n\t"
"psubusb %%mm5, %%mm4 \n\t"
"por %%mm4, %%mm3 \n\t" // |l5 - l6|
PAVGB(%%mm3, %%mm0) // (|l3 - l4| + |l5 - l6|)/2
"movq %%mm2, %%mm1 \n\t" // line 4
"psubusb %%mm5, %%mm2 \n\t"
"movq %%mm2, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
"psubusb %%mm1, %%mm5 \n\t"
"por %%mm5, %%mm4 \n\t" // |l4 - l5|
"psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
"movq %%mm4, %%mm3 \n\t" // d
"psubusb pQPb, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
"psubusb b01, %%mm3 \n\t"
"pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
PAVGB(%%mm7, %%mm3) // d/2
"movq %%mm3, %%mm1 \n\t" // d/2
PAVGB(%%mm7, %%mm3) // d/4
PAVGB(%%mm1, %%mm3) // 3*d/8
"movq (%0, %1, 4), %%mm0 \n\t" // line 4
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%0, %1, 4) \n\t" // line 4
"movq (%%ebx), %%mm0 \n\t" // line 5
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%ebx) \n\t" // line 5
PAVGB(%%mm7, %%mm1) // d/4
"movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%eax, %1, 2) \n\t" // line 3
"movq (%%ebx, %1), %%mm0 \n\t" // line 6
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%ebx, %1) \n\t" // line 6
PAVGB(%%mm7, %%mm1) // d/8
"movq (%%eax, %1), %%mm0 \n\t" // line 2
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%eax, %1) \n\t" // line 2
"movq (%%ebx, %1, 2), %%mm0 \n\t" // line 7
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%ebx, %1, 2) \n\t" // line 7
:
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
int x;
src+= stride*3;
// const int l8= stride + l7;
// const int l9= stride + l8;
for(x=0; x<BLOCK_SIZE; x++)
{
int a= src[l3] - src[l4];
int b= src[l4] - src[l5];
int c= src[l5] - src[l6];
int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
if(d < QP)
{
int v = d * SIGN(-b);
src[l2] +=v/8;
src[l3] +=v/4;
src[l4] +=3*v/8;
src[l5] -=3*v/8;
src[l6] -=v/4;
src[l7] -=v/8;
}
src++;
}
/*
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
const int l9= stride + l8;
for(int x=0; x<BLOCK_SIZE; x++)
{
int v2= src[l2];
int v3= src[l3];
int v4= src[l4];
int v5= src[l5];
int v6= src[l6];
int v7= src[l7];
if(ABS(v4-v5)<QP && ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
{
src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6 )/16;
src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7 )/16;
src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
src[l6] = ( 1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
}
src++;
}
*/
#endif
}
/**
 * Experimental Filter 1 (Horizontal)
 * will not damage linear gradients
 * flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-tap filter
 * can only smooth blocks at the expected locations (it can't smooth them if they moved)
 * MMX2 version does correct clipping, C version doesn't
 * not identical to the vertical one
 */
static inline void horizX1Filter(uint8_t *src, int stride, int QP)
{
int y;
static uint64_t *lut= NULL;
if(lut==NULL)
{
int i;
lut= (uint64_t*)memalign(8, 256*8);
for(i=0; i<256; i++)
{
int v= i < 128 ? 2*i : 2*(i-256);
/*
//Simulate 112242211 9-Tap filter
uint64_t a= (v/16) & 0xFF;
uint64_t b= (v/8) & 0xFF;
uint64_t c= (v/4) & 0xFF;
uint64_t d= (3*v/8) & 0xFF;
*/
//Simulate piecewise linear interpolation
uint64_t a= (v/16) & 0xFF;
uint64_t b= (v*3/16) & 0xFF;
uint64_t c= (v*5/16) & 0xFF;
uint64_t d= (7*v/16) & 0xFF;
uint64_t A= (0x100 - a)&0xFF;
uint64_t B= (0x100 - b)&0xFF;
uint64_t C= (0x100 - c)&0xFF;
uint64_t D= (0x100 - d)&0xFF;
lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
(D<<24) | (C<<16) | (B<<8) | (A);
//lut[i] = (v<<32) | (v<<24);
}
}
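/*
 * What one LUT entry encodes (sketch, modulo the 0x80 bias used to make
 * paddsb act on unsigned pixels): for a correction v, the eight packed
 * byte deltas shift the 8 pixels roughly like
 *
 *	p0 -= v/16;   p1 -= 3*v/16;  p2 -= 5*v/16;  p3 -= 7*v/16;
 *	p4 += 7*v/16; p5 += 5*v/16;  p6 += 3*v/16;  p7 += v/16;
 *
 * i.e. the step between p3 and p4 is replaced by a linear ramp.
 */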
#if 0
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
// "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
"movq b80, %%mm6 \n\t"
"movd pQPb, %%mm5 \n\t" // QP
"movq %%mm5, %%mm4 \n\t"
"paddusb %%mm5, %%mm5 \n\t" // 2QP
"paddusb %%mm5, %%mm4 \n\t" // 3QP
"pxor %%mm5, %%mm5 \n\t" // 0
"psubb %%mm4, %%mm5 \n\t" // -3QP
"por bm11111110, %%mm5 \n\t" // ...,FF,FF,-3QP
"psllq $24, %%mm5 \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
#define HX1old(a) \
"movd " #a ", %%mm0 \n\t"\
"movd 4" #a ", %%mm1 \n\t"\
"punpckldq %%mm1, %%mm0 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"movq %%mm0, %%mm2 \n\t"\
"psrlq $8, %%mm1 \n\t"\
"psubusb %%mm1, %%mm2 \n\t"\
"psubusb %%mm0, %%mm1 \n\t"\
"por %%mm2, %%mm1 \n\t" /* px = |px - p(x+1)| */\
"pcmpeqb %%mm7, %%mm2 \n\t" /* px = sgn[px - p(x+1)] */\
"pshufw $0x00, %%mm1, %%mm3 \n\t" /* p5 = |p1 - p2| */\
PAVGB(%%mm1, %%mm3) /* p5 = (|p2-p1| + |p6-p5|)/2 */\
"psrlq $16, %%mm3 \n\t" /* p3 = (|p2-p1| + |p6-p5|)/2 */\
"psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
"paddb %%mm5, %%mm1 \n\t"\
"psubusb %%mm5, %%mm1 \n\t"\
PAVGB(%%mm7, %%mm1)\
"pxor %%mm2, %%mm1 \n\t"\
"psubb %%mm2, %%mm1 \n\t"\
"psrlq $24, %%mm1 \n\t"\
"movd %%mm1, %%ecx \n\t"\
"paddb %%mm6, %%mm0 \n\t"\
"paddsb (%3, %%ecx, 8), %%mm0 \n\t"\
"paddb %%mm6, %%mm0 \n\t"\
"movq %%mm0, " #a " \n\t"\
/*
HX1old((%0))
HX1old((%%eax))
HX1old((%%eax, %1))
HX1old((%%eax, %1, 2))
HX1old((%0, %1, 4))
HX1old((%%ebx))
HX1old((%%ebx, %1))
HX1old((%%ebx, %1, 2))
*/
//FIXME add some comments, it's unreadable ...
#define HX1b(a, c, b, d) \
"movd " #a ", %%mm0 \n\t"\
"movd 4" #a ", %%mm1 \n\t"\
"punpckldq %%mm1, %%mm0 \n\t"\
"movd " #b ", %%mm4 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"movq %%mm0, %%mm2 \n\t"\
"psrlq $8, %%mm1 \n\t"\
"movd 4" #b ", %%mm3 \n\t"\
"psubusb %%mm1, %%mm2 \n\t"\
"psubusb %%mm0, %%mm1 \n\t"\
"por %%mm2, %%mm1 \n\t" /* px = |px - p(x+1)| */\
"pcmpeqb %%mm7, %%mm2 \n\t" /* px = sgn[px - p(x+1)] */\
"punpckldq %%mm3, %%mm4 \n\t"\
"movq %%mm1, %%mm3 \n\t"\
"psllq $32, %%mm3 \n\t" /* p5 = |p1 - p2| */\
PAVGB(%%mm1, %%mm3) /* p5 = (|p2-p1| + |p6-p5|)/2 */\
"paddb %%mm6, %%mm0 \n\t"\
"psrlq $16, %%mm3 \n\t" /* p3 = (|p2-p1| + |p6-p5|)/2 */\
"psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
"movq %%mm4, %%mm3 \n\t"\
"paddb %%mm5, %%mm1 \n\t"\
"psubusb %%mm5, %%mm1 \n\t"\
"psrlq $8, %%mm3 \n\t"\
PAVGB(%%mm7, %%mm1)\
"pxor %%mm2, %%mm1 \n\t"\
"psubb %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm2 \n\t"\
"psrlq $24, %%mm1 \n\t"\
"psubusb %%mm3, %%mm2 \n\t"\
"movd %%mm1, %%ecx \n\t"\
"psubusb %%mm4, %%mm3 \n\t"\
"paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
"por %%mm2, %%mm3 \n\t" /* px = |px - p(x+1)| */\
"paddb %%mm6, %%mm0 \n\t"\
"pcmpeqb %%mm7, %%mm2 \n\t" /* px = sgn[px - p(x+1)] */\
"movq %%mm3, %%mm1 \n\t"\
"psllq $32, %%mm1 \n\t" /* p5 = |p1 - p2| */\
"movq %%mm0, " #a " \n\t"\
PAVGB(%%mm3, %%mm1) /* p5 = (|p2-p1| + |p6-p5|)/2 */\
"paddb %%mm6, %%mm4 \n\t"\
"psrlq $16, %%mm1 \n\t" /* p3 = (|p2-p1| + |p6-p5|)/2 */\
"psubusb %%mm1, %%mm3 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
"paddb %%mm5, %%mm3 \n\t"\
"psubusb %%mm5, %%mm3 \n\t"\
PAVGB(%%mm7, %%mm3)\