}
j += 1 << bit_alloc_bits;
}
for(i=bound;i<sblimit;i++) {
bit_alloc_bits = alloc_table[j];
v = get_bits(&s->gb, bit_alloc_bits);
bit_alloc[0][i] = v;
bit_alloc[1][i] = v;
j += 1 << bit_alloc_bits;
}
/* scale codes */
for(i=0;i<sblimit;i++) {
for(ch=0;ch<s->nb_channels;ch++) {
if (bit_alloc[ch][i])
scale_code[ch][i] = get_bits(&s->gb, 2);
}
}
/* scale factors */
for(i=0;i<sblimit;i++) {
for(ch=0;ch<s->nb_channels;ch++) {
if (bit_alloc[ch][i]) {
sf = scale_factors[ch][i];
switch(scale_code[ch][i]) {
default:
case 0:
sf[0] = get_bits(&s->gb, 6);
sf[1] = get_bits(&s->gb, 6);
sf[2] = get_bits(&s->gb, 6);
break;
case 2:
sf[0] = get_bits(&s->gb, 6);
sf[1] = sf[0];
sf[2] = sf[0];
break;
case 1:
sf[0] = get_bits(&s->gb, 6);
sf[2] = get_bits(&s->gb, 6);
sf[1] = sf[0];
break;
case 3:
sf[0] = get_bits(&s->gb, 6);
sf[2] = get_bits(&s->gb, 6);
sf[1] = sf[2];
break;
}
}
}
}
/* samples */
for(k=0;k<3;k++) {
for(l=0;l<12;l+=3) {
j = 0;
for(i=0;i<bound;i++) {
bit_alloc_bits = alloc_table[j];
for(ch=0;ch<s->nb_channels;ch++) {
b = bit_alloc[ch][i];
if (b) {
scale = scale_factors[ch][i][k];
qindex = alloc_table[j+b];
bits = ff_mpa_quant_bits[qindex];
if (bits < 0) {
/* 3 values at the same time */
v = get_bits(&s->gb, -bits);
v2 = division_tabs[qindex][v];
steps = ff_mpa_quant_steps[qindex];
s->sb_samples[ch][k * 12 + l + 0][i] =
l2_unscale_group(steps, v2 & 15, scale);
s->sb_samples[ch][k * 12 + l + 1][i] =
l2_unscale_group(steps, (v2 >> 4) & 15, scale);
s->sb_samples[ch][k * 12 + l + 2][i] =
l2_unscale_group(steps, v2 >> 8 , scale);
} else {
for(m=0;m<3;m++) {
v = get_bits(&s->gb, bits);
v = l1_unscale(bits - 1, v, scale);
s->sb_samples[ch][k * 12 + l + m][i] = v;
}
}
} else {
s->sb_samples[ch][k * 12 + l + 0][i] = 0;
s->sb_samples[ch][k * 12 + l + 1][i] = 0;
s->sb_samples[ch][k * 12 + l + 2][i] = 0;
}
}
/* next subband in alloc table */
j += 1 << bit_alloc_bits;
}
/* XXX: find a way to avoid this duplication of code */
for(i=bound;i<sblimit;i++) {
bit_alloc_bits = alloc_table[j];
b = bit_alloc[0][i];
if (b) {
int mant, scale0, scale1;
scale0 = scale_factors[0][i][k];
scale1 = scale_factors[1][i][k];
qindex = alloc_table[j+b];
bits = ff_mpa_quant_bits[qindex];
if (bits < 0) {
/* 3 values at the same time */
v = get_bits(&s->gb, -bits);
steps = ff_mpa_quant_steps[qindex];
mant = v % steps;
v = v / steps;
s->sb_samples[0][k * 12 + l + 0][i] =
l2_unscale_group(steps, mant, scale0);
s->sb_samples[1][k * 12 + l + 0][i] =
l2_unscale_group(steps, mant, scale1);
mant = v % steps;
v = v / steps;
s->sb_samples[0][k * 12 + l + 1][i] =
l2_unscale_group(steps, mant, scale0);
s->sb_samples[1][k * 12 + l + 1][i] =
l2_unscale_group(steps, mant, scale1);
s->sb_samples[0][k * 12 + l + 2][i] =
l2_unscale_group(steps, v, scale0);
s->sb_samples[1][k * 12 + l + 2][i] =
l2_unscale_group(steps, v, scale1);
} else {
for(m=0;m<3;m++) {
mant = get_bits(&s->gb, bits);
s->sb_samples[0][k * 12 + l + m][i] =
l1_unscale(bits - 1, mant, scale0);
s->sb_samples[1][k * 12 + l + m][i] =
l1_unscale(bits - 1, mant, scale1);
}
}
} else {
s->sb_samples[0][k * 12 + l + 0][i] = 0;
s->sb_samples[0][k * 12 + l + 1][i] = 0;
s->sb_samples[0][k * 12 + l + 2][i] = 0;
s->sb_samples[1][k * 12 + l + 0][i] = 0;
s->sb_samples[1][k * 12 + l + 1][i] = 0;
s->sb_samples[1][k * 12 + l + 2][i] = 0;
}
/* next subband in alloc table */
j += 1 << bit_alloc_bits;
}
/* fill remaining samples to zero */
for(i=sblimit;i<SBLIMIT;i++) {
for(ch=0;ch<s->nb_channels;ch++) {
s->sb_samples[ch][k * 12 + l + 0][i] = 0;
s->sb_samples[ch][k * 12 + l + 1][i] = 0;
s->sb_samples[ch][k * 12 + l + 2][i] = 0;
}
}
}
}
return 3 * 12;
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
#define SPLIT(dst,sf,n)\
if(n==3){\
int m= (sf*171)>>9;\
dst= sf - 3*m;\
sf=m;\
}else if(n==4){\
dst= sf&3;\
sf>>=2;\
}else if(n==5){\
int m= (sf*205)>>10;\
dst= sf - 5*m;\
sf=m;\
}else if(n==6){\
int m= (sf*171)>>10;\
dst= sf - 6*m;\
sf=m;\
}else{\
dst=0;\
}
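/* Illustrative note, not part of the original source: SPLIT peels one digit off
   sf without a division by using a reciprocal multiply; e.g. for n == 3 the
   factor 171/512 approximates 1/3 exactly over the small range these scale
   factor fields can take, so m == sf/3 and dst == sf%3. A minimal self-check
   sketch (disabled, purely for illustration): */
#if 0
static void split_selfcheck(void)
{
    int sf;
    for (sf = 0; sf < 256; sf++) {
        int m   = (sf * 171) >> 9; /* candidate for sf / 3 */
        int dst = sf - 3 * m;      /* candidate for sf % 3 */
        assert(m == sf / 3 && dst == sf % 3);
    }
}
#endif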
static av_always_inline void lsf_sf_expand(int *slen,
int sf, int n1, int n2, int n3)
{
SPLIT(slen[3], sf, n3)
SPLIT(slen[2], sf, n2)
SPLIT(slen[1], sf, n1)
slen[0] = sf;
}
static void exponents_from_scale_factors(MPADecodeContext *s,
GranuleDef *g,
int16_t *exponents)
{
const uint8_t *bstab, *pretab;
int len, i, j, k, l, v0, shift, gain, gains[3];
int16_t *exp_ptr;
exp_ptr = exponents;
gain = g->global_gain - 210;
shift = g->scalefac_scale + 1;
bstab = band_size_long[s->sample_rate_index];
pretab = mpa_pretab[g->preflag];
for(i=0;i<g->long_end;i++) {
v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
len = bstab[i];
for(j=len;j>0;j--)
*exp_ptr++ = v0;
}
if (g->short_start < 13) {
bstab = band_size_short[s->sample_rate_index];
gains[0] = gain - (g->subblock_gain[0] << 3);
gains[1] = gain - (g->subblock_gain[1] << 3);
gains[2] = gain - (g->subblock_gain[2] << 3);
k = g->long_end;
for(i=g->short_start;i<13;i++) {
len = bstab[i];
for(l=0;l<3;l++) {
v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
for(j=len;j>0;j--)
*exp_ptr++ = v0;
}
}
}
}
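/* Illustrative note, not part of the original source: the values stored in
   exponents[] are in quarter-power-of-two steps. Ignoring the "+ 400" bias
   (which only keeps the later index into exp_table/expval_table non-negative),
   each spectral line of long band i is scaled by roughly
       2^(((global_gain - 210) - ((scale_factor + pretab) << (scalefac_scale + 1))) / 4)
   and the three subblock_gain values shift short windows by a further
   8 quarter-steps per unit, as computed in gains[] above. */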
/* handle n = 0 too */
static inline int get_bitsz(GetBitContext *s, int n)
{
if (n == 0)
return 0;
else
return get_bits(s, n);
}
static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){
if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
s->gb= s->in_gb;
s->in_gb.buffer=NULL;
assert((get_bits_count(&s->gb) & 7) == 0);
skip_bits_long(&s->gb, *pos - *end_pos);
*end_pos2=
*end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
*pos= get_bits_count(&s->gb);
}
}
/* The following is optimized code for
INTFLOAT v = *src
if(get_bits1(&s->gb))
v = -v;
*dst = v;
*/
#if CONFIG_FLOAT
#define READ_FLIP_SIGN(dst,src)\
v = AV_RN32A(src) ^ (get_bits1(&s->gb)<<31);\
AV_WN32A(dst, v);
#else
#define READ_FLIP_SIGN(dst,src)\
v= -get_bits1(&s->gb);\
*(dst) = (*(src) ^ v) - v;
#endif
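/* Illustrative note, not part of the original source: the integer variant above
   applies the decoded sign bit without a branch. With bit = get_bits1(),
   v = -bit is either 0 or an all-ones mask, and (x ^ v) - v yields x for
   bit == 0 and -x for bit == 1 (two's-complement negation). A standalone
   sketch of the same idiom (disabled, purely for illustration): */
#if 0
static inline int apply_sign_branchless(int x, int sign_bit)
{
    int v = -sign_bit;   /* 0 if sign_bit == 0, all-ones if sign_bit == 1 */
    return (x ^ v) - v;  /* x unchanged, or negated */
}
#endif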
static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
int16_t *exponents, int end_pos2)
{
int s_index;
int i;
int last_pos, bits_left;
VLC *vlc;
int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
/* low frequencies (called big values) */
s_index = 0;
for(i=0;i<3;i++) {
int j, k, l, linbits;
j = g->region_size[i];
if (j == 0)
continue;
/* select vlc table */
k = g->table_select[i];
l = mpa_huff_data[k][0];
linbits = mpa_huff_data[k][1];
vlc = &huff_vlc[l];
if(!l){
memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
s_index += 2*j;
continue;
}
/* read huffcode and compute each couple */
for(;j>0;j--) {
int v;
int exponent, x, y;
int pos= get_bits_count(&s->gb);
if (pos >= end_pos){
// av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
switch_buffer(s, &pos, &end_pos, &end_pos2);
// av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
if(pos >= end_pos)
break;
}
y = get_vlc2(&s->gb, vlc->table, 7, 3);
if(!y){
g->sb_hybrid[s_index ] =
g->sb_hybrid[s_index+1] = 0;
s_index += 2;
continue;
}
exponent= exponents[s_index];
dprintf(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
i, g->region_size[i] - j, x, y, exponent);
if(y&16){
x = y >> 5;
y = y & 0x0f;
if (x < 15){
READ_FLIP_SIGN(g->sb_hybrid+s_index, RENAME(expval_table)[ exponent ]+x)
}else{
x += get_bitsz(&s->gb, linbits);
v = l3_unscale(x, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index] = v;
}
if (y < 15){
READ_FLIP_SIGN(g->sb_hybrid+s_index+1, RENAME(expval_table)[ exponent ]+y)
}else{
y += get_bitsz(&s->gb, linbits);
v = l3_unscale(y, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index+1] = v;
}
}else{
x = y >> 5;
y = y & 0x0f;
x += y;
if (x < 15){
READ_FLIP_SIGN(g->sb_hybrid+s_index+!!y, RENAME(expval_table)[ exponent ]+x)
}else{
x += get_bitsz(&s->gb, linbits);
v = l3_unscale(x, exponent);
if (get_bits1(&s->gb))
v = -v;
g->sb_hybrid[s_index+!!y] = v;
}
g->sb_hybrid[s_index+ !y] = 0;
}
s_index+=2;
}
}
/* high frequencies */
vlc = &huff_quad_vlc[g->count1table_select];
last_pos=0;
while (s_index <= 572) {
int pos, code;
pos = get_bits_count(&s->gb);
if (pos >= end_pos) {
if (pos > end_pos2 && last_pos){
/* some encoders generate an incorrect size for this
part. We must go back into the data */
s_index -= 4;
skip_bits_long(&s->gb, last_pos - pos);
av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
if(s->error_recognition >= FF_ER_COMPLIANT)
s_index=0;
break;
}
// av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
switch_buffer(s, &pos, &end_pos, &end_pos2);
// av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
if(pos >= end_pos)
break;
}
last_pos= pos;
code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
dprintf(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
g->sb_hybrid[s_index+0]=
g->sb_hybrid[s_index+1]=
g->sb_hybrid[s_index+2]=
g->sb_hybrid[s_index+3]= 0;
while(code){
static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
int v;
int pos= s_index+idxtab[code];
code ^= 8>>idxtab[code];
READ_FLIP_SIGN(g->sb_hybrid+pos, RENAME(exp_table)+exponents[pos])
}
s_index+=4;
}
/* skip extension bits */
bits_left = end_pos2 - get_bits_count(&s->gb);
//av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
if (bits_left < 0 && s->error_recognition >= FF_ER_COMPLIANT) {
av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
}else if(bits_left > 0 && s->error_recognition >= FF_ER_AGGRESSIVE){
av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
s_index=0;
}
memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
skip_bits_long(&s->gb, bits_left);
i= get_bits_count(&s->gb);
switch_buffer(s, &i, &end_pos, &end_pos2);
return 0;
}
/* Reorder short blocks from bitstream order to interleaved order. It
would be faster to do it in parsing, but the code would be far more
complicated */
static void reorder_block(MPADecodeContext *s, GranuleDef *g)
{
int i, j, len;
INTFLOAT *ptr, *dst, *ptr1;
INTFLOAT tmp[576];
if (g->block_type != 2)
return;
if (g->switch_point) {
if (s->sample_rate_index != 8) {
ptr = g->sb_hybrid + 36;
} else {
ptr = g->sb_hybrid + 48;
}
} else {
ptr = g->sb_hybrid;
}
for(i=g->short_start;i<13;i++) {
len = band_size_short[s->sample_rate_index][i];
ptr1 = ptr;
dst = tmp;
for(j=len;j>0;j--) {
*dst++ = ptr[0*len];
*dst++ = ptr[1*len];
*dst++ = ptr[2*len];
ptr++;
}
ptr+=2*len;
memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
}
}
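/* Illustrative note, not part of the original source: for a short-block band of
   length len == 4 the bitstream stores the three windows back to back,
       a0 a1 a2 a3 | b0 b1 b2 b3 | c0 c1 c2 c3
   and the loop above rewrites them interleaved by frequency line,
       a0 b0 c0 | a1 b1 c1 | a2 b2 c2 | a3 b3 c3
   which is the layout the following IMDCT stage expects. */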
#define ISQRT2 FIXR(0.70710678118654752440)
static void compute_stereo(MPADecodeContext *s,
GranuleDef *g0, GranuleDef *g1)
{
int i, j, k, l;
int sf_max, sf, len, non_zero_found;
INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2;
int non_zero_found_short[3];
/* intensity stereo */
if (s->mode_ext & MODE_EXT_I_STEREO) {
if (!s->lsf) {
is_tab = is_table;
sf_max = 7;
} else {
is_tab = is_table_lsf[g1->scalefac_compress & 1];
sf_max = 16;
}
tab0 = g0->sb_hybrid + 576;
tab1 = g1->sb_hybrid + 576;
non_zero_found_short[0] = 0;
non_zero_found_short[1] = 0;
non_zero_found_short[2] = 0;
k = (13 - g1->short_start) * 3 + g1->long_end - 3;
for(i = 12;i >= g1->short_start;i--) {
/* for last band, use previous scale factor */
if (i != 11)
k -= 3;
len = band_size_short[s->sample_rate_index][i];
for(l=2;l>=0;l--) {
tab0 -= len;
tab1 -= len;
if (!non_zero_found_short[l]) {
/* test if non zero band. if so, stop doing i-stereo */
for(j=0;j<len;j++) {
if (tab1[j] != 0) {
non_zero_found_short[l] = 1;
goto found1;
}
}
sf = g1->scale_factors[k + l];
if (sf >= sf_max)
goto found1;
v1 = is_tab[0][sf];
v2 = is_tab[1][sf];
for(j=0;j<len;j++) {
tmp0 = tab0[j];
tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
}
} else {
found1:
if (s->mode_ext & MODE_EXT_MS_STEREO) {
/* lower part of the spectrum : do ms stereo
if enabled */
for(j=0;j<len;j++) {
tmp0 = tab0[j];
tmp1 = tab1[j];
tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
}
}
}
}
}
non_zero_found = non_zero_found_short[0] |
non_zero_found_short[1] |
non_zero_found_short[2];
for(i = g1->long_end - 1;i >= 0;i--) {
len = band_size_long[s->sample_rate_index][i];
tab0 -= len;
tab1 -= len;
/* test if non zero band. if so, stop doing i-stereo */
if (!non_zero_found) {
for(j=0;j<len;j++) {
if (tab1[j] != 0) {
non_zero_found = 1;
goto found2;
}
}
/* for last band, use previous scale factor */
k = (i == 21) ? 20 : i;
sf = g1->scale_factors[k];
if (sf >= sf_max)
goto found2;
v1 = is_tab[0][sf];
v2 = is_tab[1][sf];
for(j=0;j<len;j++) {
tmp0 = tab0[j];
tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
}
} else {
found2:
if (s->mode_ext & MODE_EXT_MS_STEREO) {
/* lower part of the spectrum : do ms stereo
if enabled */
for(j=0;j<len;j++) {
tmp0 = tab0[j];
tmp1 = tab1[j];
tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
}
}
}
}
} else if (s->mode_ext & MODE_EXT_MS_STEREO) {
/* ms stereo ONLY */
/* NOTE: the 1/sqrt(2) normalization factor is included in the
global gain */
tab0 = g0->sb_hybrid;
tab1 = g1->sb_hybrid;
for(i=0;i<576;i++) {
tmp0 = tab0[i];
tmp1 = tab1[i];
tab0[i] = tmp0 + tmp1;
tab1[i] = tmp0 - tmp1;
}
}
}
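/* Illustrative note, not part of the original source: the MS branches above
   reconstruct left/right as L = M + S and R = M - S scaled by 1/sqrt(2). In the
   intensity-stereo path the explicit ISQRT2 multiply performs that scaling,
   while in the "MS stereo only" path the factor is already folded into
   global_gain (as the comment there states), so a plain sum/difference is
   enough. */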
static void compute_antialias_integer(MPADecodeContext *s,
GranuleDef *g)
{
int32_t *ptr, *csa;
int n, i;
/* we antialias only "long" bands */
if (g->block_type == 2) {
if (!g->switch_point)
return;
/* XXX: check this for 8000Hz case */
n = 1;
} else {
n = SBLIMIT - 1;
}
ptr = g->sb_hybrid + 18;
for(i = n;i > 0;i--) {
int tmp0, tmp1, tmp2;
csa = &csa_table[0][0];
#define INT_AA(j) \
tmp0 = ptr[-1-j];\
tmp1 = ptr[ j];\
tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\
ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\
ptr[ j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j]));
INT_AA(0)
INT_AA(1)
INT_AA(2)
INT_AA(3)
INT_AA(4)
INT_AA(5)
INT_AA(6)
INT_AA(7)
}
}
static void compute_antialias_float(MPADecodeContext *s,
GranuleDef *g)
{
float *ptr;
float tmp0, tmp1;
int n, i;
/* we antialias only "long" bands */
if (g->block_type == 2) {
if (!g->switch_point)
return;
/* XXX: check this for 8000Hz case */
n = 1;
} else {
n = SBLIMIT - 1;
}
ptr = g->sb_hybrid + 18;
for(i = n;i > 0;i--) {
float *csa = &csa_table_float[0][0];
#define FLOAT_AA(j)\
tmp0= ptr[-1-j];\
tmp1= ptr[ j];\
ptr[-1-j] = tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j];\
ptr[ j] = tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j];
FLOAT_AA(0)
FLOAT_AA(1)
FLOAT_AA(2)
FLOAT_AA(3)
FLOAT_AA(4)
FLOAT_AA(5)
FLOAT_AA(6)
FLOAT_AA(7)
}
}
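/* Illustrative note, not part of the original source: each INT_AA/FLOAT_AA step
   is one antialiasing butterfly across a subband boundary, i.e. a rotation
       lo' = cs*lo - ca*hi,   hi' = ca*lo + cs*hi
   with cs = csa[0+4*j] and ca = csa[1+4*j] (lo = ptr[-1-j], hi = ptr[j]). The
   integer version instead stores cs+ca and ca-cs in csa[2+4*j]/csa[3+4*j] and
   reuses tmp2 = MULH(lo+hi, cs), trading one multiply for an add per butterfly. */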
static void compute_imdct(MPADecodeContext *s,
GranuleDef *g,
INTFLOAT *sb_samples,
INTFLOAT *mdct_buf)
{
INTFLOAT *win, *win1, *out_ptr, *ptr, *buf, *ptr1;
INTFLOAT out2[12];
int i, j, mdct_long_end, sblimit;
/* find last non zero block */
ptr = g->sb_hybrid + 576;
ptr1 = g->sb_hybrid + 2 * 18;
while (ptr >= ptr1) {
int32_t *p;
ptr -= 6;
p= (int32_t*)ptr;
if(p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
break;
}
sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
if (g->block_type == 2) {
/* XXX: check for 8000 Hz */
if (g->switch_point)
mdct_long_end = 2;
else
mdct_long_end = 0;
} else {
mdct_long_end = sblimit;
}
buf = mdct_buf;
ptr = g->sb_hybrid;
for(j=0;j<mdct_long_end;j++) {
/* apply window & overlap with previous buffer */
out_ptr = sb_samples + j;
/* select window */
if (g->switch_point && j < 2)
win1 = mdct_win[0];
else
win1 = mdct_win[g->block_type];
/* select frequency inversion */
win = win1 + ((4 * 36) & -(j & 1));
imdct36(out_ptr, buf, ptr, win);
out_ptr += 18*SBLIMIT;
ptr += 18;
buf += 18;
}
for(j=mdct_long_end;j<sblimit;j++) {
/* select frequency inversion */
win = mdct_win[2] + ((4 * 36) & -(j & 1));
out_ptr = sb_samples + j;
for(i=0; i<6; i++){
*out_ptr = buf[i];
out_ptr += SBLIMIT;
}
imdct12(out2, ptr + 0);
for(i=0;i<6;i++) {
*out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*1];
buf[i + 6*2] = MULH3(out2[i + 6], win[i + 6], 1);
out_ptr += SBLIMIT;
}
imdct12(out2, ptr + 1);
for(i=0;i<6;i++) {
*out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*2];
buf[i + 6*0] = MULH3(out2[i + 6], win[i + 6], 1);
out_ptr += SBLIMIT;
}
imdct12(out2, ptr + 2);
for(i=0;i<6;i++) {
buf[i + 6*0] = MULH3(out2[i ], win[i ], 1) + buf[i + 6*0];
buf[i + 6*1] = MULH3(out2[i + 6], win[i + 6], 1);
buf[i + 6*2] = 0;
}
ptr += 18;
buf += 18;
}
/* zero bands */
for(j=sblimit;j<SBLIMIT;j++) {
/* overlap */
out_ptr = sb_samples + j;
for(i=0;i<18;i++) {
*out_ptr = buf[i];
buf[i] = 0;
out_ptr += SBLIMIT;
}
buf += 18;
}
}
/* main layer3 decoding function */
static int mp_decode_layer3(MPADecodeContext *s)
{
int nb_granules, main_data_begin, private_bits;
GranuleDef *g;
int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
int16_t exponents[576]; //FIXME try INTFLOAT
/* read side info */
if (s->lsf) {
main_data_begin = get_bits(&s->gb, 8);
private_bits = get_bits(&s->gb, s->nb_channels == 2 ? 2 : 1);
nb_granules = 1;
} else {
main_data_begin = get_bits(&s->gb, 9);
if (s->nb_channels == 2)
private_bits = get_bits(&s->gb, 3);
else
private_bits = get_bits(&s->gb, 5);
nb_granules = 2;
for(ch=0;ch<s->nb_channels;ch++) {
s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
}
}
for(gr=0;gr<nb_granules;gr++) {
for(ch=0;ch<s->nb_channels;ch++) {
dprintf(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
g = &s->granules[ch][gr];
g->part2_3_length = get_bits(&s->gb, 12);
g->big_values = get_bits(&s->gb, 9);
av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
Fabrice Bellard
committed
g->global_gain = get_bits(&s->gb, 8);
/* if MS stereo only is selected, we precompute the
1/sqrt(2) renormalization factor */
if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
MODE_EXT_MS_STEREO)
g->global_gain -= 2;
if (s->lsf)
g->scalefac_compress = get_bits(&s->gb, 9);
else
g->scalefac_compress = get_bits(&s->gb, 4);
blocksplit_flag = get_bits1(&s->gb);
if (blocksplit_flag) {
g->block_type = get_bits(&s->gb, 2);
av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
Fabrice Bellard
committed
return -1;
g->switch_point = get_bits1(&s->gb);
for(i=0;i<2;i++)
g->table_select[i] = get_bits(&s->gb, 5);
for(i=0;i<3;i++)
g->subblock_gain[i] = get_bits(&s->gb, 3);
ff_init_short_region(s, g);
} else {
int region_address1, region_address2;
g->block_type = 0;
g->switch_point = 0;
for(i=0;i<3;i++)
g->table_select[i] = get_bits(&s->gb, 5);
/* compute huffman coded region sizes */
region_address1 = get_bits(&s->gb, 4);
region_address2 = get_bits(&s->gb, 3);
dprintf(s->avctx, "region1=%d region2=%d\n",
region_address1, region_address2);
ff_init_long_region(s, g, region_address1, region_address2);
}
ff_region_offset2size(g);
ff_compute_band_indexes(s, g);
g->preflag = 0;
if (!s->lsf)
g->preflag = get_bits1(&s->gb);
g->scalefac_scale = get_bits1(&s->gb);
g->count1table_select = get_bits1(&s->gb);
dprintf(s->avctx, "block_type=%d switch_point=%d\n",
g->block_type, g->switch_point);
}
}
const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
/* now we get bits from the main_data_begin offset */
dprintf(s->avctx, "seekback: %d\n", main_data_begin);
//av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
s->in_gb= s->gb;
init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
for(gr=0;gr<nb_granules;gr++) {
for(ch=0;ch<s->nb_channels;ch++) {
g = &s->granules[ch][gr];
if(get_bits_count(&s->gb)<0){
av_log(s->avctx, AV_LOG_DEBUG, "mdb:%d, lastbuf:%d skipping granule %d\n",
main_data_begin, s->last_buf_size, gr);
skip_bits_long(&s->gb, g->part2_3_length);
memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
s->gb= s->in_gb;
s->in_gb.buffer=NULL;
}
continue;
}
bits_pos = get_bits_count(&s->gb);
if (!s->lsf) {
uint8_t *sc;
int slen, slen1, slen2;
/* MPEG1 scale factors */
slen1 = slen_table[0][g->scalefac_compress];
slen2 = slen_table[1][g->scalefac_compress];
dprintf(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
if (g->block_type == 2) {
n = g->switch_point ? 17 : 18;
j = 0;
if(slen1){
for(i=0;i<n;i++)
g->scale_factors[j++] = get_bits(&s->gb, slen1);
}else{
for(i=0;i<n;i++)
g->scale_factors[j++] = 0;
}
if(slen2){
for(i=0;i<18;i++)
g->scale_factors[j++] = get_bits(&s->gb, slen2);
for(i=0;i<3;i++)
g->scale_factors[j++] = 0;
}else{
for(i=0;i<21;i++)
g->scale_factors[j++] = 0;
}
} else {
sc = s->granules[ch][0].scale_factors;
Fabrice Bellard
committed
j = 0;
for(k=0;k<4;k++) {
n = (k == 0 ? 6 : 5);
if ((g->scfsi & (0x8 >> k)) == 0) {
slen = (k < 2) ? slen1 : slen2;
if(slen){
for(i=0;i<n;i++)
g->scale_factors[j++] = get_bits(&s->gb, slen);
}else{
for(i=0;i<n;i++)
g->scale_factors[j++] = 0;
}
} else {
/* simply copy from last granule */
for(i=0;i<n;i++) {
g->scale_factors[j] = sc[j];
j++;
}
}
}
g->scale_factors[j++] = 0;
}
} else {
int tindex, tindex2, slen[4], sl, sf;
/* LSF scale factors */
if (g->block_type == 2) {
tindex = g->switch_point ? 2 : 1;
} else {
tindex = 0;
}
sf = g->scalefac_compress;
if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
/* intensity stereo case */
sf >>= 1;
if (sf < 180) {
lsf_sf_expand(slen, sf, 6, 6, 0);
tindex2 = 3;
} else if (sf < 244) {
lsf_sf_expand(slen, sf - 180, 4, 4, 0);
tindex2 = 4;
} else {
lsf_sf_expand(slen, sf - 244, 3, 0, 0);
tindex2 = 5;
}
} else {
/* normal case */
if (sf < 400) {
lsf_sf_expand(slen, sf, 5, 4, 4);
tindex2 = 0;
} else if (sf < 500) {
lsf_sf_expand(slen, sf - 400, 5, 4, 0);
tindex2 = 1;
} else {
lsf_sf_expand(slen, sf - 500, 3, 0, 0);
tindex2 = 2;
g->preflag = 1;
}
}
j = 0;
for(k=0;k<4;k++) {
n = lsf_nsf_table[tindex2][tindex][k];
sl = slen[k];
if(sl){
for(i=0;i<n;i++)
g->scale_factors[j++] = get_bits(&s->gb, sl);
}else{
for(i=0;i<n;i++)
g->scale_factors[j++] = 0;
}
}
/* XXX: should compute exact size */
for(;j<40;j++)
g->scale_factors[j] = 0;
}
exponents_from_scale_factors(s, g, exponents);
/* read Huffman coded residue */
huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
} /* ch */
if (s->nb_channels == 2)
compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
for(ch=0;ch<s->nb_channels;ch++) {
g = &s->granules[ch][gr];
reorder_block(s, g);
compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
}
} /* gr */
if(get_bits_count(&s->gb)<0)
skip_bits_long(&s->gb, -get_bits_count(&s->gb));
return nb_granules * 18;
}
static int mp_decode_frame(MPADecodeContext *s,
OUT_INT *samples, const uint8_t *buf, int buf_size)
{
int i, nb_frames, ch;
OUT_INT *samples_ptr;
init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);