/*
* AAC decoder
* Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
* Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
*
* AAC LATM decoder
* Copyright (c) 2008-2010 Paul Kendall <paul@kcbbs.gen.nz>
* Copyright (c) 2010 Janne Grunau <janne-ffmpeg@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* AAC decoder
* @author Oded Shimon ( ods15 ods15 dyndns org )
* @author Maxim Gavrilov ( maxim.gavrilov gmail com )
*/
/*
* supported tools
*
* Support? Name
* N (code in SoC repo) gain control
* Y block switching
* Y window shapes - standard
* N window shapes - Low Delay
* Y filterbank - standard
* N (code in SoC repo) filterbank - Scalable Sample Rate
* Y Temporal Noise Shaping
* N (code in SoC repo) Long Term Prediction
* Y intensity stereo
* Y channel coupling
* Y frequency domain prediction
* Y Perceptual Noise Substitution
* Y Mid/Side stereo
* N Scalable Inverse AAC Quantization
* N Frequency Selective Switch
* N upsampling filter
* Y quantization & coding - AAC
* N quantization & coding - TwinVQ
* N quantization & coding - BSAC
* N AAC Error Resilience tools
* N Error Resilience payload syntax
* N Error Protection tool
* N CELP
* N Silence Compression
* N HVXC
* N HVXC 4kbits/s VR
* N Structured Audio tools
* N Structured Audio Sample Bank Format
* N MIDI
* N Harmonic and Individual Lines plus Noise
* N Text-To-Speech Interface
* Y (not in this code) Layer-1
* Y (not in this code) Layer-2
* Y (not in this code) Layer-3
* N SinuSoidal Coding (Transient, Sinusoid, Noise)
* N Direct Stream Transfer
*
* Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
* - HE AAC v2 comprises LC AAC with Spectral Band Replication and
*         Parametric Stereo.
*/
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "dsputil.h"
#include "fft.h"
#include "lpc.h"
#include "aac.h"
#include "aactab.h"
#include "sbr.h"
#include "aacsbr.h"
#include "mpeg4audio.h"
#include "aacdectab.h"
#include "cbrt_tablegen.h"
#include "aacadtsdec.h"
#include <assert.h>
#include <errno.h>
#include <math.h>
#include <string.h>
#if ARCH_ARM
# include "arm/aac.h"
#endif
union float754 {
float f;
uint32_t i;
};
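/*
 * Illustrative sketch (not part of the decoder proper): the union gives
 * bit-level access to an IEEE-754 single, which VMUL2S/VMUL4S below use to
 * apply a coded sign by XOR-ing bit 31 instead of branching:
 *
 *     union float754 u = { .f = 0.5f };
 *     u.i ^= 1U << 31;   // u.f is now -0.5f
 */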
static VLC vlc_scalefactors;
static VLC vlc_spectral[11];
static const char overread_err[] = "Input buffer exhausted before END element found\n";
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
{
// For PCE based channel configurations map the channels solely based on tags.
if (!ac->m4ac.chan_config) {
return ac->tag_che_map[type][elem_id];
}
// For indexed channel configurations map the channels solely based on position.
switch (ac->m4ac.chan_config) {
case 7:
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
}
case 6:
/* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
ac->tags_mapped++;
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
}
case 5:
if (ac->tags_mapped == 2 && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
}
case 4:
if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
}
case 3:
case 2:
if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
        } else if (ac->m4ac.chan_config == 2) {
            return NULL;
        }
case 1:
if (!ac->tags_mapped && type == TYPE_SCE) {
ac->tags_mapped++;
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
}
default:
return NULL;
    }
}
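/*
 * Illustrative walk-through (hypothetical call sequence): for a 5.1 stream
 * (chan_config == 6) the raw data block carries SCE CPE CPE LFE, and the
 * fall-through cases above map the elements in arrival order:
 *
 *     get_che(ac, TYPE_SCE, 0);   // centre         (tags_mapped 0 -> 1)
 *     get_che(ac, TYPE_CPE, 0);   // front L/R      (tags_mapped 1 -> 2)
 *     get_che(ac, TYPE_CPE, 1);   // surround L/R   (tags_mapped 2 -> 3)
 *     get_che(ac, TYPE_LFE, 0);   // LFE            (tags_mapped 3 -> 4)
 *
 * A stream that wrongly sends SCE[1] instead of LFE[0] as its last element
 * still lands on the LFE slot via the case 6 workaround.
 */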
/**
* Check for the channel element in the current channel position configuration.
* If it exists, make sure the appropriate element is allocated and map the
* channel order to match the internal FFmpeg channel layout.
*
* @param che_pos current channel position configuration
* @param type channel element type
* @param id channel element id
* @param channels count of the number of channels in the configuration
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int che_configure(AACContext *ac,
enum ChannelPosition che_pos[4][MAX_ELEM_ID],
int type, int id,
int *channels)
{
if (che_pos[type][id]) {
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
return AVERROR(ENOMEM);
        ff_aac_sbr_ctx_init(&ac->che[type][id]->sbr);
if (type != TYPE_CCE) {
ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
if (type == TYPE_CPE ||
(type == TYPE_SCE && ac->m4ac.ps == 1)) {
ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
}
}
} else {
if (ac->che[type][id])
ff_aac_sbr_ctx_close(&ac->che[type][id]->sbr);
av_freep(&ac->che[type][id]);
    }
return 0;
}
/**
* Configure output channel order based on the current program configuration element.
*
* @param che_pos current channel position configuration
* @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int output_configure(AACContext *ac,
enum ChannelPosition che_pos[4][MAX_ELEM_ID],
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
int channel_config, enum OCStatus oc_type)
{
    AVCodecContext *avctx = ac->avctx;
int i, type, channels = 0, ret;
memcpy(che_pos, new_che_pos, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
if (channel_config) {
for (i = 0; i < tags_per_config[channel_config]; i++) {
if ((ret = che_configure(ac, che_pos,
aac_channel_layout_map[channel_config - 1][i][0],
aac_channel_layout_map[channel_config - 1][i][1],
&channels)))
return ret;
}
memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
avctx->channel_layout = aac_channel_layout[channel_config - 1];
} else {
/* Allocate or free elements depending on if they are in the
* current program configuration.
*
* Set up default 1:1 output mapping.
*
* For a 5.1 stream the output order will be:
* [ Center ] [ Front Left ] [ Front Right ] [ LFE ] [ Surround Left ] [ Surround Right ]
*/
for (i = 0; i < MAX_ELEM_ID; i++) {
for (type = 0; type < 4; type++) {
if ((ret = che_configure(ac, che_pos, type, i, &channels)))
return ret;
            }
        }

        memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));

        avctx->channel_layout = 0;
    }

    avctx->channels = channels;

    ac->output_configured = oc_type;

    return 0;
}
/**
* Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
*
* @param cpe_map Stereo (Channel Pair Element) map, NULL if stereo bit is not present.
* @param sce_map mono (Single Channel Element) map
* @param type speaker type/position for these channels
*/
static void decode_channel_map(enum ChannelPosition *cpe_map,
enum ChannelPosition *sce_map,
enum ChannelPosition type,
GetBitContext *gb, int n)
{
while (n--) {
enum ChannelPosition *map = cpe_map && get_bits1(gb) ? cpe_map : sce_map; // stereo or mono map
map[get_bits(gb, 4)] = type;
}
}
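/*
 * Sketch of the syntax consumed above (spec field names approximate): when
 * cpe_map is non-NULL each entry is
 *     1 bit   element_is_cpe
 *     4 bits  element_tag_select
 * so a front section "CPE#0, SCE#0" reads the bits 1 0000 0 0000 and sets
 * cpe_map[0] and sce_map[0] to AAC_CHANNEL_FRONT.
 */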
/**
* Decode program configuration element; reference: table 4.2.
*
* @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
                      enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                      GetBitContext *gb)
{
    int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
    int comment_len;
skip_bits(gb, 2); // object_type
sampling_index = get_bits(gb, 4);
if (m4ac->sampling_index != sampling_index)
av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
num_front = get_bits(gb, 4);
num_side = get_bits(gb, 4);
num_back = get_bits(gb, 4);
num_lfe = get_bits(gb, 2);
num_assoc_data = get_bits(gb, 3);
num_cc = get_bits(gb, 4);
if (get_bits1(gb))
skip_bits(gb, 4); // mono_mixdown_tag
if (get_bits1(gb))
skip_bits(gb, 4); // stereo_mixdown_tag
if (get_bits1(gb))
skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE, gb, num_side );
decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK, gb, num_back );
decode_channel_map(NULL, new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE, gb, num_lfe );
skip_bits_long(gb, 4 * num_assoc_data);
decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC, gb, num_cc );
align_get_bits(gb);
/* comment field, first byte is length */
comment_len = get_bits(gb, 8) * 8;
if (get_bits_left(gb) < comment_len) {
av_log(avctx, AV_LOG_ERROR, overread_err);
return -1;
}
skip_bits_long(gb, comment_len);
return 0;
}
/**
* Set up channel positions based on a default channel configuration
* as specified in table 1.17.
*
* @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static av_cold int set_default_channel_config(AVCodecContext *avctx,
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                                              int channel_config)
{
if (channel_config < 1 || channel_config > 7) {
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
channel_config);
return -1;
}
/* default channel configurations:
*
* 1ch : front center (mono)
* 2ch : L + R (stereo)
* 3ch : front center + L + R
* 4ch : front center + L + R + back center
* 5ch : front center + L + R + back stereo
* 6ch : front center + L + R + back stereo + LFE
* 7ch : front center + L + R + outer front left + outer front right + back stereo + LFE
*/
    if (channel_config != 2)
        new_che_pos[TYPE_SCE][0] = AAC_CHANNEL_FRONT; // front center (or mono)
    if (channel_config > 1)
        new_che_pos[TYPE_CPE][0] = AAC_CHANNEL_FRONT; // L + R (or stereo)
    if (channel_config == 4)
        new_che_pos[TYPE_SCE][1] = AAC_CHANNEL_BACK;  // back center
    if (channel_config > 4)
        new_che_pos[TYPE_CPE][(channel_config == 7) + 1]
            = AAC_CHANNEL_BACK;                       // back stereo
    if (channel_config > 5)
        new_che_pos[TYPE_LFE][0] = AAC_CHANNEL_LFE;   // LFE
    if (channel_config == 7)
        new_che_pos[TYPE_CPE][1] = AAC_CHANNEL_FRONT; // outer front left + outer front right
return 0;
}
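/*
 * Example: channel_config 6 (5.1) marks SCE[0] and CPE[0] as front, CPE[1]
 * as back and LFE[0] as LFE, matching the SCE CPE CPE LFE element order
 * assumed by get_che() above.
 */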
/**
* Decode GA "General Audio" specific configuration; reference: table 4.1.
*
* @param ac pointer to AACContext, may be null
* @param avctx pointer to AVCCodecContext, used for logging
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
GetBitContext *gb,
                                     MPEG4AudioConfig *m4ac,
                                     int channel_config)
{
    enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
    int extension_flag, ret;

    if (get_bits1(gb)) { // frameLengthFlag
        av_log_missing_feature(avctx, "960/120 MDCT window is", 1);
return -1;
}
if (get_bits1(gb)) // dependsOnCoreCoder
skip_bits(gb, 14); // coreCoderDelay
extension_flag = get_bits1(gb);
if (m4ac->object_type == AOT_AAC_SCALABLE ||
m4ac->object_type == AOT_ER_AAC_SCALABLE)
skip_bits(gb, 3); // layerNr
memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
if (channel_config == 0) {
skip_bits(gb, 4); // element_instance_tag
        if ((ret = decode_pce(avctx, m4ac, new_che_pos, gb)))
            return ret;
    } else {
        if ((ret = set_default_channel_config(avctx, new_che_pos, channel_config)))
            return ret;
    }
    if (ac && (ret = output_configure(ac, ac->che_pos, new_che_pos, channel_config, OC_GLOBAL_HDR)))
        return ret;
if (extension_flag) {
switch (m4ac->object_type) {
case AOT_ER_BSAC:
skip_bits(gb, 5); // numOfSubFrame
skip_bits(gb, 11); // layer_length
break;
case AOT_ER_AAC_LC:
case AOT_ER_AAC_LTP:
case AOT_ER_AAC_SCALABLE:
case AOT_ER_AAC_LD:
skip_bits(gb, 3); /* aacSectionDataResilienceFlag
* aacScalefactorDataResilienceFlag
* aacSpectralDataResilienceFlag
*/
}
skip_bits1(gb); // extensionFlag3 (TBD in version 3)
}
return 0;
}
/**
* Decode audio specific configuration; reference: table 1.13.
*
* @param ac pointer to AACContext, may be null
* @param avctx pointer to AVCCodecContext, used for logging
* @param m4ac pointer to MPEG4AudioConfig, used for parsing
* @param data pointer to AVCodecContext extradata
* @param data_size size of AVCCodecContext extradata
*
* @return Returns error status or number of consumed bits. <0 - error
static int decode_audio_specific_config(AACContext *ac,
AVCodecContext *avctx,
MPEG4AudioConfig *m4ac,
                                        const uint8_t *data, int data_size)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, data, data_size * 8);

    if ((i = ff_mpeg4audio_get_config(m4ac, data, data_size)) < 0)
        return -1;
    if (m4ac->sampling_index > 12) {
        av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
        return -1;
    }
    if (m4ac->sbr == 1 && m4ac->ps == -1)
        m4ac->ps = 1;

    skip_bits_long(&gb, i);
switch (m4ac->object_type) {
    case AOT_AAC_MAIN:
    case AOT_AAC_LC:
    case AOT_AAC_LTP:
        if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
            return -1;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
               m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
        return -1;
    }

    return get_bits_count(&gb);
}
/**
* linear congruential pseudorandom number generator
*
* @param previous_val pointer to the current state of the generator
*
* @return Returns a 32-bit pseudorandom integer
*/
static av_always_inline int lcg_random(int previous_val)
{
return previous_val * 1664525 + 1013904223;
}
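/*
 * Minimal usage sketch (illustrative only): starting from the seed set in
 * aac_decode_init() (0x1f2e3d4c), successive noise values for Perceptual
 * Noise Substitution are drawn as
 *
 *     ac->random_state = lcg_random(ac->random_state);
 *
 * and the 32-bit state is then scaled to the target band energy. The
 * multiplier/increment pair 1664525 / 1013904223 is the classic Numerical
 * Recipes LCG.
 */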
static av_always_inline void reset_predict_state(PredictorState *ps)
{
    ps->r0   = 0.0f;
    ps->r1   = 0.0f;
    ps->cor0 = 0.0f;
ps->cor1 = 0.0f;
ps->var0 = 1.0f;
ps->var1 = 1.0f;
}
static void reset_all_predictors(PredictorState *ps)
{
int i;
for (i = 0; i < MAX_PREDICTORS; i++)
reset_predict_state(&ps[i]);
}
static void reset_predictor_group(PredictorState *ps, int group_num)
{
    int i;
    for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
reset_predict_state(&ps[i]);
}
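/*
 * Example (values for illustration): predictor reset groups are numbered
 * 1..30, so reset_predictor_group(ps, 5) clears predictors 4, 34, 64, ...,
 * i.e. every 30th state starting at group_num - 1, as signalled by the
 * 5-bit predictor_reset_group field parsed in decode_prediction().
 */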
#define AAC_INIT_VLC_STATIC(num, size) \
INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
size);
static av_cold int aac_decode_init(AVCodecContext *avctx)
{
    AACContext *ac = avctx->priv_data;

    ac->avctx = avctx;
ac->m4ac.sample_rate = avctx->sample_rate;
    if (avctx->extradata_size > 0) {
        if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
avctx->extradata,
avctx->extradata_size) < 0)
return -1;
}
AAC_INIT_VLC_STATIC( 0, 304);
AAC_INIT_VLC_STATIC( 1, 270);
AAC_INIT_VLC_STATIC( 2, 550);
AAC_INIT_VLC_STATIC( 3, 300);
AAC_INIT_VLC_STATIC( 4, 328);
AAC_INIT_VLC_STATIC( 5, 294);
AAC_INIT_VLC_STATIC( 6, 306);
AAC_INIT_VLC_STATIC( 7, 268);
AAC_INIT_VLC_STATIC( 8, 510);
AAC_INIT_VLC_STATIC( 9, 366);
AAC_INIT_VLC_STATIC(10, 462);
    ff_aac_sbr_init();

    dsputil_init(&ac->dsp, avctx);

    ac->random_state = 0x1f2e3d4c;
// -1024 - Compensate wrong IMDCT method.
// 32768 - Required to scale values to the correct range for the bias method
// for float to int16 conversion.
if (ac->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
ac->add_bias = 385.0f;
ac->sf_scale = 1. / (-1024. * 32768.);
ac->sf_offset = 0;
} else {
ac->add_bias = 0.0f;
ac->sf_scale = 1. / -1024.;
ac->sf_offset = 60;
}
INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
352);
ff_mdct_init(&ac->mdct, 11, 1, 1.0);
ff_mdct_init(&ac->mdct_small, 8, 1, 1.0);
// window initialization
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
ff_init_ff_sine_windows(10);
ff_init_ff_sine_windows( 7);
    cbrt_tableinit();

    return 0;
}
/**
* Skip data_stream_element; reference: table 4.10.
*/
static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
{
    int byte_align = get_bits1(gb);
int count = get_bits(gb, 8);
if (count == 255)
count += get_bits(gb, 8);
if (byte_align)
align_get_bits(gb);
if (get_bits_left(gb) < 8 * count) {
        av_log(ac->avctx, AV_LOG_ERROR, overread_err);
        return -1;
    }
    skip_bits_long(gb, 8 * count);

    return 0;
}
static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
GetBitContext *gb)
{
int sfb;
if (get_bits1(gb)) {
ics->predictor_reset_group = get_bits(gb, 5);
if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
return -1;
}
}
for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->m4ac.sampling_index]); sfb++) {
ics->prediction_used[sfb] = get_bits1(gb);
}
return 0;
}
/**
* Decode Individual Channel Stream info; reference: table 4.6.
*
* @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
*/
static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
GetBitContext *gb, int common_window)
{
av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
memset(ics, 0, sizeof(IndividualChannelStream));
return -1;
}
ics->window_sequence[1] = ics->window_sequence[0];
ics->window_sequence[0] = get_bits(gb, 2);
ics->use_kb_window[1] = ics->use_kb_window[0];
ics->use_kb_window[0] = get_bits1(gb);
ics->num_window_groups = 1;
ics->group_len[0] = 1;
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
int i;
ics->max_sfb = get_bits(gb, 4);
for (i = 0; i < 7; i++) {
if (get_bits1(gb)) {
                ics->group_len[ics->num_window_groups - 1]++;
            } else {
                ics->num_window_groups++;
                ics->group_len[ics->num_window_groups - 1] = 1;
            }
        }
        ics->num_windows = 8;
ics->swb_offset = ff_swb_offset_128[ac->m4ac.sampling_index];
ics->num_swb = ff_aac_num_swb_128[ac->m4ac.sampling_index];
ics->tns_max_bands = ff_tns_max_bands_128[ac->m4ac.sampling_index];
ics->predictor_present = 0;
    } else {
        ics->max_sfb = get_bits(gb, 6);
ics->num_windows = 1;
ics->swb_offset = ff_swb_offset_1024[ac->m4ac.sampling_index];
ics->num_swb = ff_aac_num_swb_1024[ac->m4ac.sampling_index];
ics->tns_max_bands = ff_tns_max_bands_1024[ac->m4ac.sampling_index];
ics->predictor_present = get_bits1(gb);
ics->predictor_reset_group = 0;
if (ics->predictor_present) {
if (ac->m4ac.object_type == AOT_AAC_MAIN) {
if (decode_prediction(ac, ics, gb)) {
memset(ics, 0, sizeof(IndividualChannelStream));
return -1;
}
} else if (ac->m4ac.object_type == AOT_AAC_LC) {
av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
memset(ics, 0, sizeof(IndividualChannelStream));
return -1;
} else {
av_log_missing_feature(ac->avctx, "Predictor bit set but LTP is", 1);
memset(ics, 0, sizeof(IndividualChannelStream));
                return -1;
            }
        }
    }

    if (ics->max_sfb > ics->num_swb) {
        av_log(ac->avctx, AV_LOG_ERROR,
               "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
               ics->max_sfb, ics->num_swb);
memset(ics, 0, sizeof(IndividualChannelStream));
return -1;
}
return 0;
}
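/*
 * Example of the scale_factor_grouping logic above (bits chosen for
 * illustration): for an EIGHT_SHORT_SEQUENCE with grouping bits 1 1 0 0 1 1 0
 * the eight short windows form 4 groups of lengths 3, 1, 3, 1; a set bit
 * extends the current group, a clear bit starts a new one.
 */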
/**
* Decode band types (section_data payload); reference: table 4.46.
*
* @param band_type array of the used band type
* @param band_type_run_end array of the last scalefactor band of a band type run
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_band_types(AACContext *ac, enum BandType band_type[120],
int band_type_run_end[120], GetBitContext *gb,
IndividualChannelStream *ics)
{
int g, idx = 0;
const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5;
for (g = 0; g < ics->num_window_groups; g++) {
int k = 0;
while (k < ics->max_sfb) {
uint8_t sect_end = k;
int sect_len_incr;
int sect_band_type = get_bits(gb, 4);
if (sect_band_type == 12) {
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
return -1;
}
while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1)
sect_end += sect_len_incr;
sect_end += sect_len_incr;
if (get_bits_left(gb) < 0) {
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
return -1;
}
if (sect_end > ics->max_sfb) {
"Number of bands (%d) exceeds limit (%d).\n",
sect_end, ics->max_sfb);
return -1;
}
for (; k < sect_end; k++) {
                band_type        [idx]   = sect_band_type;
band_type_run_end[idx++] = sect_end;
            }
        }
    }
    return 0;
}
/**
* Decode scalefactors; reference: table 4.47.
*
* @param global_gain first scalefactor value as scalefactors are differentially coded
* @param band_type array of the used band type
* @param band_type_run_end array of the last scalefactor band of a band type run
* @param sf array of scalefactors or intensity stereo positions
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
unsigned int global_gain,
IndividualChannelStream *ics,
enum BandType band_type[120],
int band_type_run_end[120])
{
const int sf_offset = ac->sf_offset + (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE ? 12 : 0);
int g, i, idx = 0;
int offset[3] = { global_gain, global_gain - 90, 100 };
int noise_flag = 1;
static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };
for (g = 0; g < ics->num_window_groups; g++) {
for (i = 0; i < ics->max_sfb;) {
int run_end = band_type_run_end[idx];
if (band_type[idx] == ZERO_BT) {
                for (; i < run_end; i++, idx++)
                    sf[idx] = 0.;
} else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
for (; i < run_end; i++, idx++) {
offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
"%s (%d) out of range.\n", sf_str[2], offset[2]);
return -1;
}
sf[idx] = ff_aac_pow2sf_tab[-offset[2] + 300];
}
} else if (band_type[idx] == NOISE_BT) {
for (; i < run_end; i++, idx++) {
if (noise_flag-- > 0)
offset[1] += get_bits(gb, 9) - 256;
else
offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
"%s (%d) out of range.\n", sf_str[1], offset[1]);
return -1;
}
sf[idx] = -ff_aac_pow2sf_tab[offset[1] + sf_offset + 100];
}
} else {
for (; i < run_end; i++, idx++) {
offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
"%s (%d) out of range.\n", sf_str[0], offset[0]);
return -1;
}
sf[idx] = -ff_aac_pow2sf_tab[ offset[0] + sf_offset];
}
}
}
}
return 0;
}
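/*
 * Illustrative DPCM walk-through (hypothetical bitstream values): with
 * global_gain = 100, a run of three ordinary (non-noise, non-intensity)
 * bands whose scalefactor VLC codewords decode to 60, 58 and 63 leaves
 * offset[0] at 100, then 98, then 101, since each decoded value contributes
 * a delta of (value - 60) to the running offset that indexes
 * ff_aac_pow2sf_tab.
 */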
/**
* Decode pulse data; reference: table 4.7.
*/
static int decode_pulses(Pulse *pulse, GetBitContext *gb,
const uint16_t *swb_offset, int num_swb)
{
int i, pulse_swb;
pulse->num_pulse = get_bits(gb, 2) + 1;
pulse_swb = get_bits(gb, 6);
if (pulse_swb >= num_swb)
return -1;
pulse->pos[0] = swb_offset[pulse_swb];
pulse->pos[0] += get_bits(gb, 5);
if (pulse->pos[0] > 1023)
return -1;
pulse->amp[0] = get_bits(gb, 4);
for (i = 1; i < pulse->num_pulse; i++) {
pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
if (pulse->pos[i] > 1023)
return -1;
pulse->amp[i] = get_bits(gb, 4);
}
return 0;
}
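/*
 * Example (hypothetical bitstream values): a "number_pulse" field of 1 gives
 * two pulses; pulse_swb = 3 anchors pos[0] at swb_offset[3] plus a 5-bit
 * offset, and pos[1] = pos[0] plus another 5-bit offset; each pulse carries
 * a 4-bit amplitude that is later added to the magnitude of the quantized
 * coefficient at that position, before dequantization.
 */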
/**
* Decode Temporal Noise Shaping data; reference: table 4.48.
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
GetBitContext *gb, const IndividualChannelStream *ics)
{
int w, filt, i, coef_len, coef_res, coef_compress;
const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
for (w = 0; w < ics->num_windows; w++) {
if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
            coef_res = get_bits1(gb);

            for (filt = 0; filt < tns->n_filt[w]; filt++) {
int tmp2_idx;
tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
tns->order[w][filt], tns_max_order);
tns->order[w][filt] = 0;
return -1;
}
if (tns->order[w][filt]) {
tns->direction[w][filt] = get_bits1(gb);
coef_compress = get_bits1(gb);
                    coef_len = coef_res + 3 - coef_compress;
                    tmp2_idx = 2 * coef_compress + coef_res;
for (i = 0; i < tns->order[w][filt]; i++)
tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
                }
            }
        }
    }
    return 0;
}
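/*
 * Note on the coefficient resolution above: coef_len = coef_res + 3 -
 * coef_compress, so e.g. coef_res = 1 with coef_compress = 0 gives 4-bit TNS
 * coefficients while coef_compress = 1 trims them to 3 bits; tmp2_idx then
 * picks the matching entry of tns_tmp2_map for dequantization.
 */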
/**
* Decode Mid/Side data; reference: table 4.54.
*
* @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
* [1] mask is decoded from bitstream; [2] mask is all 1s;
* [3] reserved for scalable AAC
*/
static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
int ms_present)
{
int idx;
if (ms_present == 1) {
for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
cpe->ms_mask[idx] = get_bits1(gb);
} else if (ms_present == 2) {
memset(cpe->ms_mask, 1, cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb * sizeof(cpe->ms_mask[0]));
}
}
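/*
 * Downstream use of the mask (for orientation, not decoded here): in every
 * scalefactor band where ms_mask is set the channel pair is transmitted as
 * mid/side and later reconstructed as left = mid + side, right = mid - side
 * by the M/S stage of CPE decoding.
 */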
static inline float *VMUL2(float *dst, const float *v, unsigned idx,
const float *scale)
{
float s = *scale;
*dst++ = v[idx & 15] * s;
*dst++ = v[idx>>4 & 15] * s;
return dst;
}
static inline float *VMUL4(float *dst, const float *v, unsigned idx,
const float *scale)
{
float s = *scale;
*dst++ = v[idx & 3] * s;
*dst++ = v[idx>>2 & 3] * s;
*dst++ = v[idx>>4 & 3] * s;
*dst++ = v[idx>>6 & 3] * s;
return dst;
}
static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
unsigned sign, const float *scale)
{
union float754 s0, s1;
s0.f = s1.f = *scale;
s0.i ^= sign >> 1 << 31;
s1.i ^= sign << 31;
*dst++ = v[idx & 15] * s0.f;
*dst++ = v[idx>>4 & 15] * s1.f;
return dst;
}
static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
unsigned sign, const float *scale)
{
unsigned nz = idx >> 12;
union float754 s = { .f = *scale };
union float754 t;
t.i = s.i ^ (sign & 1<<31);
*dst++ = v[idx & 3] * t.f;
sign <<= nz & 1; nz >>= 1;
t.i = s.i ^ (sign & 1<<31);
*dst++ = v[idx>>2 & 3] * t.f;
sign <<= nz & 1; nz >>= 1;
t.i = s.i ^ (sign & 1<<31);
*dst++ = v[idx>>4 & 3] * t.f;
sign <<= nz & 1; nz >>= 1;
t.i = s.i ^ (sign & 1<<31);
*dst++ = v[idx>>6 & 3] * t.f;
return dst;
}
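/*
 * Worked example for the VMUL helpers (values chosen for illustration):
 * VMUL4S(dst, v, 0x3009, 0x80000000U, &s) with v[] = { 0, 1, 2, 3 }
 *   - magnitudes: bits 0..7 of idx (0x09) select v[1], v[2], v[0], v[0]
 *   - idx >> 12 == 0x3 marks the first two outputs as non-zero, so each of
 *     them consumes one sign bit from the MSB end of 'sign'
 *   - result: dst[] = { -1*s, +2*s, 0, 0 }
 * VMUL2/VMUL4 are the variants for codebooks whose signs are already folded
 * into the lookup table.
 */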
/**
* Decode spectral data; reference: table 4.50.
* Dequantize and scale spectral data; reference: 4.6.3.3.
*
* @param coef array of dequantized, scaled spectral data
* @param sf array of scalefactors or intensity stereo positions
* @param pulse_present set if pulses are present
* @param pulse pointer to pulse data struct
* @param band_type array of the used band type
*
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
GetBitContext *gb, const float sf[120],
int pulse_present, const Pulse *pulse,
const IndividualChannelStream *ics,
enum BandType band_type[120])
{
    int g;
    const int c = 1024 / ics->num_windows;
const uint16_t *offsets = ics->swb_offset;
float *coef_base = coef;
for (g = 0; g < ics->num_windows; g++)
memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));