/*
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "bitstream.h"
#include "bytestream.h"
    
    /**
     * @file adpcm.c
     * ADPCM codecs.
    
     * First version by Francois Revol (revol@free.fr)
    
     * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
    
     *   by Mike Melanson (melanson@pcisys.net)
    
     * CD-ROM XA ADPCM codec by BERO
    
     * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
    
     * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
    
     * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
    
     * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
    
     * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
    
     * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
    
     * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
    
     *
     * Features and limitations:
     *
     * Reference documents:
    
     * http://www.pcisys.net/~melanson/codecs/simpleaudio.html
    
     * http://www.geocities.com/SiliconValley/8682/aud3.txt
     * http://openquicktime.sourceforge.net/plugins.htm
     * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html
    
     * http://www.cs.ucla.edu/~leec/mediabench/applications.html
     * SoX source code http://home.sprynet.com/~cbagwell/sox.html
    
     *
     * CD-ROM XA:
     * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html
     * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html
     * readstr http://www.geocities.co.jp/Playtown/2004/
    
     */
    
    #define BLKSIZE 1024
    
    /* step_table[] and index_table[] are from the ADPCM reference source */
    /* This is the index table: */
    
    static const int index_table[16] = {
    
        -1, -1, -1, -1, 2, 4, 6, 8,
        -1, -1, -1, -1, 2, 4, 6, 8,
    };
    
    
/**
 * This is the step table. Note that many programs use slight deviations from
    
     * this table, but such deviations are negligible:
     */
    
    static const int step_table[89] = {
    
        7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
        19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
        50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
        130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
        337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
        876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
        2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
        5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
        15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
    };
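
/* Illustrative worked decode step (a sketch, not part of the codec): with
 * step_index = 20 the table above gives step = 50; for nibble 0x5 (sign bit
 * clear, magnitude 5) the rule used in adpcm_ima_expand_nibble() yields
 * diff = ((2*5 + 1) * 50) >> 3 = 68, which is added to the predictor, and the
 * next step_index becomes 20 + index_table[5] = 24. */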
    
    
    /* These are for MS-ADPCM */
    
    /* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
    
    static const int AdaptationTable[] = {
    
            230, 230, 230, 230, 307, 409, 512, 614,
            768, 614, 512, 409, 307, 230, 230, 230
    };
    
    
static const int AdaptCoeff1[] = {
        256, 512, 0, 192, 240, 460, 392
};

static const int AdaptCoeff2[] = {
        0, -256, 0, 64, 0, -208, -220
};
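
/* Illustrative note: the MS-ADPCM prediction used below is
 *     predictor = (sample1*coeff1 + sample2*coeff2) / 256,
 * so coefficient set 0 (256, 0) simply repeats the previous sample, while
 * set 1 (512, -256) extrapolates linearly as 2*sample1 - sample2. */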
    
    /* These are for CD-ROM XA ADPCM */
    
    static const int xa_adpcm_table[5][2] = {
    
       {   0,   0 },
       {  60,   0 },
       { 115, -52 },
       {  98, -55 },
       { 122, -60 }
    };
    
    
    static const int ea_adpcm_table[] = {
    
        0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
        3, 4, 7, 8, 10, 11, 0, -1, -3, -4
    };
    
    
    static const int ct_adpcm_table[8] = {
    
        0x00E6, 0x00E6, 0x00E6, 0x00E6,
        0x0133, 0x0199, 0x0200, 0x0266
    };
    
    
// padded to zero where table size is less than 16
    
    static const int swf_index_tables[4][16] = {
    
        /*2*/ { -1, 2 },
        /*3*/ { -1, -1, 2, 4 },
        /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
        /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
    };
    
    
    static const int yamaha_indexscale[] = {
        230, 230, 230, 230, 307, 409, 512, 614,
        230, 230, 230, 230, 307, 409, 512, 614
    };
    
    static const int yamaha_difflookup[] = {
        1, 3, 5, 7, 9, 11, 13, 15,
        -1, -3, -5, -7, -9, -11, -13, -15
    };
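
/* Illustrative note on the Yamaha (AICA) tables: a nibble n moves the
 * predictor by step * yamaha_difflookup[n] / 8 (odd multiples of step/8,
 * negated when bit 3 of the nibble is set), after which step is rescaled by
 * yamaha_indexscale[n] / 256 and clamped to 127..24567 by the expand and
 * compress routines below. */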
    
    
    /* end of tables */
    
    typedef struct ADPCMChannelStatus {
        int predictor;
        short int step_index;
        int step;
    
        /* for encoding */
        int prev_sample;
    
    
        /* MS version */
        short sample1;
        short sample2;
        int coeff1;
        int coeff2;
        int idelta;
    } ADPCMChannelStatus;
    
    typedef struct ADPCMContext {
    
        ADPCMChannelStatus status[6];
    
    } ADPCMContext;
    
    /* XXX: implement encoding */
    
    
    static int adpcm_encode_init(AVCodecContext *avctx)
    {
    
        if (avctx->channels > 2)
            return -1; /* only stereo or mono =) */
    
    
        if(avctx->trellis && (unsigned)avctx->trellis > 16U){
            av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
            return -1;
        }
    
    
        switch(avctx->codec->id) {
        case CODEC_ID_ADPCM_IMA_WAV:
    
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16-bit sample gives one nibble */
                                                                 /* and we have 4 bytes per channel overhead */
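        /* Worked example (illustrative): with BLKSIZE = 1024 and one channel,
         * frame_size = (1024 - 4) * 8 / 4 + 1 = 2041 samples per block:
         * 4 header bytes per channel, then two samples per data byte. */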
            avctx->block_align = BLKSIZE;
            /* seems frame_size isn't taken into account... have to buffer the samples :-( */
            break;
    
        case CODEC_ID_ADPCM_IMA_QT:
            avctx->frame_size = 64;
            avctx->block_align = 34 * avctx->channels;
            break;
    
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16-bit sample gives one nibble */
                                                             /* and we have 7 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        break;
    
        case CODEC_ID_ADPCM_YAMAHA:
            avctx->frame_size = BLKSIZE * avctx->channels;
            avctx->block_align = BLKSIZE;
            break;
    
        case CODEC_ID_ADPCM_SWF:
    
            if (avctx->sample_rate != 11025 &&
                avctx->sample_rate != 22050 &&
                avctx->sample_rate != 44100) {
                av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
                return -1;
            }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        return -1;
    }
    
        avctx->coded_frame= avcodec_alloc_frame();
        avctx->coded_frame->key_frame= 1;
    
    
        return 0;
    }
    
    static int adpcm_encode_close(AVCodecContext *avctx)
    {
    
    av_freep(&avctx->coded_frame);

    return 0;
}
    
    static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
    {
    
        int delta = sample - c->prev_sample;
        int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    
        c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    
        c->prev_sample = av_clip_int16(c->prev_sample);
    
    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);

    return nibble;
}
    
    static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
    {
        int predictor, nibble, bias;
    
        predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
    
        nibble= sample - predictor;
        if(nibble>=0) bias= c->idelta/2;
        else          bias=-c->idelta/2;
    
        nibble= (nibble + bias) / c->idelta;
    
        nibble= av_clip(nibble, -8, 7)&0x0F;
    
        predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
    
        c->sample2 = c->sample1;
    
        c->sample1 = av_clip_int16(predictor);
    
    
        c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
        if (c->idelta < 16) c->idelta = 16;
    
        return nibble;
    }
    
    
    static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
    {
    
        int nibble, delta;
    
        delta = sample - c->predictor;
    
        nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
    
    
        c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
    
        c->predictor = av_clip_int16(c->predictor);
    
        c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}
    
    typedef struct TrellisPath {
        int nibble;
        int prev;
    } TrellisPath;
    
    typedef struct TrellisNode {
        uint32_t ssd;
        int path;
        int sample1;
        int sample2;
        int step;
    } TrellisNode;
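
/* Rough overview of the search below: adpcm_compress_trellis() performs a
 * Viterbi-style search. For each input sample it keeps the `frontier` best
 * decoder states sorted by accumulated squared error (ssd), extends every
 * state with a small range of candidate nibbles around the ideal quantizer
 * output (STORE_NODE / LOOP_NODES), and every FREEZE_INTERVAL samples commits
 * the current best path so the path buffer can be reused. */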
    
    static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                       uint8_t *dst, ADPCMChannelStatus *c, int n)
    {
    #define FREEZE_INTERVAL 128
        //FIXME 6% faster if frontier is a compile-time constant
        const int frontier = 1 << avctx->trellis;
        const int stride = avctx->channels;
        const int version = avctx->codec->id;
        const int max_paths = frontier*FREEZE_INTERVAL;
        TrellisPath paths[max_paths], *p;
        TrellisNode node_buf[2][frontier];
        TrellisNode *nodep_buf[2][frontier];
        TrellisNode **nodes = nodep_buf[0]; // nodes[] is always sorted by .ssd
        TrellisNode **nodes_next = nodep_buf[1];
        int pathn = 0, froze = -1, i, j, k;
    
        assert(!(max_paths&(max_paths-1)));
    
        memset(nodep_buf, 0, sizeof(nodep_buf));
        nodes[0] = &node_buf[1][0];
        nodes[0]->ssd = 0;
        nodes[0]->path = 0;
        nodes[0]->step = c->step_index;
        nodes[0]->sample1 = c->sample1;
        nodes[0]->sample2 = c->sample2;
    
        if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
    
            nodes[0]->sample1 = c->prev_sample;
        if(version == CODEC_ID_ADPCM_MS)
            nodes[0]->step = c->idelta;
        if(version == CODEC_ID_ADPCM_YAMAHA) {
            if(c->step == 0) {
                nodes[0]->step = 127;
                nodes[0]->sample1 = 0;
            } else {
                nodes[0]->step = c->step;
                nodes[0]->sample1 = c->predictor;
            }
        }
    
        for(i=0; i<n; i++) {
            TrellisNode *t = node_buf[i&1];
            TrellisNode **u;
            int sample = samples[i*stride];
            memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
            for(j=0; j<frontier && nodes[j]; j++) {
                // higher j have higher ssd already, so they're unlikely to use a suboptimal next sample too
                const int range = (j < frontier/2) ? 1 : 0;
                const int step = nodes[j]->step;
                int nidx;
                if(version == CODEC_ID_ADPCM_MS) {
                    const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 256;
                    const int div = (sample - predictor) / step;
    
                    const int nmin = av_clip(div-range, -8, 6);
                    const int nmax = av_clip(div+range, -7, 7);
    
                    for(nidx=nmin; nidx<=nmax; nidx++) {
                        const int nibble = nidx & 0xf;
                        int dec_sample = predictor + nidx * step;
    #define STORE_NODE(NAME, STEP_INDEX)\
                        int d;\
                        uint32_t ssd;\
                        dec_sample = av_clip_int16(dec_sample);\
                        d = sample - dec_sample;\
                        ssd = nodes[j]->ssd + d*d;\
                        if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
                            continue;\
                        /* Collapse any two states with the same previous sample value. \
                         * One could also distinguish states by step and by 2nd to last
                         * sample, but the effects of that are negligible. */\
                        for(k=0; k<frontier && nodes_next[k]; k++) {\
                            if(dec_sample == nodes_next[k]->sample1) {\
                                assert(ssd >= nodes_next[k]->ssd);\
                                goto next_##NAME;\
                            }\
                        }\
                        for(k=0; k<frontier; k++) {\
                            if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
                                TrellisNode *u = nodes_next[frontier-1];\
                                if(!u) {\
                                    assert(pathn < max_paths);\
                                    u = t++;\
                                    u->path = pathn++;\
                                }\
                                u->ssd = ssd;\
                                u->step = STEP_INDEX;\
                                u->sample2 = nodes[j]->sample1;\
                                u->sample1 = dec_sample;\
                                paths[u->path].nibble = nibble;\
                                paths[u->path].prev = nodes[j]->path;\
                                memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
                                nodes_next[k] = u;\
                                break;\
                            }\
                        }\
                        next_##NAME:;
                        STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                    }
    
                } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
    
    #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                    const int predictor = nodes[j]->sample1;\
                    const int div = (sample - predictor) * 4 / STEP_TABLE;\
                    int nmin = av_clip(div-range, -7, 6);\
                    int nmax = av_clip(div+range, -6, 7);\
                    if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                    if(nmax<0) nmax--;\
                    for(nidx=nmin; nidx<=nmax; nidx++) {\
                        const int nibble = nidx<0 ? 7-nidx : nidx;\
                        int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                        STORE_NODE(NAME, STEP_INDEX);\
                    }
    
                    LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
    
                } else { //CODEC_ID_ADPCM_YAMAHA
    
                    LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
    
    #undef LOOP_NODES
    #undef STORE_NODE
                }
            }
    
            u = nodes;
            nodes = nodes_next;
            nodes_next = u;
    
            // prevent overflow
            if(nodes[0]->ssd > (1<<28)) {
                for(j=1; j<frontier && nodes[j]; j++)
                    nodes[j]->ssd -= nodes[0]->ssd;
                nodes[0]->ssd = 0;
            }
    
            // merge old paths to save memory
            if(i == froze + FREEZE_INTERVAL) {
                p = &paths[nodes[0]->path];
                for(k=i; k>froze; k--) {
                    dst[k] = p->nibble;
                    p = &paths[p->prev];
                }
                froze = i;
                pathn = 0;
                // other nodes might use paths that don't coincide with the frozen one.
                // checking which nodes do so is too slow, so just kill them all.
                // this also slightly improves quality, but I don't know why.
                memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
            }
        }
    
        p = &paths[nodes[0]->path];
        for(i=n-1; i>froze; i--) {
            dst[i] = p->nibble;
            p = &paths[p->prev];
        }
    
        c->predictor = nodes[0]->sample1;
        c->sample1 = nodes[0]->sample1;
        c->sample2 = nodes[0]->sample2;
        c->step_index = nodes[0]->step;
        c->step = nodes[0]->step;
        c->idelta = nodes[0]->step;
    }
    
    
    static int adpcm_encode_frame(AVCodecContext *avctx,
    
                            unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
        unsigned char *dst;
    
        ADPCMContext *c = avctx->priv_data;
    
        dst = frame;
        samples = (short *)data;
    
        st= avctx->channels == 2;
    
    /*    n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */
    
    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
            n = avctx->frame_size / 8;
                c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
    /*            c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
    
                bytestream_put_le16(&dst, c->status[0].prev_sample);
    
                *dst++ = (unsigned char)c->status[0].step_index;
                *dst++ = 0; /* unknown */
                samples++;
                if (avctx->channels == 2) {
    
                    c->status[1].prev_sample = (signed short)samples[0];
    
    /*                c->status[1].step_index = 0; */
    
                    bytestream_put_le16(&dst, c->status[1].prev_sample);
    
                    *dst++ = (unsigned char)c->status[1].step_index;
                    *dst++ = 0;
                    samples++;
                }
    
                /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
    
                if(avctx->trellis > 0) {
                    uint8_t buf[2][n*8];
                    adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
                    if(avctx->channels == 2)
                        adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
                    for(i=0; i<n; i++) {
                        *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
                        *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
                        *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
                        *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
                        if (avctx->channels == 2) {
                            *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
                            *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
                            *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
                            *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
                        }
                    }
            } else
            for (; n>0; n--) {
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
                *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
                dst++;
                    /* right channel */
                    if (avctx->channels == 2) {
                        *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                        *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                        dst++;
                        *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                        *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                        dst++;
                        *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                        *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                        dst++;
                        *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                        *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                        dst++;
                    }
                    samples += 8 * avctx->channels;
                }
            break;
    
        case CODEC_ID_ADPCM_IMA_QT:
        {
            int ch, i;
            PutBitContext pb;
            init_put_bits(&pb, dst, buf_size*8);
    
            for(ch=0; ch<avctx->channels; ch++){
                put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
                put_bits(&pb, 7, c->status[ch].step_index);
                if(avctx->trellis > 0) {
                    uint8_t buf[64];
                    adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
                    for(i=0; i<64; i++)
                        put_bits(&pb, 4, buf[i^1]);
                    c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
                } else {
                    for (i=0; i<64; i+=2){
                        int t1, t2;
                        t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
                        t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
                        put_bits(&pb, 4, t2);
                        put_bits(&pb, 4, t1);
                    }
                    c->status[ch].prev_sample &= ~0x7F;
                }
            }
    
            dst += put_bits_count(&pb)>>3;
            break;
        }
    
        case CODEC_ID_ADPCM_SWF:
        {
            int i;
            PutBitContext pb;
            init_put_bits(&pb, dst, buf_size*8);
    
    
            n = avctx->frame_size-1;
    
    
            //Store AdpcmCodeSize
            put_bits(&pb, 2, 2);                //Set 4bits flash adpcm format
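            /* Illustrative note: the 2-bit code-size field holds
             * (bits per ADPCM code - 2), so the value 2 written above selects
             * 4-bit codes. */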
    
            //Init the encoder state
            for(i=0; i<avctx->channels; i++){
    
                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
    
                put_bits(&pb, 16, samples[i] & 0xFFFF);
    
                put_bits(&pb, 6, c->status[i].step_index);
    
                c->status[i].prev_sample = (signed short)samples[i];
            }
    
    
            if(avctx->trellis > 0) {
                uint8_t buf[2][n];
                adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n);
                if (avctx->channels == 2)
                    adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n);
                for(i=0; i<n; i++) {
                    put_bits(&pb, 4, buf[0][i]);
                    if (avctx->channels == 2)
                        put_bits(&pb, 4, buf[1][i]);
                }
            } else {
    
                for (i=1; i<avctx->frame_size; i++) {
    
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
    
                    if (avctx->channels == 2)
    
                        put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
    
                }
            }
            flush_put_bits(&pb);
            dst += put_bits_count(&pb)>>3;
            break;
        }
    
        case CODEC_ID_ADPCM_MS:
            for(i=0; i<avctx->channels; i++){
                int predictor=0;
    
                *dst++ = predictor;
                c->status[i].coeff1 = AdaptCoeff1[predictor];
                c->status[i].coeff2 = AdaptCoeff2[predictor];
            }
            for(i=0; i<avctx->channels; i++){
    
                if (c->status[i].idelta < 16)
    
                    c->status[i].idelta = 16;
    
                bytestream_put_le16(&dst, c->status[i].idelta);
    
            }
            for(i=0; i<avctx->channels; i++){
                c->status[i].sample1= *samples++;
    
    
                bytestream_put_le16(&dst, c->status[i].sample1);
    
            }
            for(i=0; i<avctx->channels; i++){
                c->status[i].sample2= *samples++;
    
    
            bytestream_put_le16(&dst, c->status[i].sample2);
        }

            if(avctx->trellis > 0) {
                int n = avctx->block_align - 7*avctx->channels;
                uint8_t buf[2][n];
                if(avctx->channels == 1) {
                    n *= 2;
                    adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                    for(i=0; i<n; i+=2)
                        *dst++ = (buf[0][i] << 4) | buf[0][i+1];
                } else {
                    adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                    adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                    for(i=0; i<n; i++)
                        *dst++ = (buf[0][i] << 4) | buf[1][i];
                }
            } else
    
            for(i=7*avctx->channels; i<avctx->block_align; i++) {
                int nibble;
                nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
                nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++ = nibble;
            }
            break;
    
        case CODEC_ID_ADPCM_YAMAHA:
            n = avctx->frame_size / 2;
    
            if(avctx->trellis > 0) {
                uint8_t buf[2][n*2];
                n *= 2;
                if(avctx->channels == 1) {
                    adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                    for(i=0; i<n; i+=2)
                        *dst++ = buf[0][i] | (buf[0][i+1] << 4);
                } else {
                    adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                    adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                    for(i=0; i<n; i++)
                        *dst++ = buf[0][i] | (buf[1][i] << 4);
                }
            } else
    
            for (; n>0; n--) {
                for(i = 0; i < avctx->channels; i++) {
                    int nibble;
    
                    nibble  = adpcm_yamaha_compress_sample(&c->status[i], samples[i]);
                    nibble |= adpcm_yamaha_compress_sample(&c->status[i], samples[i+avctx->channels]) << 4;
    
                    *dst++ = nibble;
                }
                samples += 2 * avctx->channels;
            }
        break;
    default:
        return -1;
    }
    return dst - frame;
}
    static av_cold int adpcm_decode_init(AVCodecContext * avctx)
    
    {
        ADPCMContext *c = avctx->priv_data;
    
        unsigned int max_channels = 2;
    
        switch(avctx->codec->id) {
        case CODEC_ID_ADPCM_EA_R1:
        case CODEC_ID_ADPCM_EA_R2:
        case CODEC_ID_ADPCM_EA_R3:
            max_channels = 6;
            break;
        }
    if(avctx->channels > max_channels){
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
            break;
    
        case CODEC_ID_ADPCM_IMA_WS:
            if (avctx->extradata && avctx->extradata_size == 2 * 4) {
                c->status[0].predictor = AV_RL32(avctx->extradata);
                c->status[1].predictor = AV_RL32(avctx->extradata + 4);
            }
        break;
    }

    return 0;
}
    
    static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
    
    {
        int step_index;
        int predictor;
        int sign, delta, diff, step;
    
    
        step = step_table[c->step_index];
    
        step_index = c->step_index + index_table[(unsigned)nibble];
        if (step_index < 0) step_index = 0;
    
        else if (step_index > 88) step_index = 88;
    
    
        sign = nibble & 8;
        delta = nibble & 7;
    
        /* perform direct multiplication instead of series of jumps proposed by
         * the reference ADPCM implementation since modern CPUs can do the mults
         * quickly enough */
    
        diff = ((2 * delta + 1) * step) >> shift;
    
        predictor = c->predictor;
        if (sign) predictor -= diff;
        else predictor += diff;
    
    
        c->predictor = av_clip_int16(predictor);
    
        c->step_index = step_index;
    
    
        return (short)c->predictor;
    
    }
    
    
    static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
    {
        int predictor;
    
        predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
        predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
    
        c->sample2 = c->sample1;
    
        c->sample1 = av_clip_int16(predictor);
    
        c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    
    if (c->idelta < 16) c->idelta = 16;

    return c->sample1;
}
    
    
    static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
    {
        int sign, delta, diff;
        int new_step;
    
        sign = nibble & 8;
        delta = nibble & 7;
        /* perform direct multiplication instead of series of jumps proposed by
         * the reference ADPCM implementation since modern CPUs can do the mults
         * quickly enough */
        diff = ((2 * delta + 1) * c->step) >> 3;
        /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
    
        c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
        c->predictor = av_clip_int16(c->predictor);
    
        /* calculate new step and clamp it to range 511..32767 */
        new_step = (ct_adpcm_table[nibble & 7] * c->step) >> 8;
    
        c->step = av_clip(new_step, 511, 32767);
    
    return (short)c->predictor;
}
    
    static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
    {
        int sign, delta, diff;
    
        sign = nibble & (1<<(size-1));
        delta = nibble & ((1<<(size-1))-1);
        diff = delta << (7 + c->step + shift);
    
        /* clamp result */
    
        c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
    
    
        /* calculate new step */
        if (delta >= (2*size - 3) && c->step < 3)
            c->step++;
        else if (delta == 0 && c->step > 0)
            c->step--;
    
        return (short) c->predictor;
    }
    
    
    static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
    {
        if(!c->step) {
            c->predictor = 0;
            c->step = 127;
        }
    
        c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    
        c->predictor = av_clip_int16(c->predictor);
    
        c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    
    c->step = av_clip(c->step, 127, 24567);

    return c->predictor;
}
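
/* Layout note for xa_decode() below, inferred from how the code indexes its
 * input: each call consumes one 128-byte CD-ROM XA sound group. Bytes 4..11
 * hold the shift/filter parameters of the group's eight sound units, and
 * bytes 16..127 hold the packed samples: byte in[16+i+j*4] carries sample j
 * of units 2*i (low nibble) and 2*i+1 (high nibble). */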
    
    static void xa_decode(short *out, const unsigned char *in,
    
        ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
    {
        int i, j;
        int shift,filter,f0,f1;
        int s_1,s_2;
        int d,s,t;
    
        for(i=0;i<4;i++) {
    
            shift  = 12 - (in[4+i*2] & 15);
            filter = in[4+i*2] >> 4;
            f0 = xa_adpcm_table[filter][0];
            f1 = xa_adpcm_table[filter][1];
    
            s_1 = left->sample1;
            s_2 = left->sample2;
    
            for(j=0;j<28;j++) {
                d = in[16+i+j*4];
    
                t = (signed char)(d<<4)>>4;
                s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
                s_2 = s_1;
    
                s_1 = av_clip_int16(s);
                *out = s_1;
                out += inc;
    
            }
    
            if (inc==2) { /* stereo */
                left->sample1 = s_1;
                left->sample2 = s_2;
                s_1 = right->sample1;
                s_2 = right->sample2;
                out = out + 1 - 28*2;
            }
    
            shift  = 12 - (in[5+i*2] & 15);
            filter = in[5+i*2] >> 4;
    
            f0 = xa_adpcm_table[filter][0];
            f1 = xa_adpcm_table[filter][1];
    
            for(j=0;j<28;j++) {
                d = in[16+i+j*4];
    
                t = (signed char)d >> 4;
                s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
                s_2 = s_1;
    
                s_1 = av_clip_int16(s);
                *out = s_1;
                out += inc;
    
            }
    
            if (inc==2) { /* stereo */
                right->sample1 = s_1;
                right->sample2 = s_2;
                out -= 1;
            } else {
                left->sample1 = s_1;
                left->sample2 = s_2;
            }
        }
    }
    
    
    
    /* DK3 ADPCM support macro */
    #define DK3_GET_NEXT_NIBBLE() \
        if (decode_top_nibble_next) \
    { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
        } \
        else \
        { \
            last_byte = *src++; \
            if (src >= buf + buf_size) break; \
            nibble = last_byte & 0x0F; \
            decode_top_nibble_next = 1; \
        }
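
/* The macro above streams 4-bit codes out of the DK3 byte stream: it returns
 * the low nibble of a freshly fetched byte first, the high nibble on the next
 * call, and breaks out of the enclosing decode loop when the source buffer is
 * exhausted. */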
    
    
    static int adpcm_decode_frame(AVCodecContext *avctx,
    
                                void *data, int *data_size,
    
                                const uint8_t *buf, int buf_size)
    
    {
        ADPCMContext *c = avctx->priv_data;
        ADPCMChannelStatus *cs;
    
        int n, m, channel, i;
    
        int block_predictor[2];
        short *samples;
    
    short *samples_end;
    const uint8_t *src;
    int st; /* stereo */
    
        /* DK3 ADPCM accounting variables */
        unsigned char last_byte = 0;
        unsigned char nibble;
        int decode_top_nibble_next = 0;
        int diff_channel;
    
    
        /* EA ADPCM state variables */
        uint32_t samples_in_chunk;
        int32_t previous_left_sample, previous_right_sample;
        int32_t current_left_sample, current_right_sample;
        int32_t next_left_sample, next_right_sample;
        int32_t coeff1l, coeff2l, coeff1r, coeff2r;
        uint8_t shift_left, shift_right;
        int count1, count2;
    
        int coeff[2][2], shift[2];//used in EA MAXIS ADPCM
    
        if (!buf_size)
            return 0;
    
    
        //should protect all 4bit ADPCM variants
        //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
        //
        if(*data_size/4 < buf_size + 8)
            return -1;
    
    
    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;

    src = buf;
    
        st = avctx->channels == 2 ? 1 : 0;
    
    
        switch(avctx->codec->id) {
        case CODEC_ID_ADPCM_IMA_QT:
    
            n = buf_size - 2*avctx->channels;
            for (channel = 0; channel < avctx->channels; channel++) {
    
                cs = &(c->status[channel]);
                /* (pppppp) (piiiiiii) */
    
                /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
                cs->predictor = (*src++) << 8;
                cs->predictor |= (*src & 0x80);
                cs->predictor &= 0xFF80;
    
                /* sign extension */
                if(cs->predictor & 0x8000)
                    cs->predictor -= 0x10000;
    
                cs->predictor = av_clip_int16(cs->predictor);
    
                cs->step_index = (*src++) & 0x7F;
    
                if (cs->step_index > 88){
                    av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                    cs->step_index = 88;
                }
    
                cs->step = step_table[cs->step_index];
    
                samples = (short*)data + channel;
    
            for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples) */
                    *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
                    samples += avctx->channels;
                    *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4  , 3);
                    samples += avctx->channels;
                    src ++;
                }
    
        }
        break;
        case CODEC_ID_ADPCM_IMA_WAV:
    
            if (avctx->block_align != 0 && buf_size > avctx->block_align)
                buf_size = avctx->block_align;
    
    
//        samples_per_block= (block_align-4*channels)*8 / (bits_per_sample * channels) + 1;
    
    
            for(i=0; i<avctx->channels; i++){
                cs = &(c->status[i]);
    
            cs->predictor = *samples++ = (int16_t)(src[0] + (src[1]<<8));
            src+=2;

            cs->step_index = *src++;
    
                if (cs->step_index > 88){
                    av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                    cs->step_index = 88;
                }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
        }
    
            while(src < buf + buf_size){
                for(m=0; m<4; m++){
                    for(i=0; i<=st; i++)
                        *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                    for(i=0; i<=st; i++)
                        *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4  , 3);
                src++;
            }
            src += 4*st;
        }
    
            break;
        case CODEC_ID_ADPCM_4XM:
            cs = &(c->status[0]);
            c->status[0].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2;
            if(st){