/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
    
    /**
     * @file
     * simple media player based on the FFmpeg libraries
     */
    
    
    #include "config.h"
    
    #include <inttypes.h>
    
    #include <math.h>
    #include <limits.h>
    
    #include <signal.h>
    
    #include "libavutil/avstring.h"
    
    #include "libavutil/colorspace.h"
    
    #include "libavutil/mathematics.h"
    
    #include "libavutil/imgutils.h"
    
    #include "libavutil/dict.h"
    
    #include "libavutil/parseutils.h"
    #include "libavutil/samplefmt.h"
    
    #include "libavutil/time.h"
    
    #include "libavformat/avformat.h"
    #include "libavdevice/avdevice.h"
    #include "libswscale/swscale.h"
    
    #include "libavutil/opt.h"
    
    #include "libavcodec/avfft.h"
    
    #include "libswresample/swresample.h"
    
    #if CONFIG_AVFILTER
    
    # include "libavfilter/avcodec.h"
    
    # include "libavfilter/avfilter.h"
    
    # include "libavfilter/buffersrc.h"
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    #include <SDL.h>
    #include <SDL_thread.h>
    
    
    #include <assert.h>
    
    
    const char program_name[] = "ffplay";
    
    const int program_birth_year = 2003;
    
    #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
    #define MIN_FRAMES 5
    
    /* Minimum SDL audio buffer size, in samples. */
    
    #define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
    
    #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
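
/* For example, at 48 kHz a callback buffer of at least 48000 / 30 = 1600
 * samples (~33 ms) keeps the callback rate at or below 30 per second, while
 * SDL_AUDIO_MIN_BUFFER_SIZE sets the floor for very low sample rates. */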
    
    /* no AV sync correction is done if below the minimum AV sync threshold */
    
    #define AV_SYNC_THRESHOLD_MIN 0.04
    
    /* AV sync correction is done if above the maximum AV sync threshold */
    #define AV_SYNC_THRESHOLD_MAX 0.1
    /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
    #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
    
    /* no AV correction is done if too big error */
    #define AV_NOSYNC_THRESHOLD 10.0
    
    /* maximum audio speed change to get correct sync */
    #define SAMPLE_CORRECTION_PERCENT_MAX 10
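
/* Roughly: a video frame's presentation is nudged only when the measured A-V
 * difference exceeds a per-frame sync threshold clamped between
 * AV_SYNC_THRESHOLD_MIN and AV_SYNC_THRESHOLD_MAX; differences above
 * AV_NOSYNC_THRESHOLD are treated as a timestamp jump and resynchronized
 * outright, and audio resampling compensation never changes the sample count
 * by more than SAMPLE_CORRECTION_PERCENT_MAX percent. */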
    
    
    /* external clock speed adjustment constants for realtime sources based on buffer fullness */
    #define EXTERNAL_CLOCK_SPEED_MIN  0.900
    #define EXTERNAL_CLOCK_SPEED_MAX  1.010
    #define EXTERNAL_CLOCK_SPEED_STEP 0.001
    
    
    /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
    #define AUDIO_DIFF_AVG_NB   20
    
    
    /* polls for possible required screen refresh at least this often, should be less than 1/fps */
    #define REFRESH_RATE 0.01
    
    
/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
    
    /* TODO: We assume that a decoded and resampled frame fits into this buffer */
    #define SAMPLE_ARRAY_SIZE (8 * 65536)
    
    #define CURSOR_HIDE_DELAY 1000000
    
    
    static int64_t sws_flags = SWS_BICUBIC;
    
    typedef struct MyAVPacketList {
        AVPacket pkt;
        struct MyAVPacketList *next;
        int serial;
    } MyAVPacketList;
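
/* Note on "serial" numbers: whenever the flush packet is queued (see
 * packet_queue_start() and packet_queue_put_private()), q->serial is bumped,
 * and every packet records the serial that was current when it was added.
 * Decoders and clocks compare their stored serial against the queue's to
 * recognize and discard data that predates the last flush. */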
    
    
typedef struct PacketQueue {
    MyAVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    int serial;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
    
    
    #define VIDEO_PICTURE_QUEUE_SIZE 3
    
    #define SUBPICTURE_QUEUE_SIZE 16
    
    #define FRAME_QUEUE_SIZE FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE)
    
    typedef struct AudioParams {
        int freq;
        int channels;
    
        enum AVSampleFormat fmt;
    
        int frame_size;
    int bytes_per_sec;
} AudioParams;

    typedef struct Clock {
        double pts;           /* clock base */
        double pts_drift;     /* clock base minus time at which we updated the clock */
        double last_updated;
        double speed;
        int serial;           /* clock is based on a packet with this serial */
        int paused;
        int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
    } Clock;
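
/* Given these fields, the clock's current reading at wall-clock time "now"
 * can be recovered as pts_drift + now, further adjusted by
 * (now - last_updated) * (speed - 1.0) when the clock runs at a non-unit
 * speed. */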
    
    
    /* Common struct for handling all types of decoded data and allocated render buffers. */
    typedef struct Frame {
        AVFrame *frame;
        AVSubtitle sub;
        int serial;
        double pts;           /* presentation timestamp for the frame */
        double duration;      /* estimated duration of the frame */
        int64_t pos;          /* byte position of the frame in the input file */
        SDL_Overlay *bmp;
        int allocated;
        int reallocate;
        int width;
        int height;
        AVRational sar;
    } Frame;
    
    typedef struct FrameQueue {
        Frame queue[FRAME_QUEUE_SIZE];
        int rindex;
        int windex;
        int size;
        int max_size;
        int keep_last;
        int rindex_shown;
        SDL_mutex *mutex;
        SDL_cond *cond;
        PacketQueue *pktq;
    } FrameQueue;
    
    
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct Decoder {
    AVPacket pkt;
    AVPacket pkt_temp;
    PacketQueue *queue;
    AVCodecContext *avctx;
    int pkt_serial;
    int finished;
    int flushed;
    int packet_pending;
    SDL_cond *empty_queue_cond;
    int64_t start_pts;
    AVRational start_pts_tb;
    int64_t next_pts;
    AVRational next_pts_tb;
} Decoder;

typedef struct VideoState {
    SDL_Thread *video_tid;
    int no_background;
    int abort_request;
    int paused;
    int queue_attachments_req;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;

    Clock audclk;
    Clock vidclk;
    Clock extclk;

    FrameQueue pictq;
    FrameQueue subpq;

    Decoder auddec;
    Decoder viddec;
    Decoder subdec;

    int audio_stream;

    int av_sync_type;

    int audio_clock_serial;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size; /* in bytes */
    unsigned int audio_buf1_size;
    int audio_buf_index; /* in bytes */
    int audio_write_buf_size;
    int audio_last_serial;
    struct AudioParams audio_src;
#if CONFIG_AVFILTER
    struct AudioParams audio_filter_src;
#endif
    struct AudioParams audio_tgt;
    struct SwrContext *swr_ctx;
    int frame_drops_early;
    int frame_drops_late;
    AVFrame *frame;

    enum ShowMode {
        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
    } show_mode;
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    AVStream *subtitle_st;
    PacketQueue subtitleq;

    double frame_last_returned_time;
    double frame_last_filter_delay;

    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif
    SDL_Rect last_display_rect;

    char filename[1024];
    int width, height, xleft, ytop;

#if CONFIG_AVFILTER
    AVFilterContext *in_video_filter;   // the first filter in the video chain
    AVFilterContext *out_video_filter;  // the last filter in the video chain
    AVFilterContext *in_audio_filter;   // the first filter in the audio chain
    AVFilterContext *out_audio_filter;  // the last filter in the audio chain
    AVFilterGraph *agraph;              // audio filter graph
#endif

    int last_video_stream, last_audio_stream, last_subtitle_stream;

    SDL_cond *continue_read_thread;
} VideoState;
    
    /* options specified by the user */
    static AVInputFormat *file_iformat;
    static const char *input_filename;
    
    static const char *window_title;
    
static int fs_screen_width;
static int fs_screen_height;
static int default_width  = 640;
static int default_height = 480;
static int screen_width  = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int subtitle_disable;
static int wanted_stream[AVMEDIA_TYPE_NB] = {
    [AVMEDIA_TYPE_AUDIO]    = -1,
    [AVMEDIA_TYPE_VIDEO]    = -1,
    [AVMEDIA_TYPE_SUBTITLE] = -1,
};
static int seek_by_bytes = -1;
static int display_disable;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int workaround_bugs = 1;
static int fast = 0;
static int lowres = 0;
static int decoder_reorder_pts = -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop = 1;
static int framedrop = -1;
static int infinite_buffer = -1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static const char *video_codec_name;

static int64_t cursor_last_shown;
static int cursor_hidden = 0;

#if CONFIG_AVFILTER
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *afilters = NULL;
#endif
static int autorotate = 1;

/* current context */
static int is_full_screen;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
    
    #if CONFIG_AVFILTER
    static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
    {
        GROW_ARRAY(vfilters_list, nb_vfilters);
        vfilters_list[nb_vfilters - 1] = arg;
        return 0;
    }
    #endif
    
    
    static inline
    int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
                       enum AVSampleFormat fmt2, int64_t channel_count2)
    {
        /* If channel count == 1, planar and non-planar formats are the same */
        if (channel_count1 == 1 && channel_count2 == 1)
            return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
        else
            return channel_count1 != channel_count2 || fmt1 != fmt2;
    }
    
    static inline
    int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
    {
        if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
            return channel_layout;
        else
            return 0;
    }
    
    
    static void free_picture(Frame *vp);
    
    
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
    MyAVPacketList *pkt1;

    if (q->abort_request)
       return -1;

    pkt1 = av_malloc(sizeof(MyAVPacketList));
    
        if (!pkt1)
            return -1;
        pkt1->pkt = *pkt;
        pkt1->next = NULL;
    
        if (pkt == &flush_pkt)
            q->serial++;
        pkt1->serial = q->serial;
    
    
        if (!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size + sizeof(*pkt1);
        /* XXX: should duplicate packet data in DV case */
        SDL_CondSignal(q->cond);
    
        return 0;
    }
    
    static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
    {
        int ret;
    
        /* duplicate the packet */
        if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
            return -1;
    
        SDL_LockMutex(q->mutex);
        ret = packet_queue_put_private(q, pkt);
    
        SDL_UnlockMutex(q->mutex);
    
    
        if (pkt != &flush_pkt && ret < 0)
            av_free_packet(pkt);
    
    return ret;
}
    
    static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
    {
        AVPacket pkt1, *pkt = &pkt1;
        av_init_packet(pkt);
        pkt->data = NULL;
        pkt->size = 0;
        pkt->stream_index = stream_index;
        return packet_queue_put(q, pkt);
    }
    
    
/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    q->abort_request = 1;
}

static void packet_queue_flush(PacketQueue *q)
{
    MyAVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_destroy(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
    
    static void packet_queue_abort(PacketQueue *q)
    {
        SDL_LockMutex(q->mutex);
    
        q->abort_request = 1;
    
        SDL_CondSignal(q->cond);
    
        SDL_UnlockMutex(q->mutex);
    }
    
    
    static void packet_queue_start(PacketQueue *q)
    {
        SDL_LockMutex(q->mutex);
        q->abort_request = 0;
        packet_queue_put_private(q, &flush_pkt);
        SDL_UnlockMutex(q->mutex);
    }
    
    
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
{
    MyAVPacketList *pkt1;
    int ret;
    
        SDL_LockMutex(q->mutex);
    
    
    for (;;) {
        if (q->abort_request) {
                ret = -1;
                break;
            }
    
            pkt1 = q->first_pkt;
            if (pkt1) {
                q->first_pkt = pkt1->next;
                if (!q->first_pkt)
                    q->last_pkt = NULL;
                q->nb_packets--;
    
                q->size -= pkt1->pkt.size + sizeof(*pkt1);
    
                *pkt = pkt1->pkt;
    
                if (serial)
                    *serial = pkt1->serial;
    
                av_free(pkt1);
                ret = 1;
                break;
            } else if (!block) {
                ret = 0;
                break;
            } else {
                SDL_CondWait(q->cond, q->mutex);
            }
        }
        SDL_UnlockMutex(q->mutex);
        return ret;
    }
    
    
    static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
        memset(d, 0, sizeof(Decoder));
        d->avctx = avctx;
        d->queue = queue;
        d->empty_queue_cond = empty_queue_cond;
    
        d->start_pts = AV_NOPTS_VALUE;
    
    }
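
/* Pull packets from d->queue and feed them to the decoder until a frame (or
 * subtitle) is produced, the queue is aborted, or the decoder is drained.
 * Flush packets reset the codec and the predicted audio pts; packets whose
 * serial no longer matches the queue's are skipped.  Returns 1 if a frame was
 * obtained, 0 if the decoder finished for the current serial, and -1 on
 * abort. */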
    
    static int decoder_decode_frame(Decoder *d, void *fframe) {
        int got_frame = 0;
    
        AVFrame *frame = fframe;
    
    
        d->flushed = 0;
    
        do {
            int ret = -1;
    
            if (d->queue->abort_request)
                return -1;
    
            if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
                AVPacket pkt;
                do {
                    if (d->queue->nb_packets == 0)
                        SDL_CondSignal(d->empty_queue_cond);
                    if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                        return -1;
                    if (pkt.data == flush_pkt.data) {
                        avcodec_flush_buffers(d->avctx);
                        d->finished = 0;
                        d->flushed = 1;
    
                        d->next_pts = d->start_pts;
                        d->next_pts_tb = d->start_pts_tb;
    
                    }
                } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
                av_free_packet(&d->pkt);
                d->pkt_temp = d->pkt = pkt;
                d->packet_pending = 1;
            }
    
            switch (d->avctx->codec_type) {
                case AVMEDIA_TYPE_VIDEO:
    
                    ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                    if (got_frame) {
                        if (decoder_reorder_pts == -1) {
                            frame->pts = av_frame_get_best_effort_timestamp(frame);
                        } else if (decoder_reorder_pts) {
                            frame->pts = frame->pkt_pts;
                        } else {
                            frame->pts = frame->pkt_dts;
                        }
                    }
    
                    break;
                case AVMEDIA_TYPE_AUDIO:
    
                    ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                    if (got_frame) {
                        AVRational tb = (AVRational){1, frame->sample_rate};
                        if (frame->pts != AV_NOPTS_VALUE)
                            frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
                        else if (frame->pkt_pts != AV_NOPTS_VALUE)
    
                            frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
    
                        else if (d->next_pts != AV_NOPTS_VALUE)
                            frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                        if (frame->pts != AV_NOPTS_VALUE) {
                            d->next_pts = frame->pts + frame->nb_samples;
                            d->next_pts_tb = tb;
                        }
                    }
    
                    break;
                case AVMEDIA_TYPE_SUBTITLE:
                    ret = avcodec_decode_subtitle2(d->avctx, fframe, &got_frame, &d->pkt_temp);
                    break;
            }
    
            if (ret < 0) {
                d->packet_pending = 0;
            } else {
                d->pkt_temp.dts =
                d->pkt_temp.pts = AV_NOPTS_VALUE;
                if (d->pkt_temp.data) {
                    if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                        ret = d->pkt_temp.size;
                    d->pkt_temp.data += ret;
                    d->pkt_temp.size -= ret;
                    if (d->pkt_temp.size <= 0)
                        d->packet_pending = 0;
                } else {
                    if (!got_frame) {
                        d->packet_pending = 0;
                        d->finished = d->pkt_serial;
                    }
                }
            }
        } while (!got_frame && !d->finished);
    
        return got_frame;
    }
    
    static void decoder_destroy(Decoder *d) {
        av_free_packet(&d->pkt);
    }
    
    
    static void frame_queue_unref_item(Frame *vp)
    {
        av_frame_unref(vp->frame);
        avsubtitle_free(&vp->sub);
    }
    
    static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
    {
        int i;
        memset(f, 0, sizeof(FrameQueue));
        if (!(f->mutex = SDL_CreateMutex()))
            return AVERROR(ENOMEM);
        if (!(f->cond = SDL_CreateCond()))
            return AVERROR(ENOMEM);
        f->pktq = pktq;
        f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
        f->keep_last = !!keep_last;
        for (i = 0; i < f->max_size; i++)
            if (!(f->queue[i].frame = av_frame_alloc()))
                return AVERROR(ENOMEM);
        return 0;
    }
    
    static void frame_queue_destory(FrameQueue *f)
    {
        int i;
        for (i = 0; i < f->max_size; i++) {
            Frame *vp = &f->queue[i];
            frame_queue_unref_item(vp);
            av_frame_free(&vp->frame);
            free_picture(vp);
        }
        SDL_DestroyMutex(f->mutex);
        SDL_DestroyCond(f->cond);
    }
    
    static void frame_queue_signal(FrameQueue *f)
    {
        SDL_LockMutex(f->mutex);
        SDL_CondSignal(f->cond);
        SDL_UnlockMutex(f->mutex);
    }
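
/* The FrameQueue is a small ring buffer guarded by mutex/cond.  With
 * keep_last set, the most recently displayed frame is kept in the queue
 * (rindex_shown == 1) so it can be shown again; the peek helpers below
 * therefore index relative to rindex + rindex_shown. */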
    
    static Frame *frame_queue_peek(FrameQueue *f)
    {
        return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
    }
    
    static Frame *frame_queue_peek_next(FrameQueue *f)
    {
        return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
    }
    
    static Frame *frame_queue_peek_last(FrameQueue *f)
    {
        return &f->queue[f->rindex];
    }
    
    static Frame *frame_queue_peek_writable(FrameQueue *f)
    {
        /* wait until we have space to put a new frame */
        SDL_LockMutex(f->mutex);
        while (f->size >= f->max_size &&
               !f->pktq->abort_request) {
            SDL_CondWait(f->cond, f->mutex);
        }
        SDL_UnlockMutex(f->mutex);
    
        if (f->pktq->abort_request)
            return NULL;
    
        return &f->queue[f->windex];
    }
    
    static void frame_queue_push(FrameQueue *f)
    {
        if (++f->windex == f->max_size)
            f->windex = 0;
        SDL_LockMutex(f->mutex);
        f->size++;
        SDL_UnlockMutex(f->mutex);
    }
    
    static void frame_queue_next(FrameQueue *f)
    {
        if (f->keep_last && !f->rindex_shown) {
            f->rindex_shown = 1;
            return;
        }
        frame_queue_unref_item(&f->queue[f->rindex]);
        if (++f->rindex == f->max_size)
            f->rindex = 0;
        SDL_LockMutex(f->mutex);
        f->size--;
        SDL_CondSignal(f->cond);
        SDL_UnlockMutex(f->mutex);
    }
    
    /* jump back to the previous frame if available by resetting rindex_shown */
    static int frame_queue_prev(FrameQueue *f)
    {
        int ret = f->rindex_shown;
        f->rindex_shown = 0;
        return ret;
    }
    
    /* return the number of undisplayed frames in the queue */
    static int frame_queue_nb_remaining(FrameQueue *f)
    {
        return f->size - f->rindex_shown;
    }
    
    
    /* return last shown position */
    static int64_t frame_queue_last_pos(FrameQueue *f)
    {
        Frame *fp = &f->queue[f->rindex];
        if (f->rindex_shown && fp->serial == f->pktq->serial)
            return fp->pos;
        else
            return -1;
    }
    
    
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color, int update)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
    if (update && w > 0 && h > 0)
        SDL_UpdateRect(screen, x, y, w, h);
}

    /* draw only the border of a rectangle */
    static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
    {
        int w1, w2, h1, h2;
    
        /* fill the background */
        w1 = x;
        if (w1 < 0)
            w1 = 0;
        w2 = width - (x + w);
        if (w2 < 0)
            w2 = 0;
        h1 = y;
        if (h1 < 0)
            h1 = 0;
        h2 = height - (y + h);
        if (h2 < 0)
            h2 = 0;
        fill_rectangle(screen,
                       xleft, ytop,
                       w1, height,
                       color, update);
        fill_rectangle(screen,
                       xleft + width - w2, ytop,
                       w2, height,
                       color, update);
        fill_rectangle(screen,
                       xleft + w1, ytop,
                       width - w1 - w2, h1,
                       color, update);
        fill_rectangle(screen,
                       xleft + w1, ytop + height - h2,
                       width - w1 - w2, h2,
                       color, update);
    }
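
/* ALPHA_BLEND(a, oldp, newp, s) below mixes newp over oldp with alpha a
 * (0..255); the shift s compensates for chroma values that were accumulated
 * over 2 (s = 1) or 4 (s = 2) source pixels before blending. */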
    
    
    #define ALPHA_BLEND(a, oldp, newp, s)\
    ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
    
    #define RGBA_IN(r, g, b, a, s)\
    {\
        unsigned int v = ((const uint32_t *)(s))[0];\
        a = (v >> 24) & 0xff;\
        r = (v >> 16) & 0xff;\
        g = (v >> 8) & 0xff;\
        b = v & 0xff;\
    }
    
    #define YUVA_IN(y, u, v, a, s, pal)\
    {\
    
        unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    
        a = (val >> 24) & 0xff;\
        y = (val >> 16) & 0xff;\
        u = (val >> 8) & 0xff;\
        v = val & 0xff;\
    }
    
    #define YUVA_OUT(d, y, u, v, a)\
    {\
        ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
    }
    
    
    #define BPP 1
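
/* Alpha-blend a palettized subtitle rectangle onto a YUV 4:2:0 destination
 * picture.  Luma is blended per pixel; for the subsampled cb/cr planes the
 * u, v and alpha contributions of neighbouring source pixels are summed and
 * blended once, with the odd first row/column handled separately. */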
    
    
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
                YUVA_IN(y, u, v, a, p, pal);
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
                cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
                cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
                cb++;
                cr++;
                lum++;
                p += BPP;
            }
    
            for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
    
                YUVA_IN(y, u, v, a, p, pal);
                u1 = u;
                v1 = v;
                a1 = a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
    
                YUVA_IN(y, u, v, a, p + BPP, pal);
                u1 += u;
                v1 += v;
                a1 += a;
                lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
                cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
                cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
                cb++;
                cr++;
                p += 2 * BPP;
                lum += 2;
            }
            if (w) {
                YUVA_IN(y, u, v, a, p, pal);
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
                cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
            lum += wrap - dstw - dstx;
    
            cb += dst->linesize[1] - width2 - skip2;
            cr += dst->linesize[2] - width2 - skip2;
        }
    
    for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
                u1 = u;
                v1 = v;
                a1 = a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
                p += wrap3;
                lum += wrap;
                YUVA_IN(y, u, v, a, p, pal);
                u1 += u;
                v1 += v;
                a1 += a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
                cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
                cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
                cb++;
                cr++;
                p += -wrap3 + BPP;
                lum += -wrap + 1;
            }
    
            for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
    
                YUVA_IN(y, u, v, a, p, pal);
                u1 = u;
                v1 = v;
                a1 = a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
    
    
                YUVA_IN(y, u, v, a, p + BPP, pal);
    
                u1 += u;
                v1 += v;
                a1 += a;
                lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
                p += wrap3;
                lum += wrap;
    
                YUVA_IN(y, u, v, a, p, pal);
                u1 += u;
                v1 += v;
                a1 += a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
    
    
                YUVA_IN(y, u, v, a, p + BPP, pal);
    
                u1 += u;
                v1 += v;
                a1 += a;
                lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
    
                cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
                cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
    
                cb++;
                cr++;
                p += -wrap3 + 2 * BPP;
                lum += -wrap + 2;
            }
            if (w) {
                YUVA_IN(y, u, v, a, p, pal);
                u1 = u;
                v1 = v;
                a1 = a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
                p += wrap3;
                lum += wrap;
                YUVA_IN(y, u, v, a, p, pal);
                u1 += u;
                v1 += v;
                a1 += a;
                lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
                cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
                cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
                cb++;
                cr++;
                p += -wrap3 + BPP;
                lum += -wrap + 1;
            }
    
            p += wrap3 + (wrap3 - dstw * BPP);
            lum += wrap + (wrap - dstw - dstx);
    
            cb += dst->linesize[1] - width2 - skip2;
            cr += dst->linesize[2] - width2 - skip2;
        }
        /* handle odd height */
        if (h) {