/*
* FFplay : Simple Media Player based on the Libav libraries
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#if CONFIG_AVFILTER
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
#endif
#include "cmdutils.h"
#include <SDL.h>
#include <SDL_thread.h>
#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif
const char program_name[] = "FFplay";
const int program_birth_year = 2003;
//#define DEBUG
//#define DEBUG_SYNC
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5
/* SDL audio buffer size, in samples. Should be small to have precise
A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024
/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0
/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)
static int sws_flags = SWS_BICUBIC;
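/* Thread-safe FIFO of demuxed packets: the read thread appends with
   packet_queue_put() while the decoder threads drain it through
   packet_queue_get(), all serialized by an SDL mutex/condition pair. */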
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
int abort_request;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4
typedef struct VideoPicture {
double pts; ///<presentation time stamp for this picture
double target_clock; ///<av_gettime() time at which this should be displayed ideally
int64_t pos; ///<byte position in file
SDL_Overlay *bmp;
int width, height; /* source height & width */
int allocated;
enum PixelFormat pix_fmt;
#if CONFIG_AVFILTER
AVFilterBufferRef *picref;
#endif
} VideoPicture;

typedef struct SubPicture {
double pts; /* presentation time stamp for this picture */
AVSubtitle sub;
} SubPicture;
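/* Selects which clock drives playback; the remaining streams are slaved
   to the chosen master. */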
enum {
AV_SYNC_AUDIO_MASTER, /* default choice */
AV_SYNC_VIDEO_MASTER,
AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
typedef struct VideoState {
SDL_Thread *parse_tid;
SDL_Thread *video_tid;
AVInputFormat *iformat;
int no_background;
int abort_request;
int paused;
int last_paused;
int read_pause_return;
AVFormatContext *ic;
int dtg_active_format;
int audio_stream;
double external_clock; /* external clock base */
int64_t external_clock_time;
double audio_clock;
double audio_diff_cum; /* used for AV difference average computation */
double audio_diff_avg_coef;
double audio_diff_threshold;
int audio_diff_avg_count;
AVStream *audio_st;
PacketQueue audioq;
int audio_hw_buf_size;
/* samples output by the codec. we reserve more space for avsync
compensation */
DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
unsigned int audio_buf_size; /* in bytes */
enum AVSampleFormat audio_src_fmt;
int show_audio; /* if true, display audio samples */
int16_t sample_array[SAMPLE_ARRAY_SIZE];
int sample_array_index;
int last_i_start;
RDFTContext *rdft;
int rdft_bits;
FFTSample *rdft_data;
int xpos;
SDL_Thread *subtitle_tid;
int subtitle_stream;
int subtitle_stream_changed;
AVStream *subtitle_st;
PacketQueue subtitleq;
SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
int subpq_size, subpq_rindex, subpq_windex;
SDL_mutex *subpq_mutex;
SDL_cond *subpq_cond;
double frame_timer;
double frame_last_pts;
double frame_last_delay;
double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
int video_stream;
AVStream *video_st;
PacketQueue videoq;
double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
int64_t video_current_pos; ///<current displayed file pos
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
SDL_cond *pictq_cond;
struct SwsContext *img_convert_ctx;
// QETimer *video_timer;
char filename[1024];
int width, height, xleft, ytop;
PtsCorrectionContext pts_ctx;
#if CONFIG_AVFILTER
AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif
float skip_frames;
float skip_frames_index;
int refresh;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);
/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={
[AVMEDIA_TYPE_AUDIO]=-1,
[AVMEDIA_TYPE_VIDEO]=-1,
[AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int exit_on_keydown;
static int exit_on_mousedown;
static int rdftspeed = 20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif
/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;
static AVPacket flush_pkt;
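/* Custom SDL user events: overlay allocation and display refresh must run
   in the main thread, so the worker threads request them through the SDL
   event queue; FF_QUIT_EVENT asks the main loop to exit. */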
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
static SDL_Surface *screen;
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
packet_queue_put(q, &flush_pkt);
}

static void packet_queue_flush(PacketQueue *q)
{
AVPacketList *pkt, *pkt1;

SDL_LockMutex(q->mutex);
for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
pkt1 = pkt->next;
av_free_packet(&pkt->pkt);
av_freep(&pkt);
}
q->last_pkt = NULL;
q->first_pkt = NULL;
q->nb_packets = 0;
q->size = 0;
SDL_UnlockMutex(q->mutex);
}
static void packet_queue_end(PacketQueue *q)
{
packet_queue_flush(q);
SDL_DestroyMutex(q->mutex);
SDL_DestroyCond(q->cond);
}
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
AVPacketList *pkt1;
if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
pkt1 = av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size + sizeof(*pkt1);
/* XXX: should duplicate packet data in DV case */
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
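/* Wake up any thread blocked in packet_queue_get() and make it return an
   abort status. */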
static void packet_queue_abort(PacketQueue *q)
{
SDL_LockMutex(q->mutex);
q->abort_request = 1;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
}
/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for(;;) {
if (q->abort_request) {
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size + sizeof(*pkt1);
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
} else if (!block) {
ret = 0;
break;
} else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
static inline void fill_rectangle(SDL_Surface *screen,
int x, int y, int w, int h, int color)
{
SDL_Rect rect;
rect.x = x;
rect.y = y;
rect.w = w;
rect.h = h;
SDL_FillRect(screen, &rect, color);
}
#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
int w1, w2, h1, h2;
/* fill the background */
w1 = x;
if (w1 < 0)
w1 = 0;
w2 = s->width - (x + w);
if (w2 < 0)
w2 = 0;
h1 = y;
if (h1 < 0)
h1 = 0;
h2 = s->height - (y + h);
if (h2 < 0)
h2 = 0;
fill_rectangle(screen,
s->xleft, s->ytop,
w1, s->height,
color);
fill_rectangle(screen,
s->xleft + s->width - w2, s->ytop,
w2, s->height,
color);
fill_rectangle(screen,
s->xleft + w1, s->ytop,
s->width - w1 - w2, h1,
color);
fill_rectangle(screen,
s->xleft + w1, s->ytop + s->height - h2,
s->width - w1 - w2, h2,
color);
}
#endif
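/* Fixed-point alpha blend: 'newp' is a sum of 2^s samples, 'oldp' a single
   sample scaled by << s to match, and the result is renormalized by
   dividing by 255 << s; alpha 'a' is in the 0..255 range. */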
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
#define RGBA_IN(r, g, b, a, s)\
{\
unsigned int v = ((const uint32_t *)(s))[0];\
a = (v >> 24) & 0xff;\
r = (v >> 16) & 0xff;\
g = (v >> 8) & 0xff;\
b = v & 0xff;\
}
#define YUVA_IN(y, u, v, a, s, pal)\
{\
unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
a = (val >> 24) & 0xff;\
y = (val >> 16) & 0xff;\
u = (val >> 8) & 0xff;\
v = val & 0xff;\
}
#define YUVA_OUT(d, y, u, v, a)\
{\
((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}
#define BPP 1
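/* Blend one palettized (PAL8) subtitle rectangle onto a YUV420P picture.
   Luma is blended per pixel; chroma is blended on the 2x2-subsampled grid,
   which is why rows and columns are walked in pairs, with the odd edges
   handled separately. */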
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
int wrap, wrap3, width2, skip2;
int y, u, v, a, u1, v1, a1, w, h;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
const uint32_t *pal;
int dstx, dsty, dstw, dsth;
dstw = av_clip(rect->w, 0, imgw);
dsth = av_clip(rect->h, 0, imgh);
dstx = av_clip(rect->x, 0, imgw - dstw);
dsty = av_clip(rect->y, 0, imgh - dsth);
lum = dst->data[0] + dsty * dst->linesize[0];
cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
skip2 = dstx >> 1;
wrap = dst->linesize[0];
wrap3 = rect->pict.linesize[0];
p = rect->pict.data[0];
pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
if (dsty & 1) {
lum += dstx;
cb += skip2;
cr += skip2;
if (dstx & 1) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
cb++;
cr++;
lum++;
p += BPP;
}
for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
cb++;
cr++;
p += 2 * BPP;
lum += 2;
}
if (w) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
}
p += wrap3 - dstw * BPP;
lum += wrap - dstw - dstx;
cb += dst->linesize[1] - width2 - skip2;
cr += dst->linesize[2] - width2 - skip2;
}
for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
lum += dstx;
cb += skip2;
cr += skip2;
if (dstx & 1) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
p += wrap3;
lum += wrap;
YUVA_IN(y, u, v, a, p, pal);
u1 += u;
v1 += v;
a1 += a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
cb++;
cr++;
p += -wrap3 + BPP;
lum += -wrap + 1;
}
for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
p += wrap3;
lum += wrap;
YUVA_IN(y, u, v, a, p, pal);
u1 += u;
v1 += v;
a1 += a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
cb++;
cr++;
p += -wrap3 + 2 * BPP;
lum += -wrap + 2;
}
if (w) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
p += wrap3;
lum += wrap;
YUVA_IN(y, u, v, a, p, pal);
u1 += u;
v1 += v;
a1 += a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
cb++;
cr++;
p += -wrap3 + BPP;
lum += -wrap + 1;
}
p += wrap3 + (wrap3 - dstw * BPP);
lum += wrap + (wrap - dstw - dstx);
cb += dst->linesize[1] - width2 - skip2;
cr += dst->linesize[2] - width2 - skip2;
}
/* handle odd height */
if (h) {
lum += dstx;
cb += skip2;
cr += skip2;
if (dstx & 1) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
cb++;
cr++;
lum++;
p += BPP;
}
for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
cb++;
cr++;
p += 2 * BPP;
lum += 2;
}
if (w) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
}
}
}
static void free_subpicture(SubPicture *sp)
{
avsubtitle_free(&sp->sub);
}
static void video_image_display(VideoState *is)
{
VideoPicture *vp;
SubPicture *sp;
AVPicture pict;
float aspect_ratio;
int width, height, x, y;
SDL_Rect rect;
int i;
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
#if CONFIG_AVFILTER
if (vp->picref->video->pixel_aspect.num == 0)
aspect_ratio = 0;
else
aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else
/* XXX: use variable in the frame */
if (is->video_st->sample_aspect_ratio.num)
aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
else if (is->video_st->codec->sample_aspect_ratio.num)
aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
else
aspect_ratio = 0;
#endif
if (aspect_ratio <= 0.0)
aspect_ratio = 1.0;
aspect_ratio *= (float)vp->width / (float)vp->height;
if (is->subtitle_st)
{
if (is->subpq_size > 0)
{
sp = &is->subpq[is->subpq_rindex];
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
{
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
for (i = 0; i < sp->sub.num_rects; i++)
blend_subrect(&pict, sp->sub.rects[i],
vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
}
}
/* XXX: we suppose the screen has a 1.0 pixel ratio */
height = is->height;
width = ((int)rint(height * aspect_ratio)) & ~1;
if (width > is->width) {
width = is->width;
height = ((int)rint(width / aspect_ratio)) & ~1;
}
x = (is->width - width) / 2;
y = (is->height - height) / 2;
if (!is->no_background) {
/* fill the background */
// fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
} else {
is->no_background = 0;
}
rect.x = is->xleft + x;
rect.y = is->ytop + y;
rect.w = width;
rect.h = height;
SDL_DisplayYUVOverlay(vp->bmp, &rect);
} else {
#if 0
fill_rectangle(screen,
is->xleft, is->ytop, is->width, is->height,
QERGB(0x00, 0x00, 0x00));
#endif
}
}
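/* Modulo that always returns a value in [0, b), even for negative a. */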
static inline int compute_mod(int a, int b)
{
a = a % b;
if (a >= 0)
return a;
else
return a + b;
}
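/* Visualize audio as either a per-channel waveform (show_audio == 1) or a
   scrolling RDFT spectrum, drawn one column per refresh. */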
static void video_audio_display(VideoState *s)
{
int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
int ch, channels, h, h2, bgcolor, fgcolor;
int64_t time_diff;
int rdft_bits, nb_freq;
for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
;
nb_freq= 1<<(rdft_bits-1);
/* compute display index : center on currently output samples */
channels = s->audio_st->codec->channels;
nb_display_channels = channels;
if (!s->paused) {
int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
n = 2 * channels;
delay = audio_write_get_buf_size(s);
delay /= n;
/* to be more precise, we take into account the time spent since
the last buffer computation */
if (audio_callback_time) {
time_diff = av_gettime() - audio_callback_time;
delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
}

delay += 2*data_used;
if (delay < data_used)
delay = data_used;
i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
if(s->show_audio==1){
h= INT_MIN;
for(i=0; i<1000; i+=channels){
int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
int a= s->sample_array[idx];
int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
int score= a-d;
if(h<score && (b^c)<0){
h= score;
i_start= idx;
}
}
}
s->last_i_start = i_start;
} else {
i_start = s->last_i_start;
}
bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
if(s->show_audio==1){
fill_rectangle(screen,
s->xleft, s->ytop, s->width, s->height,
bgcolor);
fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
/* total height for one channel */
h = s->height / nb_display_channels;
/* graph height / 2 */
h2 = (h * 9) / 20;
for(ch = 0;ch < nb_display_channels; ch++) {
i = i_start + ch;
y1 = s->ytop + ch * h + (h / 2); /* position of center line */
for(x = 0; x < s->width; x++) {
y = (s->sample_array[i] * h2) >> 15;
if (y < 0) {
y = -y;
ys = y1 - y;
} else {
ys = y1;
}
fill_rectangle(screen,
s->xleft + x, ys, 1, y,
fgcolor);
i += channels;
if (i >= SAMPLE_ARRAY_SIZE)
i -= SAMPLE_ARRAY_SIZE;
}
}
fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
for(ch = 1;ch < nb_display_channels; ch++) {
y = s->ytop + ch * h;
fill_rectangle(screen,
s->xleft, y, s->width, 1,
fgcolor);
}
SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}else{
nb_display_channels= FFMIN(nb_display_channels, 2);
if(rdft_bits != s->rdft_bits){
av_rdft_end(s->rdft);
av_free(s->rdft_data);
s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
s->rdft_bits= rdft_bits;
s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
}
{
FFTSample *data[2];
for(ch = 0;ch < nb_display_channels; ch++) {
data[ch]= s->rdft_data + 2*nb_freq*ch;
i = i_start + ch;
for(x = 0; x < 2*nb_freq; x++) {
double w= (x-nb_freq)*(1.0/nb_freq);
data[ch][x]= s->sample_array[i]*(1.0-w*w);
i += channels;
if (i >= SAMPLE_ARRAY_SIZE)
i -= SAMPLE_ARRAY_SIZE;
}
av_rdft_calc(s->rdft, data[ch]);
}
// least efficient way to do this, we should of course directly access it but it's more than fast enough
for(y=0; y<s->height; y++){
double w= 1/sqrt(nb_freq);
int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
+ data[1][2*y+1]*data[1][2*y+1])) : a;
a= FFMIN(a,255);
b= FFMIN(b,255);
fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
fill_rectangle(screen,
s->xpos, s->height-y, 1, 1,
fgcolor);
}
}
SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
s->xpos++;
if(s->xpos >= s->width)
s->xpos= s->xleft;
}
}
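/* (Re)create the SDL video surface, choosing the size from the fullscreen
   dimensions, an explicitly requested screen size, the filter-chain output
   or the codec parameters, in that order. */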
static int video_open(VideoState *is){
int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
int w,h;
if(is_full_screen) flags |= SDL_FULLSCREEN;
else flags |= SDL_RESIZABLE;
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
h = fs_screen_height;
} else if(!is_full_screen && screen_width){
w = screen_width;
h = screen_height;
#if CONFIG_AVFILTER
}else if (is->out_video_filter && is->out_video_filter->inputs[0]){
w = is->out_video_filter->inputs[0]->w;
h = is->out_video_filter->inputs[0]->h;
#else
}else if (is->video_st && is->video_st->codec->width){
w = is->video_st->codec->width;
h = is->video_st->codec->height;
#endif
} else {
w = 640;
h = 480;
}

if(screen && is->width == screen->w && screen->w == w
&& is->height== screen->h && screen->h == h)
return 0;
#ifndef __APPLE__
screen = SDL_SetVideoMode(w, h, 0, flags);
#else
/* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
if (!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
return -1;
}
if (!window_title)
window_title = input_filename;
SDL_WM_SetCaption(window_title, window_title);
is->width = screen->w;
is->height = screen->h;
return 0;
}
/* display the current picture, if any */
static void video_display(VideoState *is)
{
if(!screen)
video_open(cur_stream);
if (is->audio_st && is->show_audio)
video_audio_display(is);
else if (is->video_st)
video_image_display(is);
}
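/* Timer thread: periodically queue an FF_REFRESH_EVENT so the main loop
   redraws; it wakes every 5 ms for video, or every rdftspeed ms while the
   audio visualization is shown. */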
static int refresh_thread(void *opaque)
{
VideoState *is= opaque;
while(!is->abort_request){
SDL_Event event;
event.type = FF_REFRESH_EVENT;
event.user.data1 = opaque;
if(!is->refresh){
is->refresh=1;
SDL_PushEvent(&event);
}
usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); // FIXME: ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
}
return 0;
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{