/*
 * ffplay : Simple Media Player based on the FFmpeg libraries
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
# include "libavfilter/avcodec.h"
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
Stefano Sabatini
committed
# include "libavfilter/buffersink.h"
#include "cmdutils.h"
const int program_birth_year = 2003;
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5
Fabrice Bellard
committed
/* SDL audio buffer size, in samples. Should be small to have precise
A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024
/* no AV sync correction is done if below the AV sync threshold */
Fabrice Bellard
committed
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0
Fabrice Bellard
committed
/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)
static int sws_flags = SWS_BICUBIC;
/* Thread-safe FIFO of demuxed packets, shared between the reader thread
 * (producer) and the decoder threads (consumers). All fields are guarded
 * by mutex; consumers block on cond until data arrives or abort is set. */
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt; /* singly linked list: head and tail */
int nb_packets; /* number of packets currently queued */
int size; /* total bytes queued, including per-node overhead (see packet_queue_put) */
int abort_request; /* when set, blocked readers return -1 immediately */
SDL_mutex *mutex; /* protects every field of this struct */
SDL_cond *cond; /* signalled on enqueue and on abort */
} PacketQueue;
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4
double pts; ///<presentation time stamp for this picture
double target_clock; ///<av_gettime() time at which this should be displayed ideally
double duration; ///<expected duration of the frame
Michael Niedermayer
committed
int64_t pos; ///<byte position in file
SDL_Overlay *bmp;
int width, height; /* source height & width */
int allocated;
enum PixelFormat pix_fmt;
#if CONFIG_AVFILTER
AVFilterBufferRef *picref;
/* A decoded subtitle together with the timestamp at which to show it. */
typedef struct SubPicture {
double pts; /* presentation time stamp for this picture */
AVSubtitle sub; /* decoded subtitle payload (rectangles, timing) */
} SubPicture;
enum {
AV_SYNC_AUDIO_MASTER, /* default choice */
AV_SYNC_VIDEO_MASTER,
Fabrice Bellard
committed
AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
SDL_Thread *read_tid;
Fabrice Bellard
committed
AVInputFormat *iformat;
int no_background;
int abort_request;
int paused;
Fabrice Bellard
committed
int last_paused;
int read_pause_return;
Fabrice Bellard
committed
double external_clock; /* external clock base */
int64_t external_clock_time;
Fabrice Bellard
committed
double audio_clock;
double audio_diff_cum; /* used for AV difference average computation */
double audio_diff_avg_coef;
double audio_diff_threshold;
int audio_diff_avg_count;
AVStream *audio_st;
PacketQueue audioq;
int audio_hw_buf_size;
/* samples output by the codec. we reserve more space for avsync
compensation */
DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
unsigned int audio_buf_size; /* in bytes */
int audio_write_buf_size;
enum AVSampleFormat audio_src_fmt;
double audio_current_pts;
double audio_current_pts_drift;
SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
int16_t sample_array[SAMPLE_ARRAY_SIZE];
int sample_array_index;
SDL_Thread *subtitle_tid;
int subtitle_stream;
int subtitle_stream_changed;
AVStream *subtitle_st;
PacketQueue subtitleq;
SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
int subpq_size, subpq_rindex, subpq_windex;
SDL_mutex *subpq_mutex;
SDL_cond *subpq_cond;
Fabrice Bellard
committed
double frame_timer;
double frame_last_pts;
double frame_last_delay;
double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
int video_stream;
AVStream *video_st;
PacketQueue videoq;
double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
Michael Niedermayer
committed
double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
Michael Niedermayer
committed
int64_t video_current_pos; ///<current displayed file pos
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
SDL_cond *pictq_cond;
struct SwsContext *img_convert_ctx;
char filename[1024];
int width, height, xleft, ytop;
#if CONFIG_AVFILTER
AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif
float skip_frames;
float skip_frames_index;
int refresh;
Jeff Downs
committed
static int opt_help(const char *opt, const char *arg);
/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={
[AVMEDIA_TYPE_AUDIO]=-1,
[AVMEDIA_TYPE_VIDEO]=-1,
[AVMEDIA_TYPE_SUBTITLE]=-1,
Michael Niedermayer
committed
static int seek_by_bytes=-1;
static int show_status = 1;
Fabrice Bellard
committed
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
Michael Niedermayer
committed
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int exit_on_keydown;
static int exit_on_mousedown;
static int framedrop=-1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static const char *video_codec_name;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif
/* current context */
static int is_full_screen;
static int64_t audio_callback_time;
static AVPacket flush_pkt;
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
Fabrice Bellard
committed
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
static SDL_Surface *screen;
/* Terminate the process with the given status code.
 * NOTE(review): presumably the exit hook expected by cmdutils.h — confirm;
 * no per-program cleanup is performed here. */
void exit_program(int ret)
{
exit(ret);
}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
/* Append a packet to the tail of the queue and wake one waiting reader.
 * The shared flush_pkt sentinel is enqueued as-is; any other packet is
 * duplicated first so the queue owns its data.
 * Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* duplicate the packet (never the flush sentinel) */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
Michael Niedermayer
committed
/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
Michael Niedermayer
committed
packet_queue_put(q, &flush_pkt);
SDL_LockMutex(q->mutex);
for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
pkt1 = pkt->next;
av_free_packet(&pkt->pkt);
Michael Niedermayer
committed
av_freep(&pkt);
q->last_pkt = NULL;
q->first_pkt = NULL;
q->nb_packets = 0;
q->size = 0;
SDL_UnlockMutex(q->mutex);
}
/* Release everything the queue owns: all queued packets first, then the
 * synchronization primitives. The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);

    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
/* Request that readers stop: raise the abort flag under the lock and
 * signal the condition so a blocked packet_queue_get() returns -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for(;;) {
if (q->abort_request) {
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size + sizeof(*pkt1);
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
} else if (!block) {
ret = 0;
break;
} else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
/* Fill an axis-aligned rectangle of the target surface with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect area;

    area.x = x;
    area.y = y;
    area.w = w;
    area.h = h;
    SDL_FillRect(screen, &area, color);
}
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
#define RGBA_IN(r, g, b, a, s)\
{\
unsigned int v = ((const uint32_t *)(s))[0];\
a = (v >> 24) & 0xff;\
r = (v >> 16) & 0xff;\
g = (v >> 8) & 0xff;\
b = v & 0xff;\
}
#define YUVA_IN(y, u, v, a, s, pal)\
{\
Reimar Döffinger
committed
unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
a = (val >> 24) & 0xff;\
y = (val >> 16) & 0xff;\
u = (val >> 8) & 0xff;\
v = val & 0xff;\
}
#define YUVA_OUT(d, y, u, v, a)\
{\
((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}
#define BPP 1
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
int wrap, wrap3, width2, skip2;
int y, u, v, a, u1, v1, a1, w, h;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
const uint32_t *pal;
int dstx, dsty, dstw, dsth;
dstw = av_clip(rect->w, 0, imgw);
dsth = av_clip(rect->h, 0, imgh);
dstx = av_clip(rect->x, 0, imgw - dstw);
dsty = av_clip(rect->y, 0, imgh - dsth);
lum = dst->data[0] + dsty * dst->linesize[0];
cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
skip2 = dstx >> 1;
wrap = dst->linesize[0];
wrap3 = rect->pict.linesize[0];
p = rect->pict.data[0];
pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
if (dsty & 1) {
lum += dstx;
cb += skip2;
cr += skip2;
if (dstx & 1) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
cb++;
cr++;
lum++;
p += BPP;
}
for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
cb++;
cr++;
p += 2 * BPP;
lum += 2;
}
if (w) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
}
p += wrap3 - dstw * BPP;
lum += wrap - dstw - dstx;
cb += dst->linesize[1] - width2 - skip2;
cr += dst->linesize[2] - width2 - skip2;
}
for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
lum += dstx;
cb += skip2;
cr += skip2;
if (dstx & 1) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
p += wrap3;
lum += wrap;
YUVA_IN(y, u, v, a, p, pal);
u1 += u;
v1 += v;
a1 += a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
cb++;
cr++;
p += -wrap3 + BPP;
lum += -wrap + 1;
}
for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
p += wrap3;
lum += wrap;
YUVA_IN(y, u, v, a, p, pal);
u1 += u;
v1 += v;
a1 += a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
cb++;
cr++;
p += -wrap3 + 2 * BPP;
lum += -wrap + 2;
}
if (w) {
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
p += wrap3;
lum += wrap;
YUVA_IN(y, u, v, a, p, pal);
u1 += u;
v1 += v;
a1 += a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
cb++;
cr++;
p += -wrap3 + BPP;
lum += -wrap + 1;
}
p += wrap3 + (wrap3 - dstw * BPP);
lum += wrap + (wrap - dstw - dstx);
cb += dst->linesize[1] - width2 - skip2;
cr += dst->linesize[2] - width2 - skip2;
}
/* handle odd height */
if (h) {
lum += dstx;
cb += skip2;
cr += skip2;
if (dstx & 1) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
cb++;
cr++;
lum++;
p += BPP;
}
for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
YUVA_IN(y, u, v, a, p, pal);
u1 = u;
v1 = v;
a1 = a;
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
YUVA_IN(y, u, v, a, p + BPP, pal);
u1 += u;
v1 += v;
a1 += a;
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
cb++;
cr++;
p += 2 * BPP;
lum += 2;
}
if (w) {
YUVA_IN(y, u, v, a, p, pal);
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
}
}
}
/* Release resources held by a SubPicture.
 * NOTE(review): the body is empty in this copy of the file — upstream
 * versions free sp->sub here; confirm nothing was lost, otherwise
 * subtitle data leaks. */
static void free_subpicture(SubPicture *sp)
{
}
static void video_image_display(VideoState *is)
{
VideoPicture *vp;
SubPicture *sp;
AVPicture pict;
float aspect_ratio;
int width, height, x, y;
SDL_Rect rect;
int i;
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
Stefano Sabatini
committed
if (vp->picref->video->sample_aspect_ratio.num == 0)
Stefano Sabatini
committed
aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
if (is->video_st->sample_aspect_ratio.num)
aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
else if (is->video_st->codec->sample_aspect_ratio.num)
aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
aspect_ratio = 0;
aspect_ratio = 1.0;
aspect_ratio *= (float)vp->width / (float)vp->height;
if (is->subtitle_st) {
if (is->subpq_size > 0) {
sp = &is->subpq[is->subpq_rindex];
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
for (i = 0; i < sp->sub.num_rects; i++)
blend_subrect(&pict, sp->sub.rects[i],
vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
}
}
/* XXX: we suppose the screen has a 1.0 pixel ratio */
height = is->height;
width = ((int)rint(height * aspect_ratio)) & ~1;
height = ((int)rint(width / aspect_ratio)) & ~1;
}
x = (is->width - width) / 2;
y = (is->height - height) / 2;
rect.w = FFMAX(width, 1);
rect.h = FFMAX(height, 1);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
}
}
/* Modulo that maps negative dividends into [0, b) for positive b
 * (C's % operator keeps the sign of the dividend, so shift it up by b). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;

    return a < 0 ? r + b : r;
}
static void video_audio_display(VideoState *s)
{
int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
int ch, channels, h, h2, bgcolor, fgcolor;
int16_t time_diff;
int rdft_bits, nb_freq;
for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
;
nb_freq= 1<<(rdft_bits-1);
/* compute display index : center on currently output samples */
Michael Niedermayer
committed
channels = s->audio_st->codec->channels;
int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
delay = s->audio_write_buf_size;
/* to be more precise, we take into account the time spent since
the last buffer computation */
if (audio_callback_time) {
time_diff = av_gettime() - audio_callback_time;
delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
delay += 2*data_used;
if (delay < data_used)
delay = data_used;
i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
if (s->show_mode == SHOW_MODE_WAVES) {
h= INT_MIN;
for(i=0; i<1000; i+=channels){
int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
int a= s->sample_array[idx];
int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
int score= a-d;
if(h<score && (b^c)<0){
h= score;
i_start= idx;
}
s->last_i_start = i_start;
} else {
i_start = s->last_i_start;
}
bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
if (s->show_mode == SHOW_MODE_WAVES) {
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
fill_rectangle(screen,
s->xleft, s->ytop, s->width, s->height,
bgcolor);
fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
/* total height for one channel */
h = s->height / nb_display_channels;
/* graph height / 2 */
h2 = (h * 9) / 20;
for(ch = 0;ch < nb_display_channels; ch++) {
i = i_start + ch;
y1 = s->ytop + ch * h + (h / 2); /* position of center line */
for(x = 0; x < s->width; x++) {
y = (s->sample_array[i] * h2) >> 15;
if (y < 0) {
y = -y;
ys = y1 - y;
} else {
ys = y1;
}
fill_rectangle(screen,
s->xleft + x, ys, 1, y,
fgcolor);
i += channels;
if (i >= SAMPLE_ARRAY_SIZE)
i -= SAMPLE_ARRAY_SIZE;
fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
for(ch = 1;ch < nb_display_channels; ch++) {
y = s->ytop + ch * h;
fill_rectangle(screen,
s->xleft, y, s->width, 1,
fgcolor);
}
SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}else{
nb_display_channels= FFMIN(nb_display_channels, 2);
if(rdft_bits != s->rdft_bits){
s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
for(ch = 0;ch < nb_display_channels; ch++) {
i = i_start + ch;
for(x = 0; x < 2*nb_freq; x++) {
double w= (x-nb_freq)*(1.0/nb_freq);
data[ch][x]= s->sample_array[i]*(1.0-w*w);
i += channels;
if (i >= SAMPLE_ARRAY_SIZE)
i -= SAMPLE_ARRAY_SIZE;
}
}
//least efficient way to do this, we should of course directly access it but its more than fast enough
Michael Niedermayer
committed
for(y=0; y<s->height; y++){
double w= 1/sqrt(nb_freq);
int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
+ data[1][2*y+1]*data[1][2*y+1])) : a;
a= FFMIN(a,255);
b= FFMIN(b,255);
fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
fill_rectangle(screen,
s->xpos, s->height-y, 1, 1,
fgcolor);
}
}
SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
s->xpos++;
if(s->xpos >= s->width)
s->xpos= s->xleft;
}
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
/* Stop the worker threads, release every queued picture, tear down the
 * queue synchronization primitives and finally free the state itself. */
static void stream_close(VideoState *is)
{
    int i;

    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->read_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* release every slot of the picture queue */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        VideoPicture *vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }

    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
static void do_exit(VideoState *is)
if (is) {
stream_close(is);
av_lockmgr_register(NULL);
uninit_opts();
#if CONFIG_AVFILTER
avfilter_uninit();
#endif
if (show_status)
printf("\n");
SDL_Quit();
av_log(NULL, AV_LOG_QUIET, "%s", "");
static int video_open(VideoState *is){
int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
int w,h;
Michael Niedermayer
committed
if(is_full_screen) flags |= SDL_FULLSCREEN;
else flags |= SDL_RESIZABLE;
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
h = fs_screen_height;
Michael Niedermayer
committed
} else if(!is_full_screen && screen_width){
w = screen_width;
h = screen_height;
#if CONFIG_AVFILTER
}else if (is->out_video_filter && is->out_video_filter->inputs[0]){
w = is->out_video_filter->inputs[0]->w;
h = is->out_video_filter->inputs[0]->h;
#else
Michael Niedermayer
committed
}else if (is->video_st && is->video_st->codec->width){
w = is->video_st->codec->width;
h = is->video_st->codec->height;
Michael Niedermayer
committed
w = 640;
h = 480;
if(screen && is->width == screen->w && screen->w == w
&& is->height== screen->h && screen->h == h)
return 0;
#ifndef __APPLE__
screen = SDL_SetVideoMode(w, h, 0, flags);
#else
/* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
if (!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
if (!window_title)
window_title = input_filename;
SDL_WM_SetCaption(window_title, window_title);
is->width = screen->w;
is->height = screen->h;
return 0;
}
/* Display the current picture, if any: audio visualization takes
 * precedence when an audio stream exists and a non-video show mode is
 * active; otherwise the decoded video frame is shown. */
static void video_display(VideoState *is)
{
    if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO) {
        video_audio_display(is);
    } else if (is->video_st) {
        video_image_display(is);
    }
}
VideoState *is= opaque;
while(!is->abort_request){
SDL_Event event;
event.type = FF_REFRESH_EVENT;
event.user.data1 = opaque;
//FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
Fabrice Bellard
committed
/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
if (is->paused) {
return is->audio_current_pts;
} else {
return is->audio_current_pts_drift + av_gettime() / 1000000.0;