return 0;
}
/* display the current picture, if any */
static void video_display(VideoState *is)
{
if(!screen)
video_open(cur_stream);
if (is->audio_st && is->show_audio)
video_audio_display(is);
else if (is->video_st)
video_image_display(is);
}
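/* A background thread periodically posts FF_REFRESH_EVENT so the main
   event loop ends up calling video_refresh_timer(); as the FIXME below
   notes, it polls at a fixed interval instead of waking at the exact
   frame time because SDL event delivery is too slow for precise
   scheduling. */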
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;
            SDL_PushEvent(&event);
        }
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
    }
    return 0;
}
/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
double pts;
int hw_buf_size, bytes_per_sec;
pts = is->audio_clock;
hw_buf_size = audio_write_get_buf_size(is);
bytes_per_sec = 0;
if (is->audio_st) {
bytes_per_sec = is->audio_st->codec->sample_rate *
2 * is->audio_st->codec->channels;
}
if (bytes_per_sec)
pts -= (double)hw_buf_size / bytes_per_sec;
return pts;
}
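/* Example (illustrative numbers): for 44100 Hz stereo s16 audio,
   bytes_per_sec = 44100 * 2 * 2 = 176400; with 8192 bytes still queued
   in the hardware buffer, the returned clock is pulled back by
   8192 / 176400 ~= 0.046 s, the time those samples still need to play. */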
/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
    }
}
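/* video_current_pts_drift is stored as (pts - wall_time_at_update), so
   adding the current av_gettime() extrapolates the video clock forward
   between refreshes without touching shared state; while paused the
   clock is simply frozen at video_current_pts. */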
/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
int64_t ti;
ti = av_gettime();
return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}
/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
double val;
if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
if (is->video_st)
val = get_video_clock(is);
else
val = get_audio_clock(is);
} else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
if (is->audio_st)
val = get_audio_clock(is);
else
val = get_video_clock(is);
} else {
val = get_external_clock(is);
    }
return val;
}
/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
if (seek_by_bytes)
is->seek_flags |= AVSEEK_FLAG_BYTE;
is->seek_req = 1;
}
}
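/* Usage sketch (illustrative only; the event loop elsewhere in this
   file does the equivalent with the increment taken from the key
   pressed): */
#if 0
{
    double incr = 10.0;                             /* seconds, hypothetical */
    double pos  = get_master_clock(cur_stream) + incr;
    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE),
                (int64_t)(incr * AV_TIME_BASE), 0);
}
#endif
/* The request is only latched here; the demuxer thread tests seek_req
   and performs the actual demuxer seek, which keeps seeking off the UI
   thread and coalesces repeated requests. */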
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
if (is->paused) {
is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
if(is->read_pause_return != AVERROR(ENOSYS)){
is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
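/* On resume, frame_timer is shifted by the time spent paused (current
   wall time plus the frozen drift minus the frozen pts), so the next
   frame's target time is measured from now rather than from before the
   pause. */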
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;
/* update delay to follow master synchronisation source */
if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
/* if video is slave, we try to correct big delays by
duplicating or deleting a frame */
diff = get_video_clock(is) - get_master_clock(is);
/* skip or repeat frame. We take into account the
delay to compute the threshold. I still don't know
if it is the best guess */
sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
if (diff <= -sync_threshold)
delay = 0;
else if (diff >= sync_threshold)
delay = 2 * delay;
}
}
is->frame_timer += delay;
#if defined(DEBUG_SYNC)
printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
delay, actual_delay, frame_current_pts, -diff);
#endif
    return is->frame_timer;
}
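/* Worked example (illustrative): with 25 fps material the nominal
   delay is 0.040 s, so sync_threshold = FFMAX(AV_SYNC_THRESHOLD,
   0.040) = 0.040. If the video clock trails the master by 0.080 s
   (diff = -0.080 <= -0.040), the delay collapses to 0 and the frame is
   shown at once; if it leads by the same amount, the delay doubles to
   0.080 s so the master can catch up. Differences beyond
   AV_NOSYNC_THRESHOLD are treated as a broken clock and left alone. */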
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
VideoState *is = opaque;
VideoPicture *vp;
    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the queue
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            if (time < vp->target_clock)
                return;
/* update current video pts */
is->video_current_pts = vp->pts;
is->video_current_pts_drift = is->video_current_pts - time;
is->video_current_pos = vp->pos;
if(is->pictq_size > 1){
VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
assert(nextvp->target_clock >= vp->target_clock);
next_target= nextvp->target_clock;
}else{
next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
}
if(framedrop && time > next_target){
is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
if(is->pictq_size > 1 || time > next_target + 0.5){
/* update queue size and signal for next picture */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_rindex = 0;
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
goto retry;
}
}
if(is->subtitle_st) {
if (is->subtitle_stream_changed) {
SDL_LockMutex(is->subpq_mutex);
while (is->subpq_size) {
free_subpicture(&is->subpq[is->subpq_rindex]);
/* update queue size and signal for next picture */
if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
is->subpq_rindex = 0;
is->subpq_size--;
}
is->subtitle_stream_changed = 0;
SDL_CondSignal(is->subpq_cond);
SDL_UnlockMutex(is->subpq_mutex);
} else {
if (is->subpq_size > 0) {
sp = &is->subpq[is->subpq_rindex];
if (is->subpq_size > 1)
sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
else
sp2 = NULL;
if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
|| (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
{
free_subpicture(sp);
/* update queue size and signal for next picture */
if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
is->subpq_rindex = 0;
SDL_LockMutex(is->subpq_mutex);
is->subpq_size--;
SDL_CondSignal(is->subpq_cond);
SDL_UnlockMutex(is->subpq_mutex);
}
}
}
}
            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_rindex = 0;
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
} else if (is->audio_st) {
/* draw the next audio frame */
        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */
/* display picture */
video_display(is);
}
if (show_status) {
static int64_t last_time;
int64_t cur_time;
int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
if (is->audio_st)
aqsize = is->audioq.size;
if (is->video_st)
vqsize = is->videoq.size;
if (is->subtitle_st)
sqsize = is->subtitleq.size;
av_diff = 0;
if (is->audio_st && is->video_st)
av_diff = get_audio_clock(is) - get_video_clock(is);
printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
fflush(stdout);
last_time = cur_time;
}
}
}
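/* Scheduling model: queue_picture() stamps each picture with a
   target_clock when it is queued; this refresh handler shows a picture
   once the wall clock passes that target, and with frame dropping
   enabled it skips pictures whose successor is already due. The status
   line printed above (at most every 30 ms) reports the master clock,
   the A-V difference, the skip estimate, the queue fill levels and the
   faulty dts/pts counters. */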
/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
VideoState *is = opaque;
VideoPicture *vp;
vp = &is->pictq[is->pictq_windex];
if (vp->bmp)
SDL_FreeYUVOverlay(vp->bmp);
#if CONFIG_AVFILTER
if (vp->picref)
avfilter_unref_buffer(vp->picref);
vp->picref = NULL;
vp->width = is->out_video_filter->inputs[0]->w;
vp->height = is->out_video_filter->inputs[0]->h;
vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
vp->width = is->video_st->codec->width;
vp->height = is->video_st->codec->height;
vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
screen);
SDL_LockMutex(is->pictq_mutex);
vp->allocated = 1;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
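/* The decode thread never calls this directly: SDL video calls are
   expected to come from the thread that set up the display, so
   queue_picture() pushes FF_ALLOC_EVENT and blocks on pictq_cond until
   the main thread has run alloc_picture() and set vp->allocated. */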
/**
*
* @param pts the dts of the pkt / pts of the frame and guessed if not known
*/
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
AVPicture pict_src;
#endif
/* wait until we have space to put a new picture */
SDL_LockMutex(is->pictq_mutex);
if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
!is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->videoq.abort_request)
return -1;
vp = &is->pictq[is->pictq_windex];
    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
vp->width != is->out_video_filter->inputs[0]->w ||
vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
SDL_Event event;
vp->allocated = 0;
/* the allocation must be done in the main thread to avoid
locking problems */
event.type = FF_ALLOC_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
/* wait until the picture is allocated */
SDL_LockMutex(is->pictq_mutex);
while (!vp->allocated && !is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->videoq.abort_request)
return -1;
}
    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
if(vp->picref)
avfilter_unref_buffer(vp->picref);
vp->picref = src_frame->opaque;
#endif
/* get a pointer on the bitmap */
SDL_LockYUVOverlay (vp->bmp);
dst_pix_fmt = PIX_FMT_YUV420P;
memset(&pict,0,sizeof(AVPicture));
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
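        /* the U and V assignments above are deliberately crossed: the
           SDL overlay is YV12 (planes ordered Y, V, U) while the
           destination layout is PIX_FMT_YUV420P (Y, U, V), so plane 1
           of one maps to plane 2 of the other */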
#if CONFIG_AVFILTER
pict_src.data[0] = src_frame->data[0];
pict_src.data[1] = src_frame->data[1];
pict_src.data[2] = src_frame->data[2];
pict_src.linesize[0] = src_frame->linesize[0];
pict_src.linesize[1] = src_frame->linesize[1];
pict_src.linesize[2] = src_frame->linesize[2];
//FIXME use direct rendering
av_picture_copy(&pict, &pict_src,
vp->pix_fmt, vp->width, vp->height);
#else
sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
dst_pix_fmt, sws_flags, NULL, NULL, NULL);
if (is->img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
0, vp->height, pict.data, pict.linesize);
#endif
/* update the bitmap content */
SDL_UnlockYUVOverlay(vp->bmp);
vp->pts = pts;
vp->pos = pos;
/* now we can update the picture count */
if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_windex = 0;
SDL_LockMutex(is->pictq_mutex);
vp->target_clock= compute_target_time(vp->pts, is);
is->pictq_size++;
SDL_UnlockMutex(is->pictq_mutex);
}
return 0;
}
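/* Hand-off protocol for the picture ring: the writer fills the slot at
   pictq_windex, advances windex, then increments pictq_size under
   pictq_mutex; the reader in video_refresh_timer() consumes at
   pictq_rindex, decrements pictq_size and signals pictq_cond, which is
   what the SDL_CondWait at the top of this function sleeps on. */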
/**
* compute the exact PTS for the picture if it is omitted in the stream
* @param pts1 the dts of the pkt / pts of the frame
*/
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
double frame_delay, pts;
pts = pts1;
    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
/* update video clock for next frame */
frame_delay = av_q2d(is->video_st->codec->time_base);
/* for MPEG2, the frame can be repeated, so we update the
clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;
#if defined(DEBUG_SYNC) && 0
printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;
if(pkt->data == flush_pkt.data){
avcodec_flush_buffers(is->video_st->codec);
SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
}
while (is->pictq_size && !is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
is->video_current_pos= -1;
SDL_UnlockMutex(is->pictq_mutex);
is->last_dts_for_fault_detection=
is->last_pts_for_fault_detection= INT64_MIN;
is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
is->frame_timer = (double)av_gettime() / 1000000.0;
is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);
if (got_picture) {
if(pkt->dts != AV_NOPTS_VALUE){
is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
is->last_dts_for_fault_detection= pkt->dts;
}
if(frame->reordered_opaque != AV_NOPTS_VALUE){
is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
is->last_pts_for_fault_detection= frame->reordered_opaque;
}
}
if( ( decoder_reorder_pts==1
|| (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
|| pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts = frame->reordered_opaque;
    else if (pkt->dts != AV_NOPTS_VALUE)
        *pts = pkt->dts;
    else
        *pts = 0;
if (got_picture){
is->skip_frames_index += 1;
if(is->skip_frames_index >= is->skip_frames){
is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
return 1;
}
}
return 0;
}
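/* The faulty_pts/faulty_dts counters above implement a small
   heuristic: each non-increasing timestamp seen across consecutive
   decoded frames counts as a fault, and in the automatic mode
   (decoder_reorder_pts neither 0 nor 1) the reordered frame pts is
   trusted only while it has accumulated fewer faults than the packet
   dts. */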
#if CONFIG_AVFILTER
typedef struct {
VideoState *is;
AVFrame *frame;
    int use_dr1;
} FilterPriv;
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
AVFilterContext *ctx = codec->opaque;
AVFilterBufferRef *ref;
int perms = AV_PERM_WRITE;
int i, w, h, stride[4];
unsigned edge;
if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
}
if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
w = codec->width;
h = codec->height;
avcodec_align_dimensions2(codec, &w, &h, stride);
edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
w += edge << 1;
h += edge << 1;
if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
return -1;
ref->w = codec->width;
ref->h = codec->height;
for(i = 0; i < 4; i ++) {
unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
if (ref->data[i]) {
ref->data[i] += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
pic->data[i] = ref->data[i];
pic->linesize[i] = ref->linesize[i];
        }
    }
pic->opaque = ref;
pic->age = INT_MAX;
pic->type = FF_BUFFER_TYPE_USER;
pic->reordered_opaque = codec->reordered_opaque;
return 0;
}
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
AVFilterBufferRef *ref = pic->opaque;
if (pic->data[0] == NULL) {
pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
return codec->get_buffer(codec, pic);
}
if ((codec->width != ref->w) || (codec->height != ref->h) ||
(codec->pix_fmt != ref->format)) {
av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
return -1;
}
pic->reordered_opaque = codec->reordered_opaque;
return 0;
}
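/* These three callbacks implement direct rendering (DR1): when the
   decoder advertises CODEC_CAP_DR1, input_init below points
   get_buffer/release_buffer/reget_buffer at them, so decoded frames
   land directly in buffers obtained from the filter graph and the
   per-frame av_picture_copy() in input_request_frame can be skipped. */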
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
FilterPriv *priv = ctx->priv;
AVCodecContext *codec;
if(!opaque) return -1;
priv->is = opaque;
codec = priv->is->video_st->codec;
codec->opaque = ctx;
if(codec->codec->capabilities & CODEC_CAP_DR1) {
priv->use_dr1 = 1;
codec->get_buffer = input_get_buffer;
codec->release_buffer = input_release_buffer;
        codec->reget_buffer = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();
return 0;
}
static void input_uninit(AVFilterContext *ctx)
{
FilterPriv *priv = ctx->priv;
av_free(priv->frame);
}
static int input_request_frame(AVFilterLink *link)
{
FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
int ret;
while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
av_free_packet(&pkt);
if (ret < 0)
return -1;
if(priv->use_dr1) {
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);
picref->pts = pts;
picref->pos = pkt.pos;
picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
avfilter_start_frame(link, picref);
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
return 0;
}
static int input_query_formats(AVFilterContext *ctx)
{
FilterPriv *priv = ctx->priv;
enum PixelFormat pix_fmts[] = {
priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
};
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
return 0;
}
static int input_config_props(AVFilterLink *link)
{
FilterPriv *priv = link->src->priv;
AVCodecContext *c = priv->is->video_st->codec;
link->w = c->width;
link->h = c->height;
return 0;
}
static AVFilter input_filter =
{
.name = "ffplay_input",
.priv_size = sizeof(FilterPriv),
.init = input_init,
.uninit = input_uninit,
.query_formats = input_query_formats,
.inputs = (AVFilterPad[]) {{ .name = NULL }},
.outputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = input_request_frame,
.config_props = input_config_props, },
{ .name = NULL }},
};
static void output_end_frame(AVFilterLink *link)
{
}
static int output_query_formats(AVFilterContext *ctx)
{
enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
return 0;
}
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
AVFilterBufferRef *pic;
if(avfilter_request_frame(ctx->inputs[0]))
return -1;
if(!(pic = ctx->inputs[0]->cur_pic))
return -1;
ctx->inputs[0]->cur_pic = NULL;
frame->opaque = pic;
*pts = pic->pts;
*pos = pic->pos;
memcpy(frame->data, pic->data, sizeof(frame->data));
memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
return 1;
}
static AVFilter output_filter =
{
.name = "ffplay_output",
.query_formats = output_query_formats,
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.end_frame = output_end_frame,
.min_perms = AV_PERM_READ, },
{ .name = NULL }},
.outputs = (AVFilterPad[]) {{ .name = NULL }},
};
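/* ffplay_input and ffplay_output form a private source/sink pair that
   brackets the user's filter chain: the source pulls decoded frames out
   of get_video_frame() on demand, and the sink does nothing in
   end_frame because get_filtered_video_frame() simply detaches cur_pic
   from the sink's input link after requesting a frame. */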
#endif /* CONFIG_AVFILTER */
static int video_thread(void *arg)
{
VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;
#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
graph->scale_sws_opts = av_strdup(sws_flags_str);
if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end;
if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end;
if(avfilter_init_filter(filt_src, NULL, is)) goto the_end;
if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end;
if(vfilters) {
AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
outputs->name = av_strdup("in");
outputs->filter = filt_src;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter = filt_out;
inputs->pad_idx = 0;
inputs->next = NULL;
if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
goto the_end;
av_freep(&vfilters);
} else {
if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end;
}
avfilter_graph_add_filter(graph, filt_src);
avfilter_graph_add_filter(graph, filt_out);
if(avfilter_graph_check_validity(graph, NULL)) goto the_end;
if(avfilter_graph_config_formats(graph, NULL)) goto the_end;
if(avfilter_graph_config_links(graph, NULL)) goto the_end;
is->out_video_filter = filt_out;
#endif
for(;;) {
#if !CONFIG_AVFILTER
AVPacket pkt;
#endif
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
#if CONFIG_AVFILTER
ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif
if (ret < 0) goto the_end;
if (!ret)
continue;
pts = pts_int*av_q2d(is->video_st->time_base);
#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
av_free_packet(&pkt);
#endif
if (ret < 0)
goto the_end;
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
avfilter_graph_destroy(graph);
av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
static int subtitle_thread(void *arg)
{
VideoState *is = arg;
SubPicture *sp;
AVPacket pkt1, *pkt = &pkt1;
int len1, got_subtitle;
double pts;
int i, j;
int r, g, b, y, u, v, a;
for(;;) {
while (is->paused && !is->subtitleq.abort_request) {
SDL_Delay(10);
}
if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
break;
if(pkt->data == flush_pkt.data){
avcodec_flush_buffers(is->subtitle_st->codec);
continue;
}
SDL_LockMutex(is->subpq_mutex);
while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
!is->subtitleq.abort_request) {
SDL_CondWait(is->subpq_cond, is->subpq_mutex);
}
SDL_UnlockMutex(is->subpq_mutex);
if (is->subtitleq.abort_request)
goto the_end;
sp = &is->subpq[is->subpq_windex];
/* NOTE: ipts is the PTS of the _first_ picture beginning in
this packet, if any */
pts = 0;
if (pkt->pts != AV_NOPTS_VALUE)
pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                        &sp->sub, &got_subtitle,
                                        pkt);
// if (len1 < 0)
// break;
if (got_subtitle && sp->sub.format == 0) {
sp->pts = pts;
for (i = 0; i < sp->sub.num_rects; i++)
{
for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
{
RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
y = RGB_TO_Y_CCIR(r, g, b);
u = RGB_TO_U_CCIR(r, g, b, 0);
v = RGB_TO_V_CCIR(r, g, b, 0);
YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
}
}
/* now we can update the picture count */
if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
is->subpq_windex = 0;
SDL_LockMutex(is->subpq_mutex);
is->subpq_size++;
SDL_UnlockMutex(is->subpq_mutex);
}
av_free_packet(pkt);
// if (cur_stream)
// stream_pause(cur_stream);
}
the_end:
return 0;
}
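/* The per-rect loop above converts each RGBA palette entry of a bitmap
   subtitle (format 0) to YUVA using the CCIR 601 coefficients, so the
   later blend into the YUV overlay needs no per-pixel colorspace
   conversion. */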
/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
int size, len, channels;
channels = is->audio_st->codec->channels;
size = samples_size / sizeof(short);
while (size > 0) {
len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
if (len > size)
len = size;
memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
samples += len;
is->sample_array_index += len;
if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
is->sample_array_index = 0;
size -= len;
}
}
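/* sample_array is a fixed-size circular buffer: the audio path appends
   every decoded chunk here, and video_audio_display() later reads it
   back to draw the waveform or spectrum when show_audio is enabled. */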
/* return the new audio buffer size (samples can be added or deleted
to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;
n = 2 * is->audio_st->codec->channels;
samples_size = samples_size1;
/* if not master, then we try to remove or add samples to correct the clock */
if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
double diff, avg_diff;
int wanted_size, min_size, max_size, nb_samples;
ref_clock = get_master_clock(is);
diff = get_audio_clock(is) - ref_clock;
if (diff < AV_NOSYNC_THRESHOLD) {
is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
/* not enough measures to have a correct estimate */
is->audio_diff_avg_count++;
} else {
/* estimate the A-V difference */
avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
if (fabs(avg_diff) >= is->audio_diff_threshold) {
wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
nb_samples = samples_size / n;
min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;