"README.md" did not exist on "9106a698e726c041128a05db0a011caae755d10b"
Newer
Older
} else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
if (is->audio_st)
val = get_audio_clock(is);
else
val = get_video_clock(is);
} else {
Fabrice Bellard
committed
val = get_external_clock(is);
Fabrice Bellard
committed
return val;
}
Michael Niedermayer
committed
/* Request an asynchronous seek; the demux thread performs it.
 * pos/rel are in AV_TIME_BASE units unless seek_by_bytes is set,
 * in which case they are byte offsets. */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    /* coalesce: ignore new requests while one is still pending */
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: credit the wall-clock time spent paused to the frame
         * timer so scheduling does not jump forward */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* NOTE(review): restored guard — only re-derive the pts when the
         * demuxer supported read_pause; verify against upstream history */
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
/* Compute the absolute display time for a frame with the given pts and
 * advance is->frame_timer by the (possibly sync-corrected) inter-frame
 * delay. Returns the updated frame_timer (the target clock). */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    /* diff initialized: it is printed below even when the sync-correction
     * branch is not taken (was an uninitialized read) */
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;              /* video lags: show ASAP */
            else if (diff >= sync_threshold)
                delay = 2 * delay;      /* video ahead: hold the frame */
        }
    }
    is->frame_timer += delay;

    av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);

    return is->frame_timer;
}
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
VideoState *is = opaque;
VideoPicture *vp;
Fabrice Bellard
committed
SubPicture *sp, *sp2;
Fabrice Bellard
committed
/* dequeue the picture */
Fabrice Bellard
committed
Fabrice Bellard
committed
/* update current video pts */
is->video_current_pts = vp->pts;
is->video_current_pts_drift = is->video_current_pts - time;
Michael Niedermayer
committed
is->video_current_pos = vp->pos;
if (is->pictq_size > 1) {
VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
assert(nextvp->target_clock >= vp->target_clock);
next_target= nextvp->target_clock;
} else {
next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
if (is->pictq_size > 1 || time > next_target + 0.5) {
/* update queue size and signal for next picture */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_rindex = 0;
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
goto retry;
}
}
Fabrice Bellard
committed
if (is->subtitle_stream_changed) {
SDL_LockMutex(is->subpq_mutex);
while (is->subpq_size) {
free_subpicture(&is->subpq[is->subpq_rindex]);
/* update queue size and signal for next picture */
if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
is->subpq_rindex = 0;
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
is->subpq_size--;
}
is->subtitle_stream_changed = 0;
SDL_CondSignal(is->subpq_cond);
SDL_UnlockMutex(is->subpq_mutex);
} else {
if (is->subpq_size > 0) {
sp = &is->subpq[is->subpq_rindex];
if (is->subpq_size > 1)
sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
else
sp2 = NULL;
if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
|| (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
{
free_subpicture(sp);
/* update queue size and signal for next picture */
if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
is->subpq_rindex = 0;
SDL_LockMutex(is->subpq_mutex);
is->subpq_size--;
SDL_CondSignal(is->subpq_cond);
SDL_UnlockMutex(is->subpq_mutex);
}
}
}
}
if (!display_disable)
/* update queue size and signal for next picture */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_rindex = 0;
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
} else if (is->audio_st) {
/* draw the next audio frame */
/* if only audio stream, then display the audio bars (better
than nothing, just to test the implementation */
if (!display_disable)
}
if (show_status) {
static int64_t last_time;
int64_t cur_time;
int aqsize, vqsize, sqsize;
Fabrice Bellard
committed
double av_diff;
if (!last_time || (cur_time - last_time) >= 30000) {
sqsize = 0;
if (is->audio_st)
aqsize = is->audioq.size;
if (is->video_st)
vqsize = is->videoq.size;
if (is->subtitle_st)
sqsize = is->subtitleq.size;
Fabrice Bellard
committed
av_diff = 0;
if (is->audio_st && is->video_st)
av_diff = get_audio_clock(is) - get_video_clock(is);
printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
fflush(stdout);
last_time = cur_time;
}
}
}
/* Tear down a VideoState: stop the worker threads, free every queued
 * picture and the synchronization primitives, then the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        avfilter_unref_bufferp(&vp->picref);
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
/* Global cleanup and process exit (never returns). */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    avformat_network_deinit();
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* flush av_log's internal state */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
/* allocate a picture (needs to do that in main thread to avoid
potential locking problems */
static void alloc_picture(void *opaque)
{
VideoState *is = opaque;
VideoPicture *vp;
vp = &is->pictq[is->pictq_windex];
if (vp->bmp)
SDL_FreeYUVOverlay(vp->bmp);
avfilter_unref_bufferp(&vp->picref);
vp->width = is->out_video_filter->inputs[0]->w;
vp->height = is->out_video_filter->inputs[0]->h;
vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
vp->width = is->video_st->codec->width;
vp->height = is->video_st->codec->height;
vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
Fabrice Bellard
committed
screen);
if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
/* SDL allocates a buffer smaller than requested if the video
* overlay hardware is unable to support the requested size. */
fprintf(stderr, "Error: the video system does not support an image\n"
"size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
"to reduce the image size.\n", vp->width, vp->height );
do_exit();
}
SDL_LockMutex(is->pictq_mutex);
vp->allocated = 1;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
/* The 'pts' parameter is the dts of the packet / pts of the frame and
* guessed if not known. */
Michael Niedermayer
committed
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
#if CONFIG_AVFILTER
AVPicture pict_src;
int dst_pix_fmt = AV_PIX_FMT_YUV420P;
/* wait until we have space to put a new picture */
SDL_LockMutex(is->pictq_mutex);
if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
!is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->videoq.abort_request)
return -1;
vp = &is->pictq[is->pictq_windex];
/* alloc or resize hardware picture buffer */
if (!vp->bmp || vp->reallocate ||
#if CONFIG_AVFILTER
vp->width != is->out_video_filter->inputs[0]->w ||
vp->height != is->out_video_filter->inputs[0]->h) {
#else
Michael Niedermayer
committed
vp->width != is->video_st->codec->width ||
vp->height != is->video_st->codec->height) {
vp->allocated = 0;
vp->reallocate = 0;
/* the allocation must be done in the main thread to avoid
locking problems */
event.type = FF_ALLOC_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
/* wait until the picture is allocated */
SDL_LockMutex(is->pictq_mutex);
while (!vp->allocated && !is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->videoq.abort_request)
return -1;
}
Fabrice Bellard
committed
/* if the frame is not skipped, then display it */
avfilter_unref_bufferp(&vp->picref);
vp->picref = src_frame->opaque;
#endif
/* get a pointer on the bitmap */
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
#if CONFIG_AVFILTER
pict_src.data[0] = src_frame->data[0];
pict_src.data[1] = src_frame->data[1];
pict_src.data[2] = src_frame->data[2];
pict_src.linesize[0] = src_frame->linesize[0];
pict_src.linesize[1] = src_frame->linesize[1];
pict_src.linesize[2] = src_frame->linesize[2];
av_picture_copy(&pict, &pict_src,
vp->pix_fmt, vp->width, vp->height);
#else
sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
dst_pix_fmt, sws_flags, NULL, NULL, NULL);
if (is->img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
0, vp->height, pict.data, pict.linesize);
#endif
/* update the bitmap content */
SDL_UnlockYUVOverlay(vp->bmp);
Fabrice Bellard
committed
vp->pts = pts;
Michael Niedermayer
committed
vp->pos = pos;
/* now we can update the picture count */
if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_windex = 0;
SDL_LockMutex(is->pictq_mutex);
vp->target_clock = compute_target_time(vp->pts, is);
is->pictq_size++;
SDL_UnlockMutex(is->pictq_mutex);
}
Fabrice Bellard
committed
return 0;
}
/* Compute the exact PTS for the picture if it is omitted in the stream.
* The 'pts1' parameter is the dts of the packet / pts of the frame. */
Michael Niedermayer
committed
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
Fabrice Bellard
committed
{
double frame_delay, pts;
Fabrice Bellard
committed
pts = pts1;
Fabrice Bellard
committed
/* update video clock with pts, if present */
pts = is->video_clock;
}
/* update video clock for next frame */
Michael Niedermayer
committed
frame_delay = av_q2d(is->video_st->codec->time_base);
/* for MPEG2, the frame can be repeated, so we update the
clock accordingly */
frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
Fabrice Bellard
committed
Michael Niedermayer
committed
return queue_picture(is, src_frame, pts, pos);
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a displayable frame was produced (pts set), 0 when the
 * packet yielded no frame / was a flush / the frame is skipped, and -1 on
 * abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* flush packet: reset decoder and all timing state after a seek */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        // Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame skipping: only pass a frame through when the skip
         * accumulator has been filled */
        is->skip_frames_index += 1;
        if (is->skip_frames_index >= is->skip_frames) {
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }
    }
    return 0;
}
#if CONFIG_AVFILTER
/* Build the video filter graph: buffer source -> [user vfilters] ->
 * format=yuv420p -> buffersink. Stores the endpoints in is->in_video_filter
 * and is->out_video_filter. Returns 0 or a negative AVERROR. */
/* NOTE(review): `sws_flags_str`, the vfilters if/else split and the
 * out_video_filter assignment were lost to the scrape; restored — verify
 * against upstream avplay.c history. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    char buffersrc_args[256];
    int ret;
    AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
    AVCodecContext *codec = is->video_st->codec;

    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
             codec->width, codec->height, codec->pix_fmt,
             is->video_st->time_base.num, is->video_st->time_base.den,
             codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);

    if ((ret = avfilter_graph_create_filter(&filt_src,
                                            avfilter_get_by_name("buffer"),
                                            "src", buffersrc_args, NULL,
                                            graph)) < 0)
        return ret;
    if ((ret = avfilter_graph_create_filter(&filt_out,
                                            avfilter_get_by_name("buffersink"),
                                            "out", NULL, NULL, graph)) < 0)
        return ret;

    /* force yuv420p on the sink side so SDL can display the result */
    if ((ret = avfilter_graph_create_filter(&filt_format,
                                            avfilter_get_by_name("format"),
                                            "format", "yuv420p", NULL, graph)) < 0)
        return ret;
    if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
        return ret;

    if (vfilters) {
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();

        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_format;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            return ret;
    } else {
        if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
            return ret;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        return ret;

    is->in_video_filter  = filt_src;
    is->out_video_filter = filt_out;

    if (codec->codec->capabilities & CODEC_CAP_DR1) {
        is->use_dr1 = 1;
        codec->get_buffer     = codec_get_buffer;
        codec->release_buffer = codec_release_buffer;
        codec->opaque         = &is->buffer_pool;
    }

    return ret;
}
#endif  /* CONFIG_AVFILTER */
static int video_thread(void *arg)
{
Stefano Sabatini
committed
VideoState *is = arg;
Stefano Sabatini
committed
int64_t pts_int;
double pts;
int ret;
#if CONFIG_AVFILTER
AVFilterGraph *graph = avfilter_graph_alloc();
AVFilterContext *filt_out = NULL, *filt_in = NULL;
Stefano Sabatini
committed
int64_t pos;
int last_w = is->video_st->codec->width;
int last_h = is->video_st->codec->height;
Stefano Sabatini
committed
if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
goto the_end;
filt_in = is->in_video_filter;
Stefano Sabatini
committed
filt_out = is->out_video_filter;
#if CONFIG_AVFILTER
AVFilterBufferRef *picref;
AVRational tb;
#endif
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
av_free_packet(&pkt);
ret = get_video_frame(is, frame, &pts_int, &pkt);
if (ret < 0)
goto the_end;
if (!ret)
continue;
if ( last_w != is->video_st->codec->width
|| last_h != is->video_st->codec->height) {
av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
is->video_st->codec->width, is->video_st->codec->height);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
goto the_end;
filt_in = is->in_video_filter;
filt_out = is->out_video_filter;
last_w = is->video_st->codec->width;
last_h = is->video_st->codec->height;
}
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
frame->pts = pts_int;
if (is->use_dr1) {
FrameBuffer *buf = frame->opaque;
AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
frame->data, frame->linesize,
AV_PERM_READ | AV_PERM_PRESERVE,
frame->width, frame->height,
frame->format);
avfilter_copy_frame_props(fb, frame);
fb->buf->priv = buf;
fb->buf->free = filter_release_buffer;
buf->refcount++;
av_buffersrc_buffer(filt_in, fb);
} else
av_buffersrc_write_frame(filt_in, frame);
while (ret >= 0) {
ret = av_buffersink_read(filt_out, &picref);
if (ret < 0) {
ret = 0;
break;
}
avfilter_copy_buf_props(frame, picref);
pts_int = picref->pts;
tb = filt_out->inputs[0]->time_base;
pos = picref->pos;
frame->opaque = picref;
if (av_cmp_q(tb, is->video_st->time_base)) {
av_unused int64_t pts1 = pts_int;
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
av_dlog(NULL, "video_thread(): "
"tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
tb.num, tb.den, pts1,
is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
}
pts = pts_int * av_q2d(is->video_st->time_base);
ret = output_picture2(is, frame, pts, pos);
}
ret = output_picture2(is, frame, pts, pkt.pos);
if (ret < 0)
goto the_end;
Wolfgang Hesseler
committed
if (cur_stream)
stream_pause(cur_stream);
av_freep(&vfilters);
avfilter_graph_free(&graph);
static int subtitle_thread(void *arg)
{
VideoState *is = arg;
SubPicture *sp;
AVPacket pkt1, *pkt = &pkt1;
Diego Biurrun
committed
int got_subtitle;
double pts;
int i, j;
int r, g, b, y, u, v, a;
while (is->paused && !is->subtitleq.abort_request) {
SDL_Delay(10);
}
if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
break;
avcodec_flush_buffers(is->subtitle_st->codec);
continue;
}
SDL_LockMutex(is->subpq_mutex);
while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
!is->subtitleq.abort_request) {
SDL_CondWait(is->subpq_cond, is->subpq_mutex);
}
SDL_UnlockMutex(is->subpq_mutex);
if (is->subtitleq.abort_request)
return 0;
sp = &is->subpq[is->subpq_windex];
/* NOTE: ipts is the PTS of the _first_ picture beginning in
this packet, if any */
pts = 0;
if (pkt->pts != AV_NOPTS_VALUE)
pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
Diego Biurrun
committed
avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
&got_subtitle, pkt);
if (got_subtitle && sp->sub.format == 0) {
sp->pts = pts;
for (i = 0; i < sp->sub.num_rects; i++)
{
for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
{
RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
y = RGB_TO_Y_CCIR(r, g, b);
u = RGB_TO_U_CCIR(r, g, b, 0);
v = RGB_TO_V_CCIR(r, g, b, 0);
YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
}
}
/* now we can update the picture count */
if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
is->subpq_windex = 0;
SDL_LockMutex(is->subpq_mutex);
is->subpq_size++;
SDL_UnlockMutex(is->subpq_mutex);
}
av_free_packet(pkt);
}
return 0;
}
/* copy samples for viewing in editor window */
/* Appends samples into the circular sample_array used by the audio
 * waveform display, wrapping at SAMPLE_ARRAY_SIZE. samples_size is in
 * bytes; the array holds shorts. */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len;

    size = samples_size / sizeof(short);
    while (size > 0) {
        /* copy at most up to the end of the circular buffer per pass */
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        /* fabs(): a large drift in either direction must reset the filter
         * (the old "diff < threshold" let big negative drift through) */
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correction the synchro */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples by duplicating the final sample;
                         * fixed: was (samples_size - wanted_size), which is
                         * negative here, so the loop never ran and the
                         * grown tail of the buffer was left uninitialized */
                        nb = (wanted_size - samples_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
                av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                        diff, avg_diff, samples_size - samples_size1,
                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum       = 0;
        }
    }

    return samples_size;
}
/* decode one audio frame and returns its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
AVPacket *pkt_temp = &is->audio_pkt_temp;
int n, len1, data_size, got_frame;
int new_packet = 0;
int flush_complete = 0;
/* NOTE: the audio packet can contain several frames */
while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
int resample_changed, audio_resample;
if (!is->frame) {
if (!(is->frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
} else
avcodec_get_frame_defaults(is->frame);
if (flush_complete)
break;
new_packet = 0;
len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
if (len1 < 0) {
/* if error, we skip the frame */
pkt_temp->data += len1;
pkt_temp->size -= len1;
/* stop sending empty packets if the decoder is finished */
if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
flush_complete = 1;
}
data_size = av_samples_get_buffer_size(NULL, dec->channels,
is->frame->nb_samples,
Justin Ruggles
committed
is->frame->format, 1);
audio_resample = is->frame->format != is->sdl_sample_fmt ||
is->frame->channel_layout != is->sdl_channel_layout ||
is->frame->sample_rate != is->sdl_sample_rate;
resample_changed = is->frame->format != is->resample_sample_fmt ||
is->frame->channel_layout != is->resample_channel_layout ||
is->frame->sample_rate != is->resample_sample_rate;
if ((!is->avr && audio_resample) || resample_changed) {
Justin Ruggles
committed
int ret;
if (is->avr)
avresample_close(is->avr);
else if (audio_resample) {
is->avr = avresample_alloc_context();
if (!is->avr) {
fprintf(stderr, "error allocating AVAudioResampleContext\n");
}
Justin Ruggles
committed
}
if (audio_resample) {
Justin Ruggles
committed
av_opt_set_int(is->avr, "in_channel_layout", is->frame->channel_layout, 0);
av_opt_set_int(is->avr, "in_sample_fmt", is->frame->format, 0);
av_opt_set_int(is->avr, "in_sample_rate", is->frame->sample_rate, 0);
av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
av_opt_set_int(is->avr, "out_sample_fmt", is->sdl_sample_fmt, 0);
av_opt_set_int(is->avr, "out_sample_rate", is->sdl_sample_rate, 0);
if ((ret = avresample_open(is->avr)) < 0) {
fprintf(stderr, "error initializing libavresample\n");
break;
}
Justin Ruggles
committed
is->resample_sample_fmt = is->frame->format;
is->resample_channel_layout = is->frame->channel_layout;
is->resample_sample_rate = is->frame->sample_rate;
if (audio_resample) {
void *tmp_out;
int out_samples, out_size, out_linesize;
int osize = av_get_bytes_per_sample(is->sdl_sample_fmt);
int nb_samples = is->frame->nb_samples;
out_size = av_samples_get_buffer_size(&out_linesize,
is->sdl_channels,
nb_samples,
is->sdl_sample_fmt, 0);
tmp_out = av_realloc(is->audio_buf1, out_size);
if (!tmp_out)
is->audio_buf1 = tmp_out;
out_samples = avresample_convert(is->avr,
&is->audio_buf1,
out_linesize, nb_samples,
is->frame->data,
is->frame->linesize[0],
is->frame->nb_samples);
if (out_samples < 0) {
fprintf(stderr, "avresample_convert() failed\n");
data_size = out_samples * osize * is->sdl_channels;
is->audio_buf = is->frame->data[0];
/* if no pts, then compute it */
pts = is->audio_clock;
*pts_ptr = pts;
n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
is->audio_clock += (double)data_size /
(double)(n * is->sdl_sample_rate);
#ifdef DEBUG
{
static double last_clock;
printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
is->audio_clock - last_clock,
is->audio_clock, pts);
last_clock = is->audio_clock;
/* free the current packet */
if (pkt->data)