        } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
            if (is->audio_st)
                val = get_audio_clock(is);
            else
                val = get_video_clock(is);
        } else {
    
    /* seek in the stream */
    
    static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
    {
        if (!is->seek_req) {
            is->seek_pos = pos;
    
            is->seek_rel = rel;
    
            is->seek_flags &= ~AVSEEK_FLAG_BYTE;
    
            if (seek_by_bytes)
                is->seek_flags |= AVSEEK_FLAG_BYTE;
    
            is->seek_req = 1;
        }
    }
    
    /* pause or resume the video */
    static void stream_pause(VideoState *is)
    {
    
        if (is->paused) {
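            /* resuming: advance frame_timer by the wall-clock time spent paused,
               recovered from the frozen pts drift */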
            is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
    
            if (is->read_pause_return != AVERROR(ENOSYS)) {
                is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
            }
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
        }
        is->paused = !is->paused;
    }
    
    static double compute_target_time(double frame_current_pts, VideoState *is)
    {
        double delay, sync_threshold, diff;
    
    
        /* compute nominal delay */
        delay = frame_current_pts - is->frame_last_pts;
        if (delay <= 0 || delay >= 10.0) {
            /* if incorrect delay, use previous one */
            delay = is->frame_last_delay;
        } else {
            is->frame_last_delay = delay;
        }
    
        is->frame_last_pts = frame_current_pts;
    
        /* update delay to follow master synchronisation source */
        if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
             is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
            /* if video is slave, we try to correct big delays by
               duplicating or deleting a frame */
    
            diff = get_video_clock(is) - get_master_clock(is);
    
    
            /* skip or repeat frame. We take into account the
               delay to compute the threshold. I still don't know
               if it is the best guess */
            sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
            if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
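                /* video behind the master clock: show the frame immediately;
                   video ahead: hold it for roughly one extra frame interval */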
                if (diff <= -sync_threshold)
                    delay = 0;
                else if (diff >= sync_threshold)
                    delay = 2 * delay;
            }
        }
        is->frame_timer += delay;
    
    
        av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
                delay, frame_current_pts, -diff);
    
        return is->frame_timer;
    }
    
    /* called to display each frame */
    static void video_refresh_timer(void *opaque)
    {
        VideoState *is = opaque;
        VideoPicture *vp;
        SubPicture *sp, *sp2;
    
    
        if (is->video_st) {
    
    retry:
    
            if (is->pictq_size == 0) {
    
                // nothing to do, no picture to display in the queue
    
            } else {
    
                double time = av_gettime() / 1000000.0;
    
                double next_target;
    
                vp = &is->pictq[is->pictq_rindex];
    
                if (time < vp->target_clock)
    
                    return;
    
                /* update current video pts */
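                /* the drift (pts minus wall clock) lets get_video_clock()
                   extrapolate the clock between refreshes */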
                is->video_current_pts = vp->pts;
    
                is->video_current_pts_drift = is->video_current_pts - time;
    
                if (is->pictq_size > 1) {
                    VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
    
                    assert(nextvp->target_clock >= vp->target_clock);
                    next_target= nextvp->target_clock;
    
                } else {
                    next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
                }

                if (framedrop && time > next_target) {
    
                    is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
    
                    if (is->pictq_size > 1 || time > next_target + 0.5) {
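                        /* only drop the frame if a newer one is already queued
                           or we are more than half a second late */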
    
                        /* update queue size and signal for next picture */
                        if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                            is->pictq_rindex = 0;
    
                        SDL_LockMutex(is->pictq_mutex);
                        is->pictq_size--;
                        SDL_CondSignal(is->pictq_cond);
                        SDL_UnlockMutex(is->pictq_mutex);
                        goto retry;
                    }
                }
    
                if (is->subtitle_st) {
    
                    if (is->subtitle_stream_changed) {
                        SDL_LockMutex(is->subpq_mutex);
    
                        while (is->subpq_size) {
                            free_subpicture(&is->subpq[is->subpq_rindex]);
    
                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;
    
                            is->subpq_size--;
                        }
                        is->subtitle_stream_changed = 0;
    
                        SDL_CondSignal(is->subpq_cond);
                        SDL_UnlockMutex(is->subpq_mutex);
                    } else {
                        if (is->subpq_size > 0) {
                            sp = &is->subpq[is->subpq_rindex];
    
                            if (is->subpq_size > 1)
                                sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                            else
                                sp2 = NULL;
    
                            if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                    || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                            {
                                free_subpicture(sp);
    
                                /* update queue size and signal for next picture */
                                if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                    is->subpq_rindex = 0;
    
                                SDL_LockMutex(is->subpq_mutex);
                                is->subpq_size--;
                                SDL_CondSignal(is->subpq_cond);
                                SDL_UnlockMutex(is->subpq_mutex);
                            }
                        }
                    }
                }
    
    
                /* display picture */
                if (!display_disable)
                    video_display(is);
    
                /* update queue size and signal for next picture */
                if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                    is->pictq_rindex = 0;
    
                SDL_LockMutex(is->pictq_mutex);
                is->pictq_size--;
                SDL_CondSignal(is->pictq_cond);
                SDL_UnlockMutex(is->pictq_mutex);
            }
        } else if (is->audio_st) {
            /* draw the next audio frame */
    
        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */
    
            /* display picture */
            if (!display_disable)
                video_display(is);
    
        }
        if (show_status) {
            static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;
    
            cur_time = av_gettime();
    
            if (!last_time || (cur_time - last_time) >= 30000) {
    
                aqsize = 0;
                vqsize = 0;
                sqsize = 0;
    
                if (is->audio_st)
                    aqsize = is->audioq.size;
                if (is->video_st)
                    vqsize = is->videoq.size;
    
                if (is->subtitle_st)
                    sqsize = is->subtitleq.size;
    
                av_diff = 0;
                if (is->audio_st && is->video_st)
                    av_diff = get_audio_clock(is) - get_video_clock(is);
    
                printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
    
                       get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
                       vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
    
                fflush(stdout);
                last_time = cur_time;
            }
        }
    }
    
    
    static void stream_close(VideoState *is)
    {
        VideoPicture *vp;
        int i;
        /* XXX: use a special url_shutdown call to abort parse cleanly */
        is->abort_request = 1;
        SDL_WaitThread(is->parse_tid, NULL);
        SDL_WaitThread(is->refresh_tid, NULL);
    
        /* free all pictures */
    
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
    
            vp = &is->pictq[i];
    #if CONFIG_AVFILTER
    
            avfilter_unref_bufferp(&vp->picref);
    
    #endif
            if (vp->bmp) {
                SDL_FreeYUVOverlay(vp->bmp);
                vp->bmp = NULL;
            }
        }
        SDL_DestroyMutex(is->pictq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->subpq_cond);
    #if !CONFIG_AVFILTER
        if (is->img_convert_ctx)
            sws_freeContext(is->img_convert_ctx);
    #endif
        av_free(is);
    }
    
    static void do_exit(void)
    {
        if (cur_stream) {
            stream_close(cur_stream);
            cur_stream = NULL;
        }
    
    #if CONFIG_AVFILTER
        avfilter_uninit();
    #endif
    
        avformat_network_deinit();
    
        if (show_status)
            printf("\n");
        SDL_Quit();
    
        av_log(NULL, AV_LOG_QUIET, "");
        exit(0);
    }

    /* allocate a picture (this must be done in the main thread to avoid
       potential locking problems) */
    static void alloc_picture(void *opaque)
    {
        VideoState *is = opaque;
        VideoPicture *vp;
    
        vp = &is->pictq[is->pictq_windex];
    
        if (vp->bmp)
            SDL_FreeYUVOverlay(vp->bmp);
    
    
    #if CONFIG_AVFILTER
    
        avfilter_unref_bufferp(&vp->picref);
    
    
        vp->width   = is->out_video_filter->inputs[0]->w;
        vp->height  = is->out_video_filter->inputs[0]->h;
        vp->pix_fmt = is->out_video_filter->inputs[0]->format;
    #else
        vp->width   = is->video_st->codec->width;
        vp->height  = is->video_st->codec->height;
        vp->pix_fmt = is->video_st->codec->pix_fmt;
    #endif
    
        vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
    
                                       SDL_YV12_OVERLAY,
                                       screen);

        if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
            /* SDL allocates a buffer smaller than requested if the video
             * overlay hardware is unable to support the requested size. */
            fprintf(stderr, "Error: the video system does not support an image\n"
    
                            "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
    
                            "to reduce the image size.\n", vp->width, vp->height );
            do_exit();
        }
    
    
        SDL_LockMutex(is->pictq_mutex);
        vp->allocated = 1;
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);
    }
    
    
    /* The 'pts' parameter is the dts of the packet / pts of the frame and
     * guessed if not known. */
    
    static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
    
    {
        VideoPicture *vp;
    
    #if CONFIG_AVFILTER
        AVPicture pict_src;
    
        int dst_pix_fmt = AV_PIX_FMT_YUV420P;
    
        /* wait until we have space to put a new picture */
        SDL_LockMutex(is->pictq_mutex);
    
        if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
            is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
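        /* the picture queue is full, so decoding keeps ahead of display:
           decay skip_frames back toward 1 (no skipping) before waiting for a slot */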
    
        while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
               !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);
    
        if (is->videoq.abort_request)
            return -1;
    
        vp = &is->pictq[is->pictq_windex];
    
        /* alloc or resize hardware picture buffer */
    
        if (!vp->bmp || vp->reallocate ||
    
    #if CONFIG_AVFILTER
            vp->width  != is->out_video_filter->inputs[0]->w ||
            vp->height != is->out_video_filter->inputs[0]->h) {
    #else
    
            vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
    #endif
            SDL_Event event;
    
    
            vp->allocated  = 0;
            vp->reallocate = 0;
    
    
            /* the allocation must be done in the main thread to avoid
               locking problems */
            event.type = FF_ALLOC_EVENT;
            event.user.data1 = is;
            SDL_PushEvent(&event);
    
            /* wait until the picture is allocated */
            SDL_LockMutex(is->pictq_mutex);
            while (!vp->allocated && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            SDL_UnlockMutex(is->pictq_mutex);
    
            if (is->videoq.abort_request)
                return -1;
        }
    
    
        /* if the frame is not skipped, then display it */
    
        if (vp->bmp) {
    
            AVPicture pict = { { 0 } };
    
    #if CONFIG_AVFILTER
    
            avfilter_unref_bufferp(&vp->picref);
    
            vp->picref = src_frame->opaque;
    #endif
    
            /* get a pointer on the bitmap */
            SDL_LockYUVOverlay (vp->bmp);
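            /* SDL's YV12 overlay stores planes in Y, V, U order while AVPicture
               uses Y, U, V, hence the swapped plane indices in the mapping below */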
    
            pict.data[0] = vp->bmp->pixels[0];
            pict.data[1] = vp->bmp->pixels[2];
            pict.data[2] = vp->bmp->pixels[1];
    
            pict.linesize[0] = vp->bmp->pitches[0];
            pict.linesize[1] = vp->bmp->pitches[2];
            pict.linesize[2] = vp->bmp->pitches[1];
    
    
    #if CONFIG_AVFILTER
            pict_src.data[0] = src_frame->data[0];
            pict_src.data[1] = src_frame->data[1];
            pict_src.data[2] = src_frame->data[2];
    
            pict_src.linesize[0] = src_frame->linesize[0];
            pict_src.linesize[1] = src_frame->linesize[1];
            pict_src.linesize[2] = src_frame->linesize[2];
    
    
            // FIXME use direct rendering
    
            av_picture_copy(&pict, &pict_src,
                            vp->pix_fmt, vp->width, vp->height);
    #else
    
            sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
    
            is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
    
                vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
    
                dst_pix_fmt, sws_flags, NULL, NULL, NULL);
    
            if (is->img_convert_ctx == NULL) {
    
                fprintf(stderr, "Cannot initialize the conversion context\n");
                exit(1);
            }
    
            sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
    
                      0, vp->height, pict.data, pict.linesize);
    #endif
    
            /* update the bitmap content */
            SDL_UnlockYUVOverlay(vp->bmp);
    
    
    
            /* now we can update the picture count */
            if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_windex = 0;
            SDL_LockMutex(is->pictq_mutex);
    
            vp->target_clock = compute_target_time(vp->pts, is);
    
            is->pictq_size++;
            SDL_UnlockMutex(is->pictq_mutex);
        }
        return 0;
    }

    /* Compute the exact PTS for the picture if it is omitted in the stream.
     * The 'pts1' parameter is the dts of the packet / pts of the frame. */
    
    static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
    {
        double frame_delay, pts = pts1;

        if (pts != 0) {
    
            /* update video clock with pts, if present */
    
            is->video_clock = pts;
        } else {
    
            pts = is->video_clock;
        }
        /* update video clock for next frame */
    
        frame_delay = av_q2d(is->video_st->codec->time_base);
    
        /* for MPEG2, the frame can be repeated, so we update the
           clock accordingly */
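        /* each repeat_pict step adds half a frame duration to the display time */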
    
        frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    
        is->video_clock += frame_delay;

        return queue_picture(is, src_frame, pts, pos);
    }

    static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
    {
        int got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;
    
        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
    
            SDL_LockMutex(is->pictq_mutex);
    
            // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
    
            for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
                is->pictq[i].target_clock= 0;
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos = -1;
            SDL_UnlockMutex(is->pictq_mutex);
    
            init_pts_correction(&is->pts_ctx);
            is->frame_last_pts = AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames = 1;
            is->skip_frames_index = 0;
            return 0;
        }
    
        avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
    
    
        if (got_picture) {
            if (decoder_reorder_pts == -1) {
    
                *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
    
            } else if (decoder_reorder_pts) {
                *pts = frame->pkt_pts;
            } else {
                *pts = frame->pkt_dts;
            }
    
            if (*pts == AV_NOPTS_VALUE) {
                *pts = 0;
            }

            is->skip_frames_index += 1;
    
            if (is->skip_frames_index >= is->skip_frames) {
    
                is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
                return 1;
            }
    
        }
    
        return 0;
    }
    
    #if CONFIG_AVFILTER
    
    static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
    {
        char sws_flags_str[128];
    
        char buffersrc_args[256];
    
        int ret;
        AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
    
        AVCodecContext *codec = is->video_st->codec;
    
    
        snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
        graph->scale_sws_opts = av_strdup(sws_flags_str);
    
        snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
                 codec->width, codec->height, codec->pix_fmt,
                 is->video_st->time_base.num, is->video_st->time_base.den,
                 codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
    
    
        if ((ret = avfilter_graph_create_filter(&filt_src,
                                                avfilter_get_by_name("buffer"),
                                                "src", buffersrc_args, NULL,
                                                graph)) < 0)
            return ret;

        if ((ret = avfilter_graph_create_filter(&filt_out,
                                                avfilter_get_by_name("buffersink"),
                                                "out", NULL, NULL, graph)) < 0)
    
        if ((ret = avfilter_graph_create_filter(&filt_format,
                                                avfilter_get_by_name("format"),
                                                "format", "yuv420p", NULL, graph)) < 0)
            return ret;
        if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
            return ret;
    
    
    
        if (vfilters) {
    
            AVFilterInOut *outputs = avfilter_inout_alloc();
            AVFilterInOut *inputs  = avfilter_inout_alloc();
    
    
            outputs->name       = av_strdup("in");
            outputs->filter_ctx = filt_src;
            outputs->pad_idx    = 0;
            outputs->next    = NULL;
    
            inputs->name    = av_strdup("out");
    
            inputs->filter_ctx = filt_format;
    
            inputs->pad_idx = 0;
            inputs->next    = NULL;
    
    
            if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
                return ret;
        } else {
            if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
                return ret;
        }

        if ((ret = avfilter_graph_config(graph, NULL)) < 0)
            return ret;
    
        is->in_video_filter  = filt_src;
    
        is->out_video_filter = filt_out;
    
        if (codec->codec->capabilities & CODEC_CAP_DR1) {
            is->use_dr1 = 1;
            codec->get_buffer     = codec_get_buffer;
            codec->release_buffer = codec_release_buffer;
            codec->opaque         = &is->buffer_pool;
        }
    
    
        return ret;
    }
    
    #endif  /* CONFIG_AVFILTER */
    
    static int video_thread(void *arg)
    {
        VideoState *is = arg;
        AVPacket pkt = { 0 };
    
        AVFrame *frame = avcodec_alloc_frame();
    
        int64_t pts_int;
        double pts;
        int ret;
    
    #if CONFIG_AVFILTER
        AVFilterGraph *graph = avfilter_graph_alloc();
    
        AVFilterContext *filt_out = NULL, *filt_in = NULL;
    
        int last_w = is->video_st->codec->width;
        int last_h = is->video_st->codec->height;
    
    
        if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
            goto the_end;
    
        filt_in  = is->in_video_filter;
        filt_out = is->out_video_filter;
    #endif

        for (;;) {
    #if CONFIG_AVFILTER
            AVFilterBufferRef *picref;
            AVRational tb;
    #endif
            while (is->paused && !is->videoq.abort_request)
                SDL_Delay(10);
    
            ret = get_video_frame(is, frame, &pts_int, &pkt);
            if (ret < 0)
                goto the_end;
    
            if (!ret)
                continue;
    
    
    #if CONFIG_AVFILTER
    
            if (   last_w != is->video_st->codec->width
                || last_h != is->video_st->codec->height) {
                av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
                        is->video_st->codec->width, is->video_st->codec->height);
                avfilter_graph_free(&graph);
                graph = avfilter_graph_alloc();
                if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
                    goto the_end;
    
                filt_out = is->out_video_filter;
                last_w = is->video_st->codec->width;
                last_h = is->video_st->codec->height;
            }
    
    
            frame->pts = pts_int;
            if (is->use_dr1) {
                FrameBuffer      *buf = frame->opaque;
                AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                            frame->data, frame->linesize,
                                            AV_PERM_READ | AV_PERM_PRESERVE,
                                            frame->width, frame->height,
                                            frame->format);
    
                avfilter_copy_frame_props(fb, frame);
                fb->buf->priv           = buf;
                fb->buf->free           = filter_release_buffer;
    
                buf->refcount++;
                av_buffersrc_buffer(filt_in, fb);
    
            } else
                av_buffersrc_write_frame(filt_in, frame);
    
            while (ret >= 0) {
                ret = av_buffersink_read(filt_out, &picref);
                if (ret < 0) {
                    ret = 0;
                    break;
                }
    
    
                avfilter_copy_buf_props(frame, picref);

                pts_int = picref->pts;
                tb      = filt_out->inputs[0]->time_base;
    
                pos     = picref->pos;
                frame->opaque = picref;
    
                if (av_cmp_q(tb, is->video_st->time_base)) {
                    av_unused int64_t pts1 = pts_int;
                    pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
                    av_dlog(NULL, "video_thread(): "
                            "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                            tb.num, tb.den, pts1,
                            is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
                }
                pts = pts_int * av_q2d(is->video_st->time_base);
                ret = output_picture2(is, frame, pts, pos);
            }
    #else
            pts = pts_int * av_q2d(is->video_st->time_base);
    
            ret = output_picture2(is, frame, pts,  pkt.pos);
    #endif

            if (ret < 0)
                goto the_end;
    
    
        }
     the_end:
    
    #if CONFIG_AVFILTER
    
        avfilter_graph_free(&graph);
    #endif

        av_free_packet(&pkt);
    
        avcodec_free_frame(&frame);
        return 0;
    }

    static int subtitle_thread(void *arg)
    {
        VideoState *is = arg;
        SubPicture *sp;
        AVPacket pkt1, *pkt = &pkt1;
        int got_subtitle;
        double pts;
        int i, j;
        int r, g, b, y, u, v, a;

        for (;;) {
    
            while (is->paused && !is->subtitleq.abort_request) {
                SDL_Delay(10);
            }
            if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
                break;
    
            if (pkt->data == flush_pkt.data) {
    
                avcodec_flush_buffers(is->subtitle_st->codec);
                continue;
            }
    
            SDL_LockMutex(is->subpq_mutex);
            while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
                   !is->subtitleq.abort_request) {
                SDL_CondWait(is->subpq_cond, is->subpq_mutex);
            }
            SDL_UnlockMutex(is->subpq_mutex);
    
            sp = &is->subpq[is->subpq_windex];
    
           /* NOTE: ipts is the PTS of the _first_ picture beginning in
               this packet, if any */
            pts = 0;
            if (pkt->pts != AV_NOPTS_VALUE)
    
                pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
    
            avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                     &got_subtitle, pkt);
    
    
            if (got_subtitle && sp->sub.format == 0) {
                sp->pts = pts;
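                /* bitmap subtitles (format 0): convert each palette entry from
                   RGBA to CCIR-601 YUVA so it can be blended onto the YUV overlay */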
    
                for (i = 0; i < sp->sub.num_rects; i++)
                {
    
                    for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                    {
                        RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
    
                        y = RGB_TO_Y_CCIR(r, g, b);
                        u = RGB_TO_U_CCIR(r, g, b, 0);
                        v = RGB_TO_V_CCIR(r, g, b, 0);
    
                        YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
    
                    }
                }
    
                /* now we can update the picture count */
                if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                    is->subpq_windex = 0;
                SDL_LockMutex(is->subpq_mutex);
                is->subpq_size++;
                SDL_UnlockMutex(is->subpq_mutex);
            }
            av_free_packet(pkt);
        }
        return 0;
    }
    
    
    /* copy samples for viewing in editor window */
    static void update_sample_display(VideoState *is, short *samples, int samples_size)
    {
        int size, len;

    
        size = samples_size / sizeof(short);
        while (size > 0) {
            len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
            if (len > size)
                len = size;
            memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
            samples += len;
            is->sample_array_index += len;
            if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
                is->sample_array_index = 0;
            size -= len;
        }
    }
    
    /* return the new audio buffer size (samples can be added or deleted
       to improve sync when video or an external clock is the master) */
    
    static int synchronize_audio(VideoState *is, short *samples,
                                 int samples_size1, double pts)
    {
        int n, samples_size = samples_size1;
        double ref_clock;
    
        n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
    
    
        /* if not master, then we try to remove or add samples to correct the clock */
        if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
    
             is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
            double diff, avg_diff;
    
            int wanted_size, min_size, max_size, nb_samples;
    
            ref_clock = get_master_clock(is);
            diff = get_audio_clock(is) - ref_clock;
    
            if (diff < AV_NOSYNC_THRESHOLD) {
                is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
                if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                    /* not enough measures to have a correct estimate */
                    is->audio_diff_avg_count++;
                } else {
                    /* estimate the A-V difference */
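                    /* audio_diff_cum is a geometric series with ratio audio_diff_avg_coef;
                       scaling by (1 - coef) turns it into a weighted average of recent diffs */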
                    avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
    
                    if (fabs(avg_diff) >= is->audio_diff_threshold) {
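                        /* drift in seconds times the sample rate gives a sample count;
                           n (channels * bytes per sample) converts it to bytes */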
    
                        wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
                        nb_samples  = samples_size / n;
    
                        min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                        max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                        if (wanted_size < min_size)
                            wanted_size = min_size;
                        else if (wanted_size > max_size)
                            wanted_size = max_size;
    
                        /* add or remove samples to correct the sync */
                        if (wanted_size < samples_size) {
                            /* remove samples */
                            samples_size = wanted_size;
                        } else if (wanted_size > samples_size) {
                            uint8_t *samples_end, *q;
                            int nb;
    
                            /* add samples */
                            nb = (wanted_size - samples_size);
                            samples_end = (uint8_t *)samples + samples_size - n;
                            q = samples_end + n;
                            while (nb > 0) {
                                memcpy(q, samples_end, n);
                                q += n;
                                nb -= n;
                            }
                            samples_size = wanted_size;
                        }
                    }
    
                    av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                            diff, avg_diff, samples_size - samples_size1,
                            is->audio_clock, is->video_clock, is->audio_diff_threshold);
                }

            } else {
                /* too big difference : may be initial PTS errors, so
                   reset A-V filter */
                is->audio_diff_avg_count = 0;
    
                is->audio_diff_cum       = 0;
    
            }
        }
    
        return samples_size;
    }
    
    /* decode one audio frame and return its uncompressed size */
    
    static int audio_decode_frame(VideoState *is, double *pts_ptr)
    {
        AVPacket *pkt_temp = &is->audio_pkt_temp;
    
        AVPacket *pkt = &is->audio_pkt;
    
        AVCodecContext *dec = is->audio_st->codec;
    
        int n, len1, data_size, got_frame;
    
        double pts;
    
        int new_packet = 0;
        int flush_complete = 0;
    
        for (;;) {
    
            /* NOTE: the audio packet can contain several frames */
    
            while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
    
                if (!is->frame) {
                    if (!(is->frame = avcodec_alloc_frame()))
                        return AVERROR(ENOMEM);
                } else
                    avcodec_get_frame_defaults(is->frame);
    
    
                len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
    
                if (len1 < 0) {
                    /* if error, we skip the frame */
    
                    pkt_temp->size = 0;
    
                    break;
                }

                pkt_temp->data += len1;
                pkt_temp->size -= len1;
    
                if (!got_frame) {
    
                    /* stop sending empty packets if the decoder is finished */
                    if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                        flush_complete = 1;
    
                    continue;
                }

                data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                                       is->frame->nb_samples,
                                                       is->frame->format, 1);

                audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
                                 is->frame->channel_layout != is->sdl_channel_layout ||
                                 is->frame->sample_rate    != is->sdl_sample_rate;
    
                resample_changed = is->frame->format         != is->resample_sample_fmt     ||
                                   is->frame->channel_layout != is->resample_channel_layout ||
                                   is->frame->sample_rate    != is->resample_sample_rate;
    
    
                if ((!is->avr && audio_resample) || resample_changed) {
    
                    if (is->avr)
                        avresample_close(is->avr);
                    else if (audio_resample) {
                        is->avr = avresample_alloc_context();
                        if (!is->avr) {
                            fprintf(stderr, "error allocating AVAudioResampleContext\n");
    
                        av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
                        av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
                        av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
    
                        av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
                        av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
                        av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
    
    
                        if ((ret = avresample_open(is->avr)) < 0) {
                            fprintf(stderr, "error initializing libavresample\n");
                            break;
                        }
                    }

                    is->resample_sample_fmt     = is->frame->format;
                    is->resample_channel_layout = is->frame->channel_layout;
    
                    is->resample_sample_rate    = is->frame->sample_rate;
                }

                if (audio_resample) {
                    void *tmp_out;
                    int out_samples, out_size, out_linesize;
                    int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
                    int nb_samples = is->frame->nb_samples;
    
                    out_size = av_samples_get_buffer_size(&out_linesize,
                                                          is->sdl_channels,
                                                          nb_samples,
                                                          is->sdl_sample_fmt, 0);
                    tmp_out = av_realloc(is->audio_buf1, out_size);
                    if (!tmp_out)
    
                        return AVERROR(ENOMEM);
    
                    is->audio_buf1 = tmp_out;
    
                    out_samples = avresample_convert(is->avr,
                                                     &is->audio_buf1,
                                                     out_linesize, nb_samples,
                                                     is->frame->data,
                                                     is->frame->linesize[0],
                                                     is->frame->nb_samples);
                    if (out_samples < 0) {
                        fprintf(stderr, "avresample_convert() failed\n");
    
                    is->audio_buf = is->audio_buf1;
    
                    data_size = out_samples * osize * is->sdl_channels;
    
                } else {
    
                    is->audio_buf = is->frame->data[0];
                }

                /* if no pts, then compute it */
                pts = is->audio_clock;
                *pts_ptr = pts;
    
                n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
    
                is->audio_clock += (double)data_size /
    
                    (double)(n * is->sdl_sample_rate);
    
    #ifdef DEBUG
                {
                    static double last_clock;
                    printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                           is->audio_clock - last_clock,
                           is->audio_clock, pts);
                    last_clock = is->audio_clock;
                }
    #endif
                return data_size;
    
            /* free the current packet */
            if (pkt->data)
    
                av_free_packet(pkt);