ffplay.c

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
        int hw_buf_size, bytes_per_sec;
        pts = is->audio_clock;
        hw_buf_size = audio_write_get_buf_size(is);
        bytes_per_sec = 0;
        if (is->audio_st) {
    
            bytes_per_sec = is->audio_st->codec->sample_rate *
                            2 * is->audio_st->codec->channels;
        }
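    /* audio_clock tracks the PTS of the last decoded sample; subtract the
       audio still sitting in the hardware buffer since it has not been
       played out yet */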
        if (bytes_per_sec)
            pts -= (double)hw_buf_size / bytes_per_sec;
        return pts;
    }
    
    /* get the current video clock value */
    static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
    }
}
    
    /* get the current external clock value */
    static double get_external_clock(VideoState *is)
    {
        int64_t ti;
        ti = av_gettime();
        return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
    }
    
    /* get the current master clock value */
    static double get_master_clock(VideoState *is)
    {
        double val;
    
    
        if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
            if (is->video_st)
                val = get_video_clock(is);
            else
                val = get_audio_clock(is);
        } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
            if (is->audio_st)
                val = get_audio_clock(is);
            else
                val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

    /* seek in the stream */
    
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
        if (!is->seek_req) {
            is->seek_pos = pos;
    
            is->seek_rel = rel;
    
            is->seek_flags &= ~AVSEEK_FLAG_BYTE;
    
            if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}
    
    /* pause or resume the video */
    static void stream_pause(VideoState *is)
    {
    
        if (is->paused) {
            is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
    
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}

static double compute_target_time(double frame_current_pts, VideoState *is)
{
        double delay, sync_threshold, diff;
    
    
        /* compute nominal delay */
        delay = frame_current_pts - is->frame_last_pts;
        if (delay <= 0 || delay >= 10.0) {
            /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
        is->frame_last_pts = frame_current_pts;
    
        /* update delay to follow master synchronisation source */
        if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
             is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
            /* if video is slave, we try to correct big delays by
               duplicating or deleting a frame */
    
            diff = get_video_clock(is) - get_master_clock(is);
    
    
            /* skip or repeat frame. We take into account the
               delay to compute the threshold. I still don't know
               if it is the best guess */
            sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
            if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if (diff <= -sync_threshold)
                    delay = 0;
                else if (diff >= sync_threshold)
                    delay = 2 * delay;
            }
        }
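    /* frame_timer accumulates the scheduled display times in wall-clock
       seconds; the value returned below is the deadline at which the
       caller should show this frame */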
        is->frame_timer += delay;
    
    #if defined(DEBUG_SYNC)
        printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                delay, actual_delay, frame_current_pts, -diff);
    #endif
    
    
    return is->frame_timer;
}

    /* called to display each frame */
    static void video_refresh_timer(void *opaque)
    {
        VideoState *is = opaque;
    VideoPicture *vp;
    SubPicture *sp, *sp2;

    
        if (is->video_st) {
    
    retry:
    
            if (is->pictq_size == 0) {
    
            //nothing to do, no picture to display in the queue
    
            } else {
    
                double time= av_gettime()/1000000.0;
                double next_target;
    
                vp = &is->pictq[is->pictq_rindex];
    
                if(time < vp->target_clock)
                    return;
    
                /* update current video pts */
                is->video_current_pts = vp->pts;
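            /* the drift is the pts minus the wall clock; get_video_clock()
               adds the current time back, so the clock keeps advancing
               between refreshes */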
    
                is->video_current_pts_drift = is->video_current_pts - time;
    
                if(is->pictq_size > 1){
                    VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                    assert(nextvp->target_clock >= vp->target_clock);
                    next_target= nextvp->target_clock;
                }else{
                    next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
                }
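            /* the wall clock is already past the next frame's display time:
               raise the drop ratio and skip the current picture (the last
               queued picture is only skipped when more than half a second late) */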
                if(framedrop && time > next_target){
                    is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                    if(is->pictq_size > 1 || time > next_target + 0.5){
                        /* update queue size and signal for next picture */
                        if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                            is->pictq_rindex = 0;
    
                        SDL_LockMutex(is->pictq_mutex);
                        is->pictq_size--;
                        SDL_CondSignal(is->pictq_cond);
                        SDL_UnlockMutex(is->pictq_mutex);
                        goto retry;
                    }
                }
    
                if(is->subtitle_st) {
                    if (is->subtitle_stream_changed) {
                        SDL_LockMutex(is->subpq_mutex);
    
                        while (is->subpq_size) {
                            free_subpicture(&is->subpq[is->subpq_rindex]);
    
                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;
    
                            is->subpq_size--;
                        }
                        is->subtitle_stream_changed = 0;
    
                        SDL_CondSignal(is->subpq_cond);
                        SDL_UnlockMutex(is->subpq_mutex);
                    } else {
                        if (is->subpq_size > 0) {
                            sp = &is->subpq[is->subpq_rindex];
    
                            if (is->subpq_size > 1)
                                sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                            else
                                sp2 = NULL;
    
                            if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                    || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                            {
                                free_subpicture(sp);
    
                                /* update queue size and signal for next picture */
                                if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                    is->subpq_rindex = 0;
    
                                SDL_LockMutex(is->subpq_mutex);
                                is->subpq_size--;
                                SDL_CondSignal(is->subpq_cond);
                                SDL_UnlockMutex(is->subpq_mutex);
                            }
                        }
                    }
                }
    
    
            /* display picture */
            if (!display_disable)
                video_display(is);
    
                /* update queue size and signal for next picture */
                if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                    is->pictq_rindex = 0;
    
                SDL_LockMutex(is->pictq_mutex);
                is->pictq_size--;
                SDL_CondSignal(is->pictq_cond);
                SDL_UnlockMutex(is->pictq_mutex);
            }
        } else if (is->audio_st) {
            /* draw the next audio frame */
    
        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */
    
        /* display picture */
        if (!display_disable)
            video_display(is);
    
        }
        if (show_status) {
            static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

            cur_time = av_gettime();
    
            if (!last_time || (cur_time - last_time) >= 30000) {
    
                aqsize = 0;
            vqsize = 0;
            sqsize = 0;
                if (is->audio_st)
                    aqsize = is->audioq.size;
                if (is->video_st)
                    vqsize = is->videoq.size;
    
                if (is->subtitle_st)
                    sqsize = is->subtitleq.size;
    
                av_diff = 0;
                if (is->audio_st && is->video_st)
                    av_diff = get_audio_clock(is) - get_video_clock(is);
    
                printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
    
                       get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
    
                fflush(stdout);
                last_time = cur_time;
            }
        }
    }
    
    
    static void stream_close(VideoState *is)
    {
        VideoPicture *vp;
        int i;
        /* XXX: use a special url_shutdown call to abort parse cleanly */
        is->abort_request = 1;
        SDL_WaitThread(is->parse_tid, NULL);
        SDL_WaitThread(is->refresh_tid, NULL);
    
        /* free all pictures */
        for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
            vp = &is->pictq[i];
    #if CONFIG_AVFILTER
            if (vp->picref) {
                avfilter_unref_buffer(vp->picref);
                vp->picref = NULL;
            }
    #endif
            if (vp->bmp) {
                SDL_FreeYUVOverlay(vp->bmp);
                vp->bmp = NULL;
            }
        }
        SDL_DestroyMutex(is->pictq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->subpq_cond);
    #if !CONFIG_AVFILTER
        if (is->img_convert_ctx)
            sws_freeContext(is->img_convert_ctx);
    #endif
        av_free(is);
    }
    
    static void do_exit(void)
    {
        if (cur_stream) {
            stream_close(cur_stream);
            cur_stream = NULL;
        }
    
    #if CONFIG_AVFILTER
        avfilter_uninit();
    #endif
        if (show_status)
            printf("\n");
        SDL_Quit();
    
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}

/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems) */
    static void alloc_picture(void *opaque)
    {
        VideoState *is = opaque;
        VideoPicture *vp;
    
        vp = &is->pictq[is->pictq_windex];
    
        if (vp->bmp)
            SDL_FreeYUVOverlay(vp->bmp);
    
    
    #if CONFIG_AVFILTER
        if (vp->picref)
    
            avfilter_unref_buffer(vp->picref);
    
        vp->picref = NULL;
    
        vp->width   = is->out_video_filter->inputs[0]->w;
        vp->height  = is->out_video_filter->inputs[0]->h;
        vp->pix_fmt = is->out_video_filter->inputs[0]->format;
    #else
        vp->width   = is->video_st->codec->width;
        vp->height  = is->video_st->codec->height;
        vp->pix_fmt = is->video_st->codec->pix_fmt;
    #endif
    
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
        if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
            /* SDL allocates a buffer smaller than requested if the video
             * overlay hardware is unable to support the requested size. */
            fprintf(stderr, "Error: the video system does not support an image\n"
    
                            "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
    
                            "to reduce the image size.\n", vp->width, vp->height );
            do_exit();
        }
    
    
        SDL_LockMutex(is->pictq_mutex);
        vp->allocated = 1;
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);
    }
    
    
    /**
     *
     * @param pts the dts of the pkt / pts of the frame and guessed if not known
     */
    
    static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
    
    {
        VideoPicture *vp;
        int dst_pix_fmt;
    
    #if CONFIG_AVFILTER
        AVPicture pict_src;
    #endif
    
        /* wait until we have space to put a new picture */
        SDL_LockMutex(is->pictq_mutex);
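    /* the queue is full and no refresh is pending: the decoder is ahead of
       the display, so decay the frame-drop ratio used by get_video_frame() */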
    
    
        if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
            is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
    
    
        while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
               !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);
    
        if (is->videoq.abort_request)
            return -1;
    
        vp = &is->pictq[is->pictq_windex];
    
        /* alloc or resize hardware picture buffer */
    
        if (!vp->bmp ||
    
    #if CONFIG_AVFILTER
            vp->width  != is->out_video_filter->inputs[0]->w ||
            vp->height != is->out_video_filter->inputs[0]->h) {
    #else
    
            vp->width != is->video_st->codec->width ||
            vp->height != is->video_st->codec->height) {
    
            SDL_Event event;
    
            vp->allocated = 0;
    
            /* the allocation must be done in the main thread to avoid
               locking problems */
            event.type = FF_ALLOC_EVENT;
            event.user.data1 = is;
            SDL_PushEvent(&event);
    
            /* wait until the picture is allocated */
            SDL_LockMutex(is->pictq_mutex);
            while (!vp->allocated && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            SDL_UnlockMutex(is->pictq_mutex);
    
            if (is->videoq.abort_request)
                return -1;
        }
    
    
        /* if the frame is not skipped, then display it */
    
    if (vp->bmp) {
        AVPicture pict;
    #if CONFIG_AVFILTER
            if(vp->picref)
    
                avfilter_unref_buffer(vp->picref);
    
            vp->picref = src_frame->opaque;
    #endif
    
            /* get a pointer on the bitmap */
            SDL_LockYUVOverlay (vp->bmp);
    
            dst_pix_fmt = PIX_FMT_YUV420P;
    
            memset(&pict,0,sizeof(AVPicture));
    
            pict.data[0] = vp->bmp->pixels[0];
            pict.data[1] = vp->bmp->pixels[2];
            pict.data[2] = vp->bmp->pixels[1];
    
            pict.linesize[0] = vp->bmp->pitches[0];
            pict.linesize[1] = vp->bmp->pitches[2];
            pict.linesize[2] = vp->bmp->pitches[1];
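        /* SDL's YV12 overlay stores the planes in Y, V, U order while
           PIX_FMT_YUV420P expects Y, U, V, hence the swapped indices above */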
    
    
    #if CONFIG_AVFILTER
            pict_src.data[0] = src_frame->data[0];
            pict_src.data[1] = src_frame->data[1];
            pict_src.data[2] = src_frame->data[2];
    
            pict_src.linesize[0] = src_frame->linesize[0];
            pict_src.linesize[1] = src_frame->linesize[1];
            pict_src.linesize[2] = src_frame->linesize[2];
    
            //FIXME use direct rendering
            av_picture_copy(&pict, &pict_src,
                            vp->pix_fmt, vp->width, vp->height);
    #else
    
            sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
    
            is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
    
                vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
    
                dst_pix_fmt, sws_flags, NULL, NULL, NULL);
    
            if (is->img_convert_ctx == NULL) {
    
                fprintf(stderr, "Cannot initialize the conversion context\n");
                exit(1);
            }
    
            sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
    
                      0, vp->height, pict.data, pict.linesize);
    #endif
    
            /* update the bitmap content */
            SDL_UnlockYUVOverlay(vp->bmp);
    
    
        vp->pts = pts;
        vp->pos = pos;

            /* now we can update the picture count */
            if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_windex = 0;
            SDL_LockMutex(is->pictq_mutex);
    
            vp->target_clock= compute_target_time(vp->pts, is);
    
    
            is->pictq_size++;
            SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

    /**
     * compute the exact PTS for the picture if it is omitted in the stream
    
     * @param pts1 the dts of the pkt / pts of the frame
     */
    
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

        if (pts != 0) {
    
            /* update video clock with pts, if present */
    
            is->video_clock = pts;
        } else {
    
            pts = is->video_clock;
        }
        /* update video clock for next frame */
    
        frame_delay = av_q2d(is->video_st->codec->time_base);
    
        /* for MPEG2, the frame can be repeated, so we update the
           clock accordingly */
    
        frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    
        is->video_clock += frame_delay;
    
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               av_get_pict_type_char(src_frame->pict_type), pts, pts1);
    
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;
    
        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
    
            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
                is->pictq[i].target_clock= 0;
            }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
            is->video_current_pos = -1;
            SDL_UnlockMutex(is->pictq_mutex);
    
            init_pts_correction(&is->pts_ctx);
            is->frame_last_pts = AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames = 1;
            is->skip_frames_index = 0;
            return 0;
        }
    
        len1 = avcodec_decode_video2(is->video_st->codec,
                                     frame, &got_picture,
                                     pkt);
    
        if (got_picture) {
            if (decoder_reorder_pts == -1) {
    
                *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
    
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }
    
        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

            is->skip_frames_index += 1;
            if(is->skip_frames_index >= is->skip_frames){
                is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
                return 1;
            }
    
        }
    
        return 0;
    }
    
    #if CONFIG_AVFILTER
    typedef struct {
        VideoState *is;
    AVFrame *frame;
    int use_dr1;
} FilterPriv;

    static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
    {
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

        if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
            perms |= AV_PERM_NEG_LINESIZES;
    
    
        if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
            if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
            if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
            if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
        }
        if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
    
        w = codec->width;
        h = codec->height;
        avcodec_align_dimensions2(codec, &w, &h, stride);
        edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
        w += edge << 1;
        h += edge << 1;
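    /* pad the buffer so the decoder has room for edge emulation around the
       visible picture */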
    
        if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
            return -1;
    
    
        ref->video->w = codec->width;
    ref->video->h = codec->height;

    for (i = 0; i < 4; i++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
            pic->data[i]     = ref->data[i];
            pic->linesize[i] = ref->linesize[i];
        }
        pic->opaque = ref;
        pic->age    = INT_MAX;
        pic->type   = FF_BUFFER_TYPE_USER;
    
        pic->reordered_opaque = codec->reordered_opaque;
    
        if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
        else           pic->pkt_pts = AV_NOPTS_VALUE;
    
        return 0;
    }
    
    static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
    {
        memset(pic->data, 0, sizeof(pic->data));
    
    avfilter_unref_buffer(pic->opaque);
}

    static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
    {
    
        AVFilterBufferRef *ref = pic->opaque;
    
    
        if (pic->data[0] == NULL) {
            pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
            return codec->get_buffer(codec, pic);
        }
    
    
        if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
    
            (codec->pix_fmt != ref->format)) {
    
            av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
            return -1;
        }
    
        pic->reordered_opaque = codec->reordered_opaque;
    
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;

    return 0;
}

    static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
    {
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;

        if(!opaque) return -1;
    
        priv->is = opaque;
    
        codec    = priv->is->video_st->codec;
        codec->opaque = ctx;
        if(codec->codec->capabilities & CODEC_CAP_DR1) {
            priv->use_dr1 = 1;
            codec->get_buffer     = input_get_buffer;
            codec->release_buffer = input_release_buffer;
    
            codec->reget_buffer   = input_reget_buffer;
    
        codec->thread_safe_callbacks = 1;
    }

        priv->frame = avcodec_alloc_frame();
    
        return 0;
    }
    
    static void input_uninit(AVFilterContext *ctx)
    {
        FilterPriv *priv = ctx->priv;
        av_free(priv->frame);
    }
    
    static int input_request_frame(AVFilterLink *link)
    {
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
        AVPacket pkt;
        int ret;
    
        while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
            av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if (priv->use_dr1) {
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }

        av_free_packet(&pkt);
    
        picref->pts = pts;
    
        picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    
        avfilter_start_frame(link, picref);
    
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
    
        return 0;
    }
    
    static int input_query_formats(AVFilterContext *ctx)
    {
        FilterPriv *priv = ctx->priv;
        enum PixelFormat pix_fmts[] = {
            priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
        };
    
        avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
        return 0;
    }
    
    static int input_config_props(AVFilterLink *link)
    {
        FilterPriv *priv  = link->src->priv;
        AVCodecContext *c = priv->is->video_st->codec;
    
        link->w = c->width;
        link->h = c->height;
    
        link->time_base = priv->is->video_st->time_base;
    
    
        return 0;
    }
    
    static AVFilter input_filter =
    {
        .name      = "ffplay_input",
    
        .priv_size = sizeof(FilterPriv),
    
        .init      = input_init,
        .uninit    = input_uninit,
    
        .query_formats = input_query_formats,
    
        .inputs    = (AVFilterPad[]) {{ .name = NULL }},
        .outputs   = (AVFilterPad[]) {{ .name = "default",
    
                                        .request_frame = input_request_frame,
                                        .config_props  = input_config_props, },
                                      { .name = NULL }},
    };
    
    
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
        FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    
        AVFilterContext *filt_src = NULL, *filt_out = NULL;
    
        snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
        graph->scale_sws_opts = av_strdup(sws_flags_str);
    
        if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                                NULL, is, graph)) < 0)
    
            goto the_end;
    
        if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                                NULL, &ffsink_ctx, graph)) < 0)
    
            goto the_end;
    
    
        if(vfilters) {
            AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
            AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
    
            outputs->name    = av_strdup("in");
    
            outputs->pad_idx = 0;
            outputs->next    = NULL;
    
            inputs->name    = av_strdup("out");
    
            inputs->pad_idx = 0;
            inputs->next    = NULL;
    
    
            if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
    
                goto the_end;
            av_freep(&vfilters);
        } else {
    
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

        if ((ret = avfilter_graph_config(graph, NULL)) < 0)
    
            goto the_end;
    
    
        is->out_video_filter = filt_out;
    
    the_end:
        return ret;
    }
    
    #endif  /* CONFIG_AVFILTER */
    
    static int video_thread(void *arg)
    {
        VideoState *is = arg;
        AVFrame *frame= avcodec_alloc_frame();
        int64_t pts_int;
        double pts;
        int ret;
    
    #if CONFIG_AVFILTER
        AVFilterGraph *graph = avfilter_graph_alloc();
        AVFilterContext *filt_out = NULL;
        int64_t pos;
    
        if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
            goto the_end;
        filt_out = is->out_video_filter;
    
    #endif
    
        for(;;) {
    #if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
            while (is->paused && !is->videoq.abort_request)
                SDL_Delay(10);
    #if CONFIG_AVFILTER
    
            ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
            if (picref) {
                pts_int = picref->pts;
                pos     = picref->pos;
                frame->opaque = picref;
            }
    
    
            if (av_cmp_q(tb, is->video_st->time_base)) {
    
                av_unused int64_t pts1 = pts_int;
    
                pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
    
                av_dlog(NULL, "video_thread(): "
                        "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                        tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
    #else
            ret = get_video_frame(is, frame, &pts_int, &pkt);
    #endif
    
            if (ret < 0) goto the_end;
    
            if (!ret)
                continue;
    
    
            pts = pts_int*av_q2d(is->video_st->time_base);
    
    
    #if CONFIG_AVFILTER
    
        ret = output_picture2(is, frame, pts, pos);
#else
            ret = output_picture2(is, frame, pts,  pkt.pos);
    
            av_free_packet(&pkt);
    #endif
            if (ret < 0)
                goto the_end;
    
    
        }
     the_end:
    
    #if CONFIG_AVFILTER
    
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}

    static int subtitle_thread(void *arg)
    {
        VideoState *is = arg;
        SubPicture *sp;
        AVPacket pkt1, *pkt = &pkt1;
        int len1, got_subtitle;
        double pts;
        int i, j;
        int r, g, b, y, u, v, a;
    
        for(;;) {
            while (is->paused && !is->subtitleq.abort_request) {
                SDL_Delay(10);
            }
            if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
                break;
    
            if(pkt->data == flush_pkt.data){
                avcodec_flush_buffers(is->subtitle_st->codec);
                continue;
            }
    
            SDL_LockMutex(is->subpq_mutex);
            while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
                   !is->subtitleq.abort_request) {
                SDL_CondWait(is->subpq_cond, is->subpq_mutex);
            }
            SDL_UnlockMutex(is->subpq_mutex);
    
            if (is->subtitleq.abort_request)
                goto the_end;
    
            sp = &is->subpq[is->subpq_windex];
    
           /* NOTE: ipts is the PTS of the _first_ picture beginning in
               this packet, if any */
            pts = 0;
            if (pkt->pts != AV_NOPTS_VALUE)
                pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
    
    
            len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
    
                                        &sp->sub, &got_subtitle,
                                        pkt);

    //            if (len1 < 0)
    //                break;
            if (got_subtitle && sp->sub.format == 0) {
                sp->pts = pts;
    
                for (i = 0; i < sp->sub.num_rects; i++)
                {
    
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                        RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
    
                        y = RGB_TO_Y_CCIR(r, g, b);
                        u = RGB_TO_U_CCIR(r, g, b, 0);
                        v = RGB_TO_V_CCIR(r, g, b, 0);
    
                        YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
    
                    }
                }
    
                /* now we can update the picture count */
                if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                    is->subpq_windex = 0;
                SDL_LockMutex(is->subpq_mutex);
                is->subpq_size++;
                SDL_UnlockMutex(is->subpq_mutex);
            }
            av_free_packet(pkt);
    
    //            if (cur_stream)
    //                stream_pause(cur_stream);
        }
     the_end:
        return 0;
    }
    
    
    /* copy samples for viewing in editor window */
    static void update_sample_display(VideoState *is, short *samples, int samples_size)
    {
        int size, len, channels;
    
    
        channels = is->audio_st->codec->channels;
    
    
        size = samples_size / sizeof(short);
        while (size > 0) {
            len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
            if (len > size)
                len = size;
            memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
            samples += len;
            is->sample_array_index += len;
            if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
                is->sample_array_index = 0;
            size -= len;
        }
    }
    
    /* return the new audio buffer size (samples can be added or deleted
       to get better sync if video or external master clock) */
    
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    
    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    
        /* if not master, then we try to remove or add samples to correct the clock */
        if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
    
             is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
            double diff, avg_diff;
    
            int wanted_size, min_size, max_size, nb_samples;
    
            ref_clock = get_master_clock(is);
            diff = get_audio_clock(is) - ref_clock;
    
            if (diff < AV_NOSYNC_THRESHOLD) {
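            /* audio_diff_cum is an exponentially weighted sum of the clock
               differences; multiplying it by (1 - coef) below turns it into
               an average once enough samples have been accumulated */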
                is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
                if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                    /* not enough measures to have a correct estimate */
                    is->audio_diff_avg_count++;
                } else {
                    /* estimate the A-V difference */
                    avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
    
                    if (fabs(avg_diff) >= is->audio_diff_threshold) {