        if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
            /* this will estimate bitrate based on this frame's duration and size */
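            /* av_rescale(a, b, c) computes a * b / c, so this spreads pc->offset
               (the parser-reported byte offset) over the packet's duration --
               in effect a constant-bitrate interpolation of the timestamp offset. */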
            offset = av_rescale(pc->offset, pkt->duration, pkt->size);
            if(pkt->pts != AV_NOPTS_VALUE)
                pkt->pts += offset;
            if(pkt->dts != AV_NOPTS_VALUE)
                pkt->dts += offset;
        }
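
        /* pc->dts_ref_dts_delta and pc->pts_dts_delta are counted in codec
           time_base ticks; num/den below form the conversion factor into the
           stream time_base before the deltas are applied to the packet. */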
    
    
        if (pc && pc->dts_sync_point >= 0) {
            // we have synchronization info from the parser
            int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
            if (den > 0) {
                int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
                if (pkt->dts != AV_NOPTS_VALUE) {
                    // got DTS from the stream, update reference timestamp
                    st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                    pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
                } else if (st->reference_dts != AV_NOPTS_VALUE) {
                    // compute DTS based on reference timestamp
                    pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                    pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
                }
                if (pc->dts_sync_point > 0)
                    st->reference_dts = pkt->dts; // new reference
            }
        }
    
    
        /* This may be redundant, but it should not hurt. */
    
        if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
            presentation_delayed = 1;
    
    //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    
        /* interpolate PTS and DTS if they are not present */
    
        //We skip H264 currently because delay and has_b_frames are not reliably set
        if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
    
            if (presentation_delayed) {
    
                /* DTS = decompression timestamp */
                /* PTS = presentation timestamp */
    
                if (pkt->dts == AV_NOPTS_VALUE)
    
                    pkt->dts = st->last_IP_pts;
    
                update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
    
                if (pkt->dts == AV_NOPTS_VALUE)
                    pkt->dts = st->cur_dts;
    
                /* this is tricky: the dts must be incremented by the duration
                   of the frame we are displaying, i.e. the last I- or P-frame */
    
                if (st->last_IP_duration == 0)
                    st->last_IP_duration = pkt->duration;
    
                if(pkt->dts != AV_NOPTS_VALUE)
    
                    st->cur_dts = pkt->dts + st->last_IP_duration;
    
                st->last_IP_duration  = pkt->duration;
                st->last_IP_pts= pkt->pts;
    
                /* cannot compute PTS if not present (we can compute it only
                   by knowing the future) */
    
            } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
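            /* Heuristic below: if cur_dts is closer to pts + duration than to pts
               itself (within 1/8 of a frame duration), the packet's pts is assumed
               to lag by exactly one frame and is bumped forward. */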
    
                if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                    int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                    int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                    if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                        pkt->pts += pkt->duration;
        //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                    }
                }
    
                /* presentation is not delayed : PTS and DTS are the same */
                if(pkt->pts == AV_NOPTS_VALUE)
                    pkt->pts = pkt->dts;
    
                update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
    
                if(pkt->pts == AV_NOPTS_VALUE)
                    pkt->pts = st->cur_dts;
                pkt->dts = pkt->pts;
    
                if(pkt->pts != AV_NOPTS_VALUE)
    
                    st->cur_dts = pkt->pts + pkt->duration;
    
            }
    
        if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
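        /* Keep the most recent PTS values in a sorted buffer; with up to `delay`
           frames of reordering, the smallest buffered PTS can serve as the DTS
           of the current packet when the container did not store one. */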
    
            st->pts_buffer[0]= pkt->pts;
            for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
                FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
            if(pkt->dts == AV_NOPTS_VALUE)
                pkt->dts= st->pts_buffer[0];
    
        if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
    
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
    
            if(pkt->dts > st->cur_dts)
                st->cur_dts = pkt->dts;
        }
    
    //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    
            pkt->flags |= AV_PKT_FLAG_KEY;
    
            /* keyframe computation */
    
        if (pc->key_frame == 1)
            pkt->flags |= AV_PKT_FLAG_KEY;
    
            else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
    
                pkt->flags |= AV_PKT_FLAG_KEY;
    
        if (pc)
            pkt->convergence_duration = pc->convergence_duration;
    
    static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    
        for(;;) {
            /* select current input stream component */
            st = s->cur_st;
            if (st) {
    
                    /* no parsing needed: we just output the packet as is */
                    /* raw data support */
    
                    *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
    
                    compute_pkt_fields(s, st, NULL, pkt);
                    s->cur_st = NULL;
    
                    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
    
                        (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
    
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
                    }
    
                } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
    
                    len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                           st->cur_ptr, st->cur_len,
                                           st->cur_pkt.pts, st->cur_pkt.dts,
                                           st->cur_pkt.pos);
    
                    st->cur_pkt.pts = AV_NOPTS_VALUE;
                    st->cur_pkt.dts = AV_NOPTS_VALUE;
    
                    /* increment read pointer */
    
                    /* return packet if any */
                    if (pkt->size) {
    
                        pkt->duration = 0;
                        pkt->stream_index = st->index;
    
                        pkt->pts = st->parser->pts;
                        pkt->dts = st->parser->dts;
    
                        if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                            s->cur_st = NULL;
                            pkt->destruct= st->cur_pkt.destruct;
    
                            st->cur_pkt.data    = NULL;
                            assert(st->cur_len == 0);
                        }else{
    
                            pkt->destruct = NULL;
    
                        compute_pkt_fields(s, st, st->parser, pkt);
    
                        if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
    
                            ff_reduce_index(s, st->index);
    
                            av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                               0, 0, AVINDEX_KEYFRAME);
                        }
    
    
                    /* free packet */
    
                        return ret;
                    /* return the last frames, if any */
                    for(i = 0; i < s->nb_streams; i++) {
                        st = s->streams[i];
    
                                            &pkt->data, &pkt->size,
                                            NULL, 0,
    
                                            AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                            AV_NOPTS_VALUE);
    
                    /* no more packets: really terminate parsing */
    
                st = s->streams[cur_pkt.stream_index];
                st->cur_pkt= cur_pkt;
    
                if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
                   st->cur_pkt.dts != AV_NOPTS_VALUE &&
                   st->cur_pkt.pts < st->cur_pkt.dts){
    
                    av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
    
                        st->cur_pkt.stream_index,
                        st->cur_pkt.pts,
                        st->cur_pkt.dts,
                        st->cur_pkt.size);
    //                av_free_packet(&st->cur_pkt);
    
                if(s->debug & FF_FDEBUG_TS)
    
                    av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
    
                        st->cur_pkt.stream_index,
                        st->cur_pkt.pts,
                        st->cur_pkt.dts,
                        st->cur_pkt.size,
    
                st->cur_ptr = st->cur_pkt.data;
                st->cur_len = st->cur_pkt.size;
    
                if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
    
                    st->parser = av_parser_init(st->codec->codec_id);
    
                        /* no parser available: just output the raw packets */
    
                        st->need_parsing = AVSTREAM_PARSE_NONE;
                    }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
    
                        st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
    
                    }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
                        st->parser->flags |= PARSER_FLAG_ONCE;
    
        if(s->debug & FF_FDEBUG_TS)
    
            av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
    
                pkt->stream_index,
                pkt->pts,
                pkt->dts,
    
    }
    
    int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    
    {
        AVPacketList *pktl;
    
        int eof=0;
        const int genpts= s->flags & AVFMT_FLAG_GENPTS;
    
        for(;;){
            pktl = s->packet_buffer;
            if (pktl) {
                AVPacket *next_pkt= &pktl->pkt;
    
                if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
    
                    int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
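                /* Scan the buffered packets for a later DTS on the same stream and
                   reuse it as this packet's missing PTS; av_compare_mod() compares
                   modulo 2^wrap_bits so timestamp wraparound is handled. */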
    
                    while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
    
                        if(   pktl->pkt.stream_index == next_pkt->stream_index
    
                           && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
                           && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
    
                            next_pkt->pts= pktl->pkt.dts;
                        }
                        pktl= pktl->next;
                    }
                    pktl = s->packet_buffer;
                }
    
    
                if(   next_pkt->pts != AV_NOPTS_VALUE
                   || next_pkt->dts == AV_NOPTS_VALUE
    
                   || !genpts || eof){
                    /* read packet from packet buffer, if there is data */
                    *pkt = *next_pkt;
                    s->packet_buffer = pktl->next;
                    av_free(pktl);
                    return 0;
                }
            }
            if(genpts){
    
                int ret= read_frame_internal(s, pkt);
    
                    if(pktl && ret != AVERROR(EAGAIN)){
    
                if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                               &s->packet_buffer_end)) < 0)
    
            }else{
                assert(!s->packet_buffer);
    
                return read_frame_internal(s, pkt);
    
        }
    }
    
    /* XXX: suppress the packet queue */
    static void flush_packet_queue(AVFormatContext *s)
    {
        AVPacketList *pktl;
    
        for(;;) {
            pktl = s->packet_buffer;
    
            if (!pktl)
                break;
            s->packet_buffer = pktl->next;
            av_free_packet(&pktl->pkt);
            av_free(pktl);
    
        while(s->raw_packet_buffer){
            pktl = s->raw_packet_buffer;
            s->raw_packet_buffer = pktl->next;
            av_free_packet(&pktl->pkt);
            av_free(pktl);
        }
    
        s->packet_buffer_end=
        s->raw_packet_buffer_end= NULL;
    
        s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    
    /*******************************************************/
    /* seek support */
    
    
    int av_find_default_stream_index(AVFormatContext *s)
    {
    
        int first_audio_index = -1;
        int i;
        AVStream *st;
    
        if (s->nb_streams <= 0)
            return -1;
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                return i;
            }
    
            if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                first_audio_index = i;
    
        return first_audio_index >= 0 ? first_audio_index : 0;
    
    void ff_read_frame_flush(AVFormatContext *s)
    
        /* for each stream, reset read state */
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
            if (st->parser) {
                av_parser_close(st->parser);
                st->parser = NULL;
    
            st->last_IP_pts = AV_NOPTS_VALUE;
    
            st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
    
            st->reference_dts = AV_NOPTS_VALUE;
    
            /* fail safe */
            st->cur_ptr = NULL;
            st->cur_len = 0;
    
    
            for(j=0; j<MAX_REORDER_DELAY+1; j++)
                st->pts_buffer[j]= AV_NOPTS_VALUE;
    
    #if FF_API_SEEK_PUBLIC
    void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
    {
    
        ff_update_cur_dts(s, ref_st, timestamp);
    
    }
    #endif
    
    void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
    {
    
        int i;
    
        for(i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
    
            st->cur_dts = av_rescale(timestamp,
    
                                     st->time_base.den * (int64_t)ref_st->time_base.num,
                                     st->time_base.num * (int64_t)ref_st->time_base.den);
    
    void ff_reduce_index(AVFormatContext *s, int stream_index)
    {
        AVStream *st= s->streams[stream_index];
        unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
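
        /* Once the index would outgrow max_index_size, halve it by keeping only
           every second entry; seeking gets coarser but memory stays bounded. */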
    
        if((unsigned)st->nb_index_entries >= max_entries){
            int i;
            for(i=0; 2*i<st->nb_index_entries; i++)
                st->index_entries[i]= st->index_entries[2*i];
            st->nb_index_entries= i;
        }
    }
    
    
    int ff_add_index_entry(AVIndexEntry **index_entries,
                           int *nb_index_entries,
                           unsigned int *index_entries_allocated_size,
                           int64_t pos, int64_t timestamp, int size, int distance, int flags)
    
    {
        AVIndexEntry *entries, *ie;
    
        if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
            return -1;
    
        entries = av_fast_realloc(*index_entries,
                                  index_entries_allocated_size,
                                  (*nb_index_entries + 1) *
                                  sizeof(AVIndexEntry));
    
        *index_entries= entries;
    
        index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
    
        if(index<0){
    
            index= (*nb_index_entries)++;
            ie= &entries[index];
    
            assert(index==0 || ie[-1].timestamp < timestamp);
        }else{
            ie= &entries[index];
            if(ie->timestamp != timestamp){
    
                if(ie->timestamp <= timestamp)
                    return -1;
    
                memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
                (*nb_index_entries)++;
    
            }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
    
                distance= ie->min_distance;
    
        ie->pos = pos;
        ie->timestamp = timestamp;
    
        ie->size= size;
    
    int av_add_index_entry(AVStream *st,
                           int64_t pos, int64_t timestamp, int size, int distance, int flags)
    {
        return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
                                  &st->index_entries_allocated_size, pos,
                                  timestamp, size, distance, flags);
    }
    
    int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
                                  int64_t wanted_timestamp, int flags)
    
        a = - 1;
        b = nb_entries;
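
        /* Binary search over the timestamp-sorted index: a and b bracket the
           candidates, converging on the last entry with timestamp <= wanted_timestamp (a)
           and the first entry with timestamp >= wanted_timestamp (b). */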
    
        //optimize appending index entries at the end
        if(b && entries[b-1].timestamp < wanted_timestamp)
            a= b-1;
    
    
        while (b - a > 1) {
            m = (a + b) >> 1;
    
            timestamp = entries[m].timestamp;
    
            if(timestamp >= wanted_timestamp)
                b = m;
            if(timestamp <= wanted_timestamp)
                a = m;
        }
    
        m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    
        if(!(flags & AVSEEK_FLAG_ANY)){
            while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
                m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
            }
        }
    
        if(m == nb_entries)
    
            return -1;
        return  m;
    
    int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                                  int flags)
    {
        return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
                                         wanted_timestamp, flags);
    }
    
    
    #if FF_API_SEEK_PUBLIC
    
    int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    
        return ff_seek_frame_binary(s, stream_index, target_ts, flags);
    }
    #endif
    
    int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
    {
    
        int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    
        av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
    
        pos_limit= -1; //gcc falsely says it may be uninitialized
    
    
        st= s->streams[stream_index];
        if(st->index_entries){
            AVIndexEntry *e;
    
    
            index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
    
            index= FFMAX(index, 0);
    
            e= &st->index_entries[index];
    
            if(e->timestamp <= target_ts || e->pos == e->min_distance){
                pos_min= e->pos;
                ts_min= e->timestamp;
    
                av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
                        pos_min,ts_min);
    
    
            index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
    
            assert(index < st->nb_index_entries);
            if(index >= 0){
    
                e= &st->index_entries[index];
                assert(e->timestamp >= target_ts);
                pos_max= e->pos;
                ts_max= e->timestamp;
                pos_limit= pos_max - e->min_distance;
    
                av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                        pos_max,pos_limit, ts_max);
    
        pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    
        if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
    
        ff_update_cur_dts(s, st, ts);
    
    #if FF_API_SEEK_PUBLIC
    int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                          int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                          int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
                          int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
    {
        return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
                             pos_limit, ts_min, ts_max, flags, ts_ret,
                             read_timestamp);
    }
    #endif
    
    int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                          int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                          int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
                          int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
    {
    
        av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
    
        if(ts_min == AV_NOPTS_VALUE){
            pos_min = s->data_offset;
    
            ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
            if (ts_min == AV_NOPTS_VALUE)
                return -1;
        }
    
        if(ts_max == AV_NOPTS_VALUE){
            int step= 1024;
    
            filesize = avio_size(s->pb);
    
                ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
    
                step += step;
            }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
            if (ts_max == AV_NOPTS_VALUE)
                return -1;
    
                int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
    
                if(tmp_ts == AV_NOPTS_VALUE)
                    break;
                ts_max= tmp_ts;
                pos_max= tmp_pos;
    
        if(ts_min > ts_max){
            return -1;
        }else if(ts_min == ts_max){
            pos_limit= pos_min;
        }
    
    
            av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
                    pos_min, pos_max, ts_min, ts_max);
    
            assert(pos_limit <= pos_max);
    
            if(no_change==0){
                int64_t approximate_keyframe_distance= pos_max - pos_limit;
                // interpolate position (better than dichotomy)
    
                pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                    + pos_min - approximate_keyframe_distance;
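                /* i.e. pos ~= pos_min + (target_ts - ts_min) / (ts_max - ts_min) * (pos_max - pos_min),
                   then moved back by the estimated keyframe spacing so the following
                   read_timestamp() probe tends to land just before the target. */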
    
            }else if(no_change==1){
                // bisection, if interpolation failed to change min or max pos last time
                pos = (pos_min + pos_limit)>>1;
            }else{
    
                /* linear search if bisection failed, can only happen if there
                   are very few or no keyframes between min/max */
    
                pos=pos_min;
            }
            if(pos <= pos_min)
                pos= pos_min + 1;
            else if(pos > pos_limit)
                pos= pos_limit;
            start_pos= pos;
    
    
            ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
    
            av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
                    pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
                    pos_limit, start_pos, no_change);
    
            if(ts == AV_NOPTS_VALUE){
                av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
                return -1;
            }
    
            if (target_ts <= ts) {
    
                pos_limit = start_pos - 1;
                pos_max = pos;
                ts_max = ts;
    
            }
            if (target_ts >= ts) {
    
        pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
        ts  = (flags & AVSEEK_FLAG_BACKWARD) ?  ts_min :  ts_max;
    
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
        ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
        av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
                pos, ts_min, target_ts, ts_max);
    
    static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    
        int64_t pos_min, pos_max;
    #if 0
        AVStream *st;
    
        if (stream_index < 0)
            return -1;
    
        st= s->streams[stream_index];
    #endif
    
        pos_min = s->data_offset;
    
        pos_max = avio_size(s->pb) - 1;
    
    
        if     (pos < pos_min) pos= pos_min;
        else if(pos > pos_max) pos= pos_max;
    
    
        avio_seek(s->pb, pos, SEEK_SET);
    
        av_update_cur_dts(s, st, ts);
    
    static int seek_frame_generic(AVFormatContext *s,
    
                                     int stream_index, int64_t timestamp, int flags)
    
        AVStream *st;
        AVIndexEntry *ie;
    
        st = s->streams[stream_index];
    
        index = av_index_search_timestamp(st, timestamp, flags);
    
        if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
            return -1;
    
    
        if(index < 0 || index==st->nb_index_entries-1){
    
            if(st->nb_index_entries){
                assert(st->index_entries);
    
                ie= &st->index_entries[st->nb_index_entries-1];
    
                if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
    
                ff_update_cur_dts(s, st, ie->timestamp);
    
                if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
    
                int read_status;
    
                    read_status = av_read_frame(s, &pkt);
                } while (read_status == AVERROR(EAGAIN));
                if (read_status < 0)
    
                    break;
                av_free_packet(&pkt);
                if(stream_index == pkt.stream_index){
    
                    if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
    
                        break;
                }
            }
            index = av_index_search_timestamp(st, timestamp, flags);
        }
    
        ff_read_frame_flush(s);
    
        if (s->iformat->read_seek){
            if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
                return 0;
        }
        ie = &st->index_entries[index];
    
        if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
    
        ff_update_cur_dts(s, st, ie->timestamp);
    
    int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
    
            if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
                return -1;
    
            return seek_frame_byte(s, stream_index, timestamp, flags);
    
        if(stream_index < 0){
            stream_index= av_find_default_stream_index(s);
            if(stream_index < 0)
                return -1;
    
            st= s->streams[stream_index];
    
            /* timestamp for default must be expressed in AV_TIME_BASE units */
    
            timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
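        /* av_rescale() converts from AV_TIME_BASE (microsecond) units into the
           stream's own time_base in a single integer operation, avoiding
           intermediate overflow and double rounding. */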
    
        /* first, we try the format specific seek */
    
        if (s->iformat->read_seek) {
            ff_read_frame_flush(s);
    
            ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    
        if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
            ff_read_frame_flush(s);
    
            return ff_seek_frame_binary(s, stream_index, timestamp, flags);
    
        } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
            ff_read_frame_flush(s);
    
            return seek_frame_generic(s, stream_index, timestamp, flags);
    
    int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
    {
        if(min_ts > ts || max_ts < ts)
            return -1;
    
    
        if (s->iformat->read_seek2) {
            ff_read_frame_flush(s);
    
            return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
    
    
        if(s->iformat->read_timestamp){
            //try to seek via read_timestamp()
        }
    
        //Fallback to old API if new is not implemented but old is
        //Note the old API has somewhat different semantics
        if(s->iformat->read_seek || 1)
    
            return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
    
        // try some generic seek like seek_frame_generic() but with new ts semantics
    
    /*******************************************************/
    
 * Return TRUE if the format context has an accurate duration in at least one stream.
 *
 * @return TRUE if at least one stream has an accurate duration.
    
    static int has_duration(AVFormatContext *ic)
    
    {
        int i;
        AVStream *st;
    
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
    
            if (st->duration != AV_NOPTS_VALUE)
                return 1;
    
    /**
 * Estimate the stream timings from those of each component stream.
     *
     * Also computes the global bitrate if possible.
     */
    
    static void update_stream_timings(AVFormatContext *ic)
    
        int64_t start_time, start_time1, end_time, end_time1;
    
        int64_t duration, duration1, filesize;
    
        start_time = INT64_MAX;
        end_time = INT64_MIN;
    
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
    
            if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
    
                start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
    
                start_time = FFMIN(start_time, start_time1);
    
                    end_time1 = start_time1
                              + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
    
                    end_time = FFMAX(end_time, end_time1);
    
            if (st->duration != AV_NOPTS_VALUE) {
                duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
    
                duration = FFMAX(duration, duration1);
    
        if (start_time != INT64_MAX) {
    
            if (end_time != INT64_MIN)
                duration = FFMAX(duration, end_time - start_time);
    
        }
        if (duration != INT64_MIN) {
            ic->duration = duration;
    
            if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
    
                /* compute the bitrate */
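                /* bit_rate = total bits / duration in seconds; ic->duration is in
                   AV_TIME_BASE units, hence the extra AV_TIME_BASE factor. */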
    
                ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
                               (double)ic->duration;
    
            }
        }
    }
    
    static void fill_all_stream_timings(AVFormatContext *ic)
    {
        int i;
        AVStream *st;
    
    
        update_stream_timings(ic);
    
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE) {
    
                if(ic->start_time != AV_NOPTS_VALUE)
                    st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
                if(ic->duration != AV_NOPTS_VALUE)
                    st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
    
    static void estimate_timings_from_bit_rate(AVFormatContext *ic)
    
    {
        int64_t filesize, duration;
        int bit_rate, i;
        AVStream *st;
    
        /* if bit_rate is already set, we believe it */
    
        if (ic->bit_rate <= 0) {
    
            bit_rate = 0;
            for(i=0;i<ic->nb_streams;i++) {
                st = ic->streams[i];
    
                if (st->codec->bit_rate > 0)
                    bit_rate += st->codec->bit_rate;
    
            }
            ic->bit_rate = bit_rate;
        }
    
        /* if duration is already set, we believe it */
    
        if (ic->duration == AV_NOPTS_VALUE &&
    
            ic->bit_rate != 0) {
            filesize = ic->pb ? avio_size(ic->pb) : 0;
    
            if (filesize > 0) {
                for(i = 0; i < ic->nb_streams; i++) {
                    st = ic->streams[i];
    
                    duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
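                    /* duration = filesize * 8 / bit_rate seconds, expressed directly
                       in the stream's time_base by the av_rescale() above. */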
    
                    if (st->duration == AV_NOPTS_VALUE)
    
                        st->duration = duration;
                }
            }
        }
    }
    
    #define DURATION_MAX_READ_SIZE 250000
    
    static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
    
    {
        AVPacket pkt1, *pkt = &pkt1;
        AVStream *st;
        int read_size, i, ret;
    
        ic->cur_st = NULL;
    
        /* flush packet queue */
        flush_packet_queue(ic);
    
    
        for (i=0; i<ic->nb_streams; i++) {
            st = ic->streams[i];
    
            if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
    
                av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
    
            if (st->parser) {
                av_parser_close(st->parser);
                st->parser= NULL;
    
        /* estimate the end time (duration) */
        /* XXX: may need to support wrapping */
    
        filesize = ic->pb ? avio_size(ic->pb) : 0;