                if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
                    duration) {
    
                    /* presentation is not delayed : PTS and DTS are the same */
                    if (pkt->pts == AV_NOPTS_VALUE)
                        pkt->pts = pkt->dts;
                    update_initial_timestamps(s, pkt->stream_index, pkt->pts,
                                              pkt->pts);
                    if (pkt->pts == AV_NOPTS_VALUE)
                        pkt->pts = st->cur_dts;
                    pkt->dts = pkt->pts;
                    if (pkt->pts != AV_NOPTS_VALUE)
                        st->cur_dts = pkt->pts + duration;
    
            }
    
        if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
    
            st->pts_buffer[0]= pkt->pts;
            for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
                FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
            if(pkt->dts == AV_NOPTS_VALUE)
                pkt->dts= st->pts_buffer[0];
    
        if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }
    
    //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    
            pkt->flags |= AV_PKT_FLAG_KEY;
    
        if (pc)
        pkt->convergence_duration = pc->convergence_duration;
}

    static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
    {
        while (*pkt_buf) {
            AVPacketList *pktl = *pkt_buf;
            *pkt_buf = pktl->next;
            av_free_packet(&pktl->pkt);
            av_freep(&pktl);
        }
        *pkt_buf_end = NULL;
    }
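
/*
 * Illustrative sketch only (not part of the original utils.c): add_to_pktbuf()
 * is used later in this file but its body is not shown in this excerpt.  The
 * function below is merely a guess at how appending to an AVPacketList queue
 * of the kind freed above could look; the name example_add_to_pktbuf is a
 * placeholder, not the real helper.
 */
#if 0
#include "libavformat/avformat.h"

static AVPacketList *example_add_to_pktbuf(AVPacketList **packet_buffer,
                                           AVPacket *pkt,
                                           AVPacketList **plast_pktl)
{
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    if (!pktl)
        return NULL;

    pktl->pkt = *pkt;                 /* shallow copy; caller still owns pkt->data */

    if (*packet_buffer)
        (*plast_pktl)->next = pktl;   /* append after the current tail */
    else
        *packet_buffer = pktl;        /* first element becomes the head */

    *plast_pktl = pktl;               /* new tail */
    return pktl;
}
#endif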
    
    
    /**
     * Parse a packet, add all split parts to parse_queue
     *
     * @param pkt packet to parse, NULL when flushing the parser at end of stream
     */
    static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
    {
        AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
        AVStream     *st = s->streams[stream_index];
        uint8_t    *data = pkt ? pkt->data : NULL;
        int         size = pkt ? pkt->size : 0;
        int ret = 0, got_output = 0;
    
        if (!pkt) {
            av_init_packet(&flush_pkt);
            pkt = &flush_pkt;
            got_output = 1;
        }
    
        while (size > 0 || (pkt == &flush_pkt && got_output)) {
            int len;
    
            av_init_packet(&out_pkt);
            len = av_parser_parse2(st->parser,  st->codec,
                                   &out_pkt.data, &out_pkt.size, data, size,
                                   pkt->pts, pkt->dts, pkt->pos);
    
            pkt->pts = pkt->dts = AV_NOPTS_VALUE;
            /* increment read pointer */
            data += len;
            size -= len;
    
            got_output = !!out_pkt.size;
    
            if (!out_pkt.size)
                continue;
    
            /* set the duration */
            out_pkt.duration = 0;
            if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                if (st->codec->sample_rate > 0) {
                    out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                        (AVRational){ 1, st->codec->sample_rate },
                                                        st->time_base,
                                                        AV_ROUND_DOWN);
                }
            } else if (st->codec->time_base.num != 0 &&
                       st->codec->time_base.den != 0) {
                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                    st->codec->time_base,
                                                    st->time_base,
                                                    AV_ROUND_DOWN);
            }
    
            out_pkt.stream_index = st->index;
            out_pkt.pts = st->parser->pts;
            out_pkt.dts = st->parser->dts;
            out_pkt.pos = st->parser->pos;
    
            if (st->parser->key_frame == 1 ||
                (st->parser->key_frame == -1 &&
                 st->parser->pict_type == AV_PICTURE_TYPE_I))
                out_pkt.flags |= AV_PKT_FLAG_KEY;
    
            compute_pkt_fields(s, st, st->parser, &out_pkt);
    
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                out_pkt.flags & AV_PKT_FLAG_KEY) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
    
            if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
                out_pkt.destruct = pkt->destruct;
                pkt->destruct = NULL;
            }
            if ((ret = av_dup_packet(&out_pkt)) < 0)
                goto fail;
    
            if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
                av_free_packet(&out_pkt);
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    
    
        /* end of the stream => close and free the parser */
        if (pkt == &flush_pkt) {
            av_parser_close(st->parser);
            st->parser = NULL;
        }
    
    fail:
        av_free_packet(pkt);
        return ret;
    }
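
/*
 * Illustrative sketch only (not part of the original utils.c): the loop above
 * feeds demuxed data through the stream's parser with av_parser_parse2().
 * Below is a minimal, stand-alone use of the same public parser API driven
 * directly by a caller; example_parse_buffer and the buffer contents are
 * placeholders.
 */
#if 0
#include "libavcodec/avcodec.h"

static void example_parse_buffer(AVCodecContext *avctx,
                                 const uint8_t *buf, int buf_size)
{
    AVCodecParserContext *pc = av_parser_init(avctx->codec_id);
    if (!pc)
        return;

    while (buf_size > 0) {
        uint8_t *out_data = NULL;
        int      out_size = 0;
        /* the parser consumes 'len' input bytes and, once it has assembled a
         * whole frame, returns it through out_data/out_size */
        int len = av_parser_parse2(pc, avctx, &out_data, &out_size,
                                   buf, buf_size,
                                   AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        buf      += len;
        buf_size -= len;

        if (out_size > 0) {
            /* a complete frame is available in out_data/out_size */
        }
    }
    av_parser_close(pc);
}
#endif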
    
    
    static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                       AVPacketList **pkt_buffer_end,
                                       AVPacket      *pkt)
    {
        AVPacketList *pktl;
        av_assert0(*pkt_buffer);
        pktl = *pkt_buffer;
        *pkt = pktl->pkt;
        *pkt_buffer = pktl->next;
        if (!pktl->next)
            *pkt_buffer_end = NULL;
        av_freep(&pktl);
        return 0;
    }
    
    
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret = 0, i, got_packet = 0;
    
        while (!got_packet && !s->parse_queue) {
            AVStream *st;
            AVPacket cur_pkt;
    
            /* read next packet */
    
            ret = ff_read_packet(s, &cur_pkt);
    
            if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;

            /* flush the parsers */
            for(i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }

                /* all remaining packets are now in parse_queue =>
                 * really terminate parsing */
                break;
            }
            ret = 0;
            st  = s->streams[cur_pkt.stream_index];
    
            if (cur_pkt.pts != AV_NOPTS_VALUE &&
                cur_pkt.dts != AV_NOPTS_VALUE &&
                cur_pkt.pts < cur_pkt.dts) {
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       cur_pkt.stream_index,
                       cur_pkt.pts,
                       cur_pkt.dts,
                       cur_pkt.size);
            }
            if (s->debug & FF_FDEBUG_TS)
    
                av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
    
                       cur_pkt.stream_index,
                       cur_pkt.pts,
                       cur_pkt.dts,
                       cur_pkt.size,
                       cur_pkt.duration,
                       cur_pkt.flags);
    
            if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
                st->parser->flags |= PARSER_FLAG_ONCE;
            }
        }

            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                *pkt = cur_pkt;
                compute_pkt_fields(s, st, NULL, pkt);
                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                    (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                    ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
            }

            got_packet = 1;
            } else if (st->discard < AVDISCARD_ALL) {
                if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                    return ret;
            } else {
                /* free packet */
            av_free_packet(&cur_pkt);
        }
    }

        if (!got_packet && s->parse_queue)
            ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
    
    
        if(s->debug & FF_FDEBUG_TS)
    
            av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
    
                pkt->stream_index,
                pkt->pts,
                pkt->dts,
                pkt->size,
                pkt->duration,
                pkt->flags);

    return ret;
}
    
    int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    
    {
    
        const int genpts = s->flags & AVFMT_FLAG_GENPTS;
        int          eof = 0;
    
    if (!genpts)
        return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
                                                          &s->packet_buffer_end,
                                                          pkt) :
                                  read_frame_internal(s, pkt);

            AVPacketList *pktl = s->packet_buffer;
    
                AVPacket *next_pkt = &pktl->pkt;
    
                if (next_pkt->dts != AV_NOPTS_VALUE) {
    
                    int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
    
                    while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                        if (pktl->pkt.stream_index == next_pkt->stream_index &&
                            (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
                             av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                            next_pkt->pts = pktl->pkt.dts;
    
                        pktl = pktl->next;
    
                /* read packet from packet buffer, if there is data */
                if (!(next_pkt->pts == AV_NOPTS_VALUE &&
                      next_pkt->dts != AV_NOPTS_VALUE && !eof))
    
                    return read_from_packet_buffer(&s->packet_buffer,
                                                   &s->packet_buffer_end, pkt);
    
            ret = read_frame_internal(s, pkt);
            if (ret < 0) {
                if (pktl && ret != AVERROR(EAGAIN)) {
                    eof = 1;
                    continue;
                } else
                    return ret;
            }

    
            if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                              &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
    
        }
    }
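
/*
 * Illustrative sketch only (not part of the original utils.c): a typical
 * caller-side demuxing loop around av_read_frame().  example_demux and the
 * filename are placeholders; the calls themselves are the public libavformat
 * API of this era.
 */
#if 0
#include "libavformat/avformat.h"

static int example_demux(const char *filename)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;
    int ret;

    av_register_all();
    if ((ret = avformat_open_input(&fmt, filename, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto end;

    /* av_read_frame() returns one packet per call until end of file */
    while (av_read_frame(fmt, &pkt) >= 0) {
        /* ... consume pkt.data / pkt.size for pkt.stream_index ... */
        av_free_packet(&pkt);
    }
    ret = 0;
end:
    avformat_close_input(&fmt);
    return ret;
}
#endif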
    
    /* XXX: suppress the packet queue */
    static void flush_packet_queue(AVFormatContext *s)
    {
    
        free_packet_buffer(&s->parse_queue,       &s->parse_queue_end);
    
        free_packet_buffer(&s->packet_buffer,     &s->packet_buffer_end);
        free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
    
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}

    /*******************************************************/
    /* seek support */
    
    
    int av_find_default_stream_index(AVFormatContext *s)
    {
    
    int first_audio_index = -1;
    int i;
        AVStream *st;
    
        if (s->nb_streams <= 0)
            return -1;
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
            !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
            return i;
        }
        if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            first_audio_index = i;
    }
    return first_audio_index >= 0 ? first_audio_index : 0;
}

void ff_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i, j;

    flush_packet_queue(s);
    
        /* for each stream, reset read state */
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
            if (st->parser) {
                av_parser_close(st->parser);
                st->parser = NULL;
            }
    
            st->last_IP_pts = AV_NOPTS_VALUE;
    
            st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
    
            st->reference_dts = AV_NOPTS_VALUE;
    
    
            for(j=0; j<MAX_REORDER_DELAY+1; j++)
            st->pts_buffer[j]= AV_NOPTS_VALUE;
    }
}

    void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
    {
    
        int i;
    
    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
    }
}

    void ff_reduce_index(AVFormatContext *s, int stream_index)
    {
        AVStream *st= s->streams[stream_index];
        unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
    
        if((unsigned)st->nb_index_entries >= max_entries){
            int i;
            for(i=0; 2*i<st->nb_index_entries; i++)
                st->index_entries[i]= st->index_entries[2*i];
            st->nb_index_entries= i;
        }
    }
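
/*
 * Illustrative sketch only (not part of the original utils.c): ff_reduce_index()
 * above halves the index by keeping every second entry once the configured
 * maximum is reached.  The same compaction on a plain array, with invented
 * numbers, purely for illustration:
 */
#if 0
#include <stdio.h>

int main(void)
{
    int entries[8] = { 0, 10, 20, 30, 40, 50, 60, 70 };
    int nb_entries = 8, i;

    for (i = 0; 2 * i < nb_entries; i++)
        entries[i] = entries[2 * i];      /* keep entries 0, 2, 4, 6 */
    nb_entries = i;                       /* now 4 */

    for (i = 0; i < nb_entries; i++)
        printf("%d ", entries[i]);        /* prints: 0 20 40 60 */
    printf("\n");
    return 0;
}
#endif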
    
    
    int ff_add_index_entry(AVIndexEntry **index_entries,
                           int *nb_index_entries,
                           unsigned int *index_entries_allocated_size,
                           int64_t pos, int64_t timestamp, int size, int distance, int flags)
    
    {
    AVIndexEntry *entries, *ie;
    int index;

    if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

        entries = av_fast_realloc(*index_entries,
                                  index_entries_allocated_size,
                                  (*nb_index_entries + 1) *
                                  sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    *index_entries= entries;
    
        index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
    
        if(index<0){
    
        index= (*nb_index_entries)++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
        }else{
            ie= &entries[index];
            if(ie->timestamp != timestamp){
    
                if(ie->timestamp <= timestamp)
                    return -1;
    
                memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
                (*nb_index_entries)++;
    
            }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
    
                distance= ie->min_distance;
    
    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}

    int av_add_index_entry(AVStream *st,
                           int64_t pos, int64_t timestamp, int size, int distance, int flags)
    {
        return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
                                  &st->index_entries_allocated_size, pos,
                                  timestamp, size, distance, flags);
    }
    
    int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
                              int64_t wanted_timestamp, int flags)
{
    int a, b, m;
    int64_t timestamp;

    a = - 1;
    b = nb_entries;
    
        //optimize appending index entries at the end
        if(b && entries[b-1].timestamp < wanted_timestamp)
            a= b-1;
    
    
        while (b - a > 1) {
            m = (a + b) >> 1;
    
            timestamp = entries[m].timestamp;
    
            if(timestamp >= wanted_timestamp)
                b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }

    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    
        if(!(flags & AVSEEK_FLAG_ANY)){
            while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
                m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
            }
        }
    
        if(m == nb_entries)
    
            return -1;
    return  m;
}
    
    int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                                  int flags)
    {
        return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
                                         wanted_timestamp, flags);
    }
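
/*
 * Illustrative sketch only (not part of the original utils.c): how a
 * demuxer-side seek handler could use av_index_search_timestamp() together
 * with the stored index entries.  example_read_seek is a placeholder and
 * does not come from any real demuxer.
 */
#if 0
static int example_read_seek(AVFormatContext *s, int stream_index,
                             int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    int index = av_index_search_timestamp(st, timestamp, flags);

    if (index < 0)
        return -1;                      /* no usable index entry */

    /* jump to the byte position recorded for the chosen entry and
     * resynchronize the decode timestamps from its timestamp */
    if (avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET) < 0)
        return -1;
    ff_update_cur_dts(s, st, st->index_entries[index].timestamp);
    return 0;
}
#endif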
    
    
    int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
    {
    
        int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    
        av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
    
        pos_limit= -1; //gcc falsely says it may be uninitialized
    
    
        st= s->streams[stream_index];
        if(st->index_entries){
            AVIndexEntry *e;
    
    
            index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
    
            index= FFMAX(index, 0);
    
            e= &st->index_entries[index];
    
            if(e->timestamp <= target_ts || e->pos == e->min_distance){
                pos_min= e->pos;
                ts_min= e->timestamp;
    
                av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
                        pos_min,ts_min);
    
    
            index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
    
            assert(index < st->nb_index_entries);
            if(index >= 0){
    
                e= &st->index_entries[index];
                assert(e->timestamp >= target_ts);
                pos_max= e->pos;
                ts_max= e->timestamp;
                pos_limit= pos_max - e->min_distance;
    
                av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                        pos_max,pos_limit, ts_max);
    
        pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    
    if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
        return ret;

    ff_update_cur_dts(s, st, ts);

    return 0;
}

    int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                          int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                          int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
                          int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
    {
    
        av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
    
        if(ts_min == AV_NOPTS_VALUE){
            pos_min = s->data_offset;
    
            ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
            if (ts_min == AV_NOPTS_VALUE)
                return -1;
        }
    
        if(ts_max == AV_NOPTS_VALUE){
            int step= 1024;
    
            filesize = avio_size(s->pb);
    
                ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
    
                step += step;
            }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
            if (ts_max == AV_NOPTS_VALUE)
                return -1;
    
                int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
    
                if(tmp_ts == AV_NOPTS_VALUE)
                    break;
                ts_max= tmp_ts;
                pos_max= tmp_pos;
    
        if(ts_min > ts_max){
            return -1;
        }else if(ts_min == ts_max){
            pos_limit= pos_min;
        }
    
    
            av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
                    pos_min, pos_max, ts_min, ts_max);
    
            assert(pos_limit <= pos_max);
    
            if(no_change==0){
                int64_t approximate_keyframe_distance= pos_max - pos_limit;
                // interpolate position (better than dichotomy)
    
                pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                    + pos_min - approximate_keyframe_distance;
    
            }else if(no_change==1){
                // bisection, if interpolation failed to change min or max pos last time
                pos = (pos_min + pos_limit)>>1;
            }else{
    
                /* linear search if bisection failed, can only happen if there
                   are very few or no keyframes between min/max */
    
                pos=pos_min;
            }
            if(pos <= pos_min)
                pos= pos_min + 1;
            else if(pos > pos_limit)
                pos= pos_limit;
            start_pos= pos;
    
    
            ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
    
            av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
                    pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
                    pos_limit, start_pos, no_change);
    
            if(ts == AV_NOPTS_VALUE){
                av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
                return -1;
            }
    
            if (target_ts <= ts) {
    
                pos_limit = start_pos - 1;
                pos_max = pos;
                ts_max = ts;
    
            }
            if (target_ts >= ts) {
    
        pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
        ts  = (flags & AVSEEK_FLAG_BACKWARD) ?  ts_min :  ts_max;
    
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
        ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
        av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
                pos, ts_min, target_ts, ts_max);
    
    static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    
        int64_t pos_min, pos_max;
    
        pos_min = s->data_offset;
    
        pos_max = avio_size(s->pb) - 1;
    
    
        if     (pos < pos_min) pos= pos_min;
        else if(pos > pos_max) pos= pos_max;
    
    
    avio_seek(s->pb, pos, SEEK_SET);
    return 0;
}

    static int seek_frame_generic(AVFormatContext *s,
    
                                     int stream_index, int64_t timestamp, int flags)
    
        AVStream *st;
        AVIndexEntry *ie;
    
        st = s->streams[stream_index];
    
        index = av_index_search_timestamp(st, timestamp, flags);
    
        if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
            return -1;
    
    
        if(index < 0 || index==st->nb_index_entries-1){
    
            if(st->nb_index_entries){
                assert(st->index_entries);
    
                ie= &st->index_entries[st->nb_index_entries-1];
    
            if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
                return ret;

                ff_update_cur_dts(s, st, ie->timestamp);
    
            if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
                return ret;

                int read_status;
    
            do {
                read_status = av_read_frame(s, &pkt);
            } while (read_status == AVERROR(EAGAIN));
                if (read_status < 0)
    
                    break;
                av_free_packet(&pkt);
                if(stream_index == pkt.stream_index){
    
                    if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
    
                        break;
                }
            }
            index = av_index_search_timestamp(st, timestamp, flags);
        }
    
        ff_read_frame_flush(s);
    
        if (s->iformat->read_seek){
            if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
                return 0;
        }
        ie = &st->index_entries[index];
    
    if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
        return ret;

    ff_update_cur_dts(s, st, ie->timestamp);

    return 0;
}

    static int seek_frame_internal(AVFormatContext *s, int stream_index,
                                   int64_t timestamp, int flags)
    
            if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
                return -1;
    
            return seek_frame_byte(s, stream_index, timestamp, flags);
    
        if(stream_index < 0){
            stream_index= av_find_default_stream_index(s);
            if(stream_index < 0)
                return -1;
    
            st= s->streams[stream_index];
    
            /* timestamp for default must be expressed in AV_TIME_BASE units */
    
            timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    
        /* first, we try the format specific seek */
    
        if (s->iformat->read_seek) {
            ff_read_frame_flush(s);
    
            ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    
        if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
            ff_read_frame_flush(s);
    
            return ff_seek_frame_binary(s, stream_index, timestamp, flags);
    
        } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
            ff_read_frame_flush(s);
    
            return seek_frame_generic(s, stream_index, timestamp, flags);
    
    int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
    {
        int ret = seek_frame_internal(s, stream_index, timestamp, flags);
    
        if (ret >= 0)
            queue_attached_pictures(s);
    
        return ret;
    }
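
/*
 * Illustrative sketch only (not part of the original utils.c): caller-side
 * use of av_seek_frame().  example_seek_to_10s, the stream index and the
 * 10-second target are placeholders; the rescale follows the documented rule
 * that a timestamp for stream_index == -1 is in AV_TIME_BASE units, while a
 * per-stream timestamp is in that stream's time_base.
 */
#if 0
#include "libavformat/avformat.h"
#include "libavutil/mathematics.h"

static int example_seek_to_10s(AVFormatContext *fmt, int stream_index)
{
    int64_t ts;

    if (stream_index < 0) {
        /* default stream: AV_TIME_BASE units (microseconds) */
        ts = 10 * (int64_t)AV_TIME_BASE;
        return av_seek_frame(fmt, -1, ts, AVSEEK_FLAG_BACKWARD);
    }

    /* specific stream: convert 10 seconds into that stream's time_base */
    ts = av_rescale(10, fmt->streams[stream_index]->time_base.den,
                        fmt->streams[stream_index]->time_base.num);
    return av_seek_frame(fmt, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}
#endif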
    
    
    int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
    {
        if(min_ts > ts || max_ts < ts)
            return -1;
    
    
            ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
    
            if (ret >= 0)
                queue_attached_pictures(s);
            return ret;
    
    
        if(s->iformat->read_timestamp){
            //try to seek via read_timestamp()
        }
    
    // Fall back to the old API if the new one is not implemented but the old one is.
    // Note that the old API has somewhat different semantics.
    if(s->iformat->read_seek || 1)
    
            return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
    
        // try some generic seek like seek_frame_generic() but with new ts semantics
    
    /*******************************************************/
    
/**
 * Check whether any stream has an accurate duration.
 *
 * @return TRUE if at least one stream has an accurate duration
 */

    static int has_duration(AVFormatContext *ic)
    
    {
        int i;
        AVStream *st;
    
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
    
        if (st->duration != AV_NOPTS_VALUE)
            return 1;
    }
    if (ic->duration != AV_NOPTS_VALUE)
        return 1;
    return 0;
}

    /**
 * Estimate the stream timings from those of each component.
     *
     * Also computes the global bitrate if possible.
     */
    
static void update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, end_time, end_time1;
    int64_t duration, duration1, filesize;
    int i;
    AVStream *st;

    start_time = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
    
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
    
            if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
    
                start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
    
                start_time = FFMIN(start_time, start_time1);
    
            if (st->duration != AV_NOPTS_VALUE) {
                end_time1 = start_time1
                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                end_time = FFMAX(end_time, end_time1);
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
            duration = FFMAX(duration, duration1);
        }
    }
    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
            if (end_time != INT64_MIN)
                duration = FFMAX(duration, end_time - start_time);
    
        }
        if (duration != INT64_MIN) {
            ic->duration = duration;
    
            if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
    
                /* compute the bitrate */
    
            ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
                           (double)ic->duration;
            }
        }
    }
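
/*
 * Illustrative sketch only (not part of the original utils.c): with both a
 * file size and a total duration known, the code above derives the global
 * bit rate as filesize * 8 * AV_TIME_BASE / duration (ic->duration is in
 * AV_TIME_BASE units).  The numbers below are invented to show the arithmetic.
 */
#if 0
#include <stdio.h>
#include "libavutil/avutil.h"     /* AV_TIME_BASE */

int main(void)
{
    int64_t filesize = 2500000;                    /* 2.5 MB               */
    int64_t duration = 20 * (int64_t)AV_TIME_BASE; /* 20 s in microseconds */

    int bit_rate = (double)filesize * 8.0 * AV_TIME_BASE / (double)duration;

    printf("bit_rate = %d bps\n", bit_rate);       /* prints: bit_rate = 1000000 bps */
    return 0;
}
#endif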
    
    static void fill_all_stream_timings(AVFormatContext *ic)
    {
        int i;
        AVStream *st;
    
    
        update_stream_timings(ic);
    
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE) {
    
                if(ic->start_time != AV_NOPTS_VALUE)
                    st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
                if(ic->duration != AV_NOPTS_VALUE)
                st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
        }
    }
}

    static void estimate_timings_from_bit_rate(AVFormatContext *ic)
    
    {
        int64_t filesize, duration;
        int bit_rate, i;
        AVStream *st;
    
        /* if bit_rate is already set, we believe it */
    
        if (ic->bit_rate <= 0) {
    
            bit_rate = 0;
            for(i=0;i<ic->nb_streams;i++) {
                st = ic->streams[i];
    
            if (st->codec->bit_rate > 0)
                bit_rate += st->codec->bit_rate;
            }
            ic->bit_rate = bit_rate;
        }
    
        /* if duration is already set, we believe it */
    
        if (ic->duration == AV_NOPTS_VALUE &&
    
            ic->bit_rate != 0) {
            filesize = ic->pb ? avio_size(ic->pb) : 0;
    
            if (filesize > 0) {
                for(i = 0; i < ic->nb_streams; i++) {
                    st = ic->streams[i];
    
                    duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
    
                    if (st->duration == AV_NOPTS_VALUE)
    
                        st->duration = duration;
                }
            }
        }
    }
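
/*
 * Illustrative sketch only (not part of the original utils.c): the estimate
 * above is duration = 8 * filesize / bit_rate, expressed directly in the
 * stream's time_base via av_rescale().  The time base and sizes below are
 * invented for illustration.
 */
#if 0
#include <stdio.h>
#include "libavutil/mathematics.h"

int main(void)
{
    int64_t filesize = 1250000;           /* 1.25 MB                 */
    int64_t bit_rate = 1000000;           /* 1 Mbit/s                */
    int     tb_num   = 1, tb_den = 90000; /* 90 kHz stream time base */

    /* 8*filesize bits / bit_rate = 10 s, scaled into time_base ticks */
    int64_t duration = av_rescale(8 * filesize, tb_den,
                                  bit_rate * (int64_t)tb_num);

    printf("estimated duration = %lld ticks (%.2f s)\n",
           (long long)duration, duration * (double)tb_num / tb_den);
    /* prints: estimated duration = 900000 ticks (10.00 s) */
    return 0;
}
#endif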
    
    #define DURATION_MAX_READ_SIZE 250000
    
    static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
    
    {
        AVPacket pkt1, *pkt = &pkt1;
        AVStream *st;
        int read_size, i, ret;
    
        /* flush packet queue */
        flush_packet_queue(ic);
    
    
    for (i=0; i<ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
    
                av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
    
            if (st->parser) {
                av_parser_close(st->parser);
                st->parser= NULL;
            }
        }
    
        /* estimate the end time (duration) */
        /* XXX: may need to support wrapping */
    
        filesize = ic->pb ? avio_size(ic->pb) : 0;
    
            offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
            if (offset < 0)
                offset = 0;
    
            avio_seek(ic->pb, offset, SEEK_SET);
            read_size = 0;
            for(;;) {
                if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
                    break;
    
            do {
                ret = ff_read_packet(ic, pkt);
            } while(ret == AVERROR(EAGAIN));
                if (ret != 0)
                    break;
                read_size += pkt->size;
                st = ic->streams[pkt->stream_index];
                if (pkt->pts != AV_NOPTS_VALUE &&
                    (st->start_time != AV_NOPTS_VALUE ||
                     st->first_dts  != AV_NOPTS_VALUE)) {
                    duration = end_time = pkt->pts;
                    if (st->start_time != AV_NOPTS_VALUE)
                        duration -= st->start_time;
                    else
                        duration -= st->first_dts;
                    if (duration < 0)
                        duration += 1LL<<st->pts_wrap_bits;
                    if (duration > 0) {
                        if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
                            st->duration = duration;
                    }