                    return ret;
                    /* return the last frames, if any */
                    for(i = 0; i < s->nb_streams; i++) {
                        st = s->streams[i];
    
                                            &pkt->data, &pkt->size,
                                            NULL, 0,
    
                                            AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                            AV_NOPTS_VALUE);
    
                    /* no more packets: really terminate parsing */
    
                st = s->streams[cur_pkt.stream_index];
                st->cur_pkt= cur_pkt;
    
                if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
                   st->cur_pkt.dts != AV_NOPTS_VALUE &&
                   st->cur_pkt.pts < st->cur_pkt.dts){
    
                    av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
    
                        st->cur_pkt.stream_index,
                        st->cur_pkt.pts,
                        st->cur_pkt.dts,
                        st->cur_pkt.size);
//                av_free_packet(&st->cur_pkt);
            }

                if(s->debug & FF_FDEBUG_TS)
    
                    av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
    
                        st->cur_pkt.stream_index,
                        st->cur_pkt.pts,
                        st->cur_pkt.dts,
                    st->cur_pkt.size,
                    st->cur_pkt.duration,
                    st->cur_pkt.flags);

                st->cur_ptr = st->cur_pkt.data;
                st->cur_len = st->cur_pkt.size;
    
                if (st->need_parsing && !st->parser) {
    
                    st->parser = av_parser_init(st->codec->codec_id);
    
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                    }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
    
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }
                    if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
    
                    st->parser->cur_offset= st->cur_pkt.pos;
                }
            }
        if(s->debug & FF_FDEBUG_TS)
    
            av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
    
            pkt->stream_index,
            pkt->pts,
            pkt->dts,
            pkt->size,
            pkt->duration,
            pkt->flags);

    return 0;
    }
    
    int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    
    {
        AVPacketList *pktl;
    
        int eof=0;
        const int genpts= s->flags & AVFMT_FLAG_GENPTS;
    
        for(;;){
            pktl = s->packet_buffer;
            if (pktl) {
                AVPacket *next_pkt= &pktl->pkt;
    
                if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                    while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
    
                        if(   pktl->pkt.stream_index == next_pkt->stream_index
    
                           && next_pkt->dts < pktl->pkt.dts
                           && pktl->pkt.pts != pktl->pkt.dts //not b frame
                           /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                            next_pkt->pts= pktl->pkt.dts;
                        }
                        pktl= pktl->next;
                    }
                    pktl = s->packet_buffer;
                }
    
    
                if(   next_pkt->pts != AV_NOPTS_VALUE
                   || next_pkt->dts == AV_NOPTS_VALUE
    
                   || !genpts || eof){
                    /* read packet from packet buffer, if there is data */
                    *pkt = *next_pkt;
                    s->packet_buffer = pktl->next;
                    av_free(pktl);
                    return 0;
                }
            }
            if(genpts){
                int ret= av_read_frame_internal(s, pkt);
                if(ret<0){
    
                if(pktl && ret != AVERROR(EAGAIN)){
                    eof=1;
                    continue;
                }else
                    return ret;
            }

            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                           &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
            }else{
                assert(!s->packet_buffer);
                return av_read_frame_internal(s, pkt);
            }
    
        }
    }
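/* Added illustration (not part of the original file): a minimal consumer of
 * av_read_frame() as defined above.  Assumes the AVFormatContext was opened
 * and probed elsewhere (e.g. av_open_input_file() + av_find_stream_info());
 * error handling is reduced to the bare minimum and the helper name is ours. */
static void example_demux_loop(AVFormatContext *ic)
{
    AVPacket pkt;

    for (;;) {
        if (av_read_frame(ic, &pkt) < 0)
            break;                /* EOF or read error */
        /* pkt.stream_index selects ic->streams[...]; pkt.pts/dts are in that
         * stream's time_base */
        av_free_packet(&pkt);     /* we own the packet returned above */
    }
}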
    
    /* XXX: suppress the packet queue */
    static void flush_packet_queue(AVFormatContext *s)
    {
        AVPacketList *pktl;
    
        for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
            s->packet_buffer = pktl->next;
            av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
        while(s->raw_packet_buffer){
            pktl = s->raw_packet_buffer;
            s->raw_packet_buffer = pktl->next;
            av_free_packet(&pktl->pkt);
            av_free(pktl);
        }
    
        s->packet_buffer_end=
        s->raw_packet_buffer_end= NULL;
    
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}

    /*******************************************************/
    /* seek support */
    
    
    int av_find_default_stream_index(AVFormatContext *s)
{
    int first_audio_index = -1;
    int i;
    AVStream *st;
    
        if (s->nb_streams <= 0)
            return -1;
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            return i;
        }
            if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
            first_audio_index = i;
    }
    return first_audio_index >= 0 ? first_audio_index : 0;
}

void av_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i, j;

    flush_packet_queue(s);

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
    
            if (st->parser) {
                av_parser_close(st->parser);
            st->parser = NULL;
        }
            st->last_IP_pts = AV_NOPTS_VALUE;
    
            st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
    
            st->reference_dts = AV_NOPTS_VALUE;
    
            /* fail safe */
            st->cur_ptr = NULL;
            st->cur_len = 0;
    
    
            for(j=0; j<MAX_REORDER_DELAY+1; j++)
            st->pts_buffer[j]= AV_NOPTS_VALUE;
    }
}

    void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
    
        int i;
    
    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

            st->cur_dts = av_rescale(timestamp,
    
                                     st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
    }
}

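/* Added illustration: the av_rescale() call in av_update_cur_dts() above is a
 * conversion of "timestamp" from ref_st->time_base into the other stream's
 * time_base.  A sketch of the same arithmetic on bare AVRationals (helper
 * name is ours, not part of the API): */
static int64_t example_rescale_to_stream_tb(int64_t ts, AVRational ref_tb,
                                            AVRational st_tb)
{
    /* ts * ref_tb / st_tb, kept in 64-bit intermediates as above */
    return av_rescale(ts, st_tb.den * (int64_t)ref_tb.num,
                          st_tb.num * (int64_t)ref_tb.den);
}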
    void ff_reduce_index(AVFormatContext *s, int stream_index)
    {
        AVStream *st= s->streams[stream_index];
        unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
    
        if((unsigned)st->nb_index_entries >= max_entries){
            int i;
            for(i=0; 2*i<st->nb_index_entries; i++)
                st->index_entries[i]= st->index_entries[2*i];
            st->nb_index_entries= i;
        }
    }
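/* Added illustration: the halving performed by ff_reduce_index() above, shown
 * on a bare array.  Keeping every second entry halves memory use while
 * preserving rough temporal coverage of the index (helper name is ours). */
static int example_halve_index(AVIndexEntry *entries, int nb_entries)
{
    int i;
    for (i = 0; 2 * i < nb_entries; i++)
        entries[i] = entries[2 * i];
    return i; /* the new, roughly halved, entry count */
}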
    
    
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
    
    {
    AVIndexEntry *entries, *ie;
    int index;

        if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
            return -1;
    
    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    st->index_entries= entries;

        index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
    
        if(index<0){
    
            index= st->nb_index_entries++;
            ie= &entries[index];
    
            assert(index==0 || ie[-1].timestamp < timestamp);
        }else{
            ie= &entries[index];
            if(ie->timestamp != timestamp){
    
                if(ie->timestamp <= timestamp)
                    return -1;
    
                memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
                st->nb_index_entries++;
    
            }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
    
            distance= ie->min_distance;
    }

        ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}

int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    int a, b, m;
    int64_t timestamp;

    a = - 1;
        b = nb_entries;
    
        while (b - a > 1) {
            m = (a + b) >> 1;
    
            timestamp = entries[m].timestamp;
    
            if(timestamp >= wanted_timestamp)
                b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
        m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    
        if(!(flags & AVSEEK_FLAG_ANY)){
            while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
                m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
            }
        }
    
        if(m == nb_entries)
    
            return -1;
    return  m;
}

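/* Added illustration: resolving a seek target to a byte position with the
 * binary search above.  The timestamp is in st->time_base units; with
 * AVSEEK_FLAG_BACKWARD the entry at or before it is chosen (helper name is
 * ours). */
static int64_t example_index_lookup(AVStream *st, int64_t ts)
{
    int index = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (index < 0)
        return -1;                        /* no entry at or before ts */
    return st->index_entries[index].pos;  /* byte offset to seek to */
}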
    #define DEBUG_SEEK
    
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts, ret;
    int index;
    AVStream *st;

    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);

    ts_max=
    ts_min= AV_NOPTS_VALUE;
    
        pos_limit= -1; //gcc falsely says it may be uninitialized
    
    
        st= s->streams[stream_index];
        if(st->index_entries){
            AVIndexEntry *e;
    
    
            index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
    
            index= FFMAX(index, 0);
    
            e= &st->index_entries[index];
    
            if(e->timestamp <= target_ts || e->pos == e->min_distance){
                pos_min= e->pos;
                ts_min= e->timestamp;
    #ifdef DEBUG_SEEK
    
                av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
                   pos_min,ts_min);
#endif
        }

            index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
    
            assert(index < st->nb_index_entries);
            if(index >= 0){
    
                e= &st->index_entries[index];
                assert(e->timestamp >= target_ts);
                pos_max= e->pos;
                ts_max= e->timestamp;
                pos_limit= pos_max - e->min_distance;
    #ifdef DEBUG_SEEK
    
                av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                       pos_max,pos_limit, ts_max);
    
        pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
        if(pos<0)
            return -1;
    
        /* do the seek */
    
        if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
            return ret;
    
    
        av_update_cur_dts(s, st, ts);
    
        return 0;
    }
    
    int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
        int64_t pos, ts;
        int64_t start_pos, filesize;
        int no_change;
    
    #ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
    #endif
    
    
        if(ts_min == AV_NOPTS_VALUE){
            pos_min = s->data_offset;
    
            ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    
            if (ts_min == AV_NOPTS_VALUE)
                return -1;
        }
    
        if(ts_max == AV_NOPTS_VALUE){
            int step= 1024;
    
        filesize = url_fsize(s->pb);
        pos_max = filesize - 1;
        do{
            pos_max -= step;
            ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
    
                step += step;
            }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
            if (ts_max == AV_NOPTS_VALUE)
                return -1;
    
        for(;;){
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)
                break;
            ts_max= tmp_ts;
            pos_max= tmp_pos;
            if(tmp_pos >= filesize)
                break;
        }
        pos_limit= pos_max;
    }

        if(ts_min > ts_max){
            return -1;
        }else if(ts_min == ts_max){
            pos_limit= pos_min;
        }
    
    
        no_change=0;
        while (pos_min < pos_limit) {
    #ifdef DEBUG_SEEK
    
            av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
    
                   pos_min, pos_max,
                   ts_min, ts_max);
    #endif
            assert(pos_limit <= pos_max);
    
            if(no_change==0){
                int64_t approximate_keyframe_distance= pos_max - pos_limit;
                // interpolate position (better than dichotomy)
    
                pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                    + pos_min - approximate_keyframe_distance;
    
            }else if(no_change==1){
                // bisection, if interpolation failed to change min or max pos last time
                pos = (pos_min + pos_limit)>>1;
            }else{
    
                /* linear search if bisection failed, can only happen if there
                   are very few or no keyframes between min/max */
    
                pos=pos_min;
            }
            if(pos <= pos_min)
                pos= pos_min + 1;
            else if(pos > pos_limit)
                pos= pos_limit;
            start_pos= pos;
    
    
            ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
    
            if(pos == pos_max)
                no_change++;
            else
                no_change=0;
    #ifdef DEBUG_SEEK
    
            av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
                   pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
               start_pos, no_change);
#endif
            if(ts == AV_NOPTS_VALUE){
                av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
                return -1;
            }
    
            if (target_ts <= ts) {
    
                pos_limit = start_pos - 1;
                pos_max = pos;
                ts_max = ts;
    
        }
        if (target_ts >= ts) {
            pos_min = pos;
            ts_min = ts;
        }
    }

        pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
        ts  = (flags & AVSEEK_FLAG_BACKWARD) ?  ts_min :  ts_max;
    
#if 0
    pos_min = pos;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    pos_min++;
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
           pos, ts_min, ts, ts_max);
#endif
    *ts_ret= ts;
    return pos;
}

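/* Added illustration: the position guess used by the no_change==0 branch of
 * av_gen_search() above, assuming timestamps grow roughly linearly with byte
 * offset; the guess is pulled back by the approximate keyframe distance
 * before probing (helper name is ours). */
static int64_t example_interpolate_pos(int64_t target_ts,
                                       int64_t ts_min, int64_t ts_max,
                                       int64_t pos_min, int64_t pos_max,
                                       int64_t approximate_keyframe_distance)
{
    return av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
           + pos_min - approximate_keyframe_distance;
}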
    static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
        int64_t pos_min, pos_max;
    #if 0
        AVStream *st;
    
        if (stream_index < 0)
            return -1;
    
        st= s->streams[stream_index];
    #endif
    
        pos_min = s->data_offset;
    
        pos_max = url_fsize(s->pb) - 1;
    
    
        if     (pos < pos_min) pos= pos_min;
        else if(pos > pos_max) pos= pos_max;
    
    
    url_fseek(s->pb, pos, SEEK_SET);

#if 0
    av_update_cur_dts(s, st, ts);
#endif
    return 0;
}

    static int av_seek_frame_generic(AVFormatContext *s,
    
                                 int stream_index, int64_t timestamp, int flags)
{
    int index;
    int64_t ret;
    AVStream *st;
    AVIndexEntry *ie;
    
        st = s->streams[stream_index];
    
        index = av_index_search_timestamp(st, timestamp, flags);
    
    if(index < 0 || index==st->nb_index_entries-1){
        AVPacket pkt;

            if(st->nb_index_entries){
                assert(st->index_entries);
    
                ie= &st->index_entries[st->nb_index_entries-1];
    
                if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
                    return ret;
    
                av_update_cur_dts(s, st, ie->timestamp);
    
        }else{
            if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
                return ret;
        }
        for(;;) {
            int ret;
                do{
                    ret = av_read_frame(s, &pkt);
                }while(ret == AVERROR(EAGAIN));
    
                if(ret<0)
                    break;
                av_free_packet(&pkt);
                if(stream_index == pkt.stream_index){
                    if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
                        break;
                }
            }
            index = av_index_search_timestamp(st, timestamp, flags);
        }
    
        if (index < 0)
            return -1;
    
        av_read_frame_flush(s);
    
        if (s->iformat->read_seek){
            if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
                return 0;
        }
        ie = &st->index_entries[index];
    
        if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
            return ret;
    
    av_update_cur_dts(s, st, ie->timestamp);

    return 0;
}

int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    int ret;
    AVStream *st;

    av_read_frame_flush(s);
    
        if(flags & AVSEEK_FLAG_BYTE)
            return av_seek_frame_byte(s, stream_index, timestamp, flags);
    
        if(stream_index < 0){
            stream_index= av_find_default_stream_index(s);
            if(stream_index < 0)
                return -1;
    
            st= s->streams[stream_index];
    
           /* timestamp for default must be expressed in AV_TIME_BASE units */
    
        timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    }

    /* first, we try the format specific seek */
    if (s->iformat->read_seek)
        ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    else
        ret = -1;
    if (ret >= 0)
        return 0;

    if (s->iformat->read_timestamp)
        return av_seek_frame_binary(s, stream_index, timestamp, flags);
    else
        return av_seek_frame_generic(s, stream_index, timestamp, flags);
}

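/* Added illustration: seeking with the default stream (stream_index = -1),
 * where the timestamp must be in AV_TIME_BASE units as rescaled above.
 * "seconds" is a hypothetical caller-supplied value and the helper name is
 * ours. */
static int example_seek_to_seconds(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);
    /* AVSEEK_FLAG_BACKWARD: land on a keyframe at or before ts */
    return av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
}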
    int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
    {
        if(min_ts > ts || max_ts < ts)
            return -1;
    
        av_read_frame_flush(s);
    
        if (s->iformat->read_seek2)
            return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
    
        if(s->iformat->read_timestamp){
            //try to seek via read_timestamp()
        }
    
        //Fallback to old API if new is not implemented but old is
    //Note the old API has somewhat different semantics
        if(s->iformat->read_seek || 1)
    
            return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
    
    
        // try some generic seek like av_seek_frame_generic() but with new ts semantics
    }
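/* Added illustration: the new-style call above, asking for any position whose
 * timestamp falls within roughly two seconds before ts in the given stream's
 * time base.  The window size is purely illustrative (helper name is ours). */
static int example_seek_window(AVFormatContext *ic, int stream_index,
                               int64_t ts, AVRational tb)
{
    int64_t window = av_rescale_q(2 * AV_TIME_BASE, AV_TIME_BASE_Q, tb);
    return avformat_seek_file(ic, stream_index, ts - window, ts, ts, 0);
}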
    
    
    /*******************************************************/
    
/**
 * Returns TRUE if the given context has an accurate duration in any stream.
 *
 * @return TRUE if there is an accurate duration for at least one component.
 */
    static int av_has_duration(AVFormatContext *ic)
    
    {
        int i;
        AVStream *st;
    
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->duration != AV_NOPTS_VALUE)
            return 1;
    }
    return 0;
}

    /**
 * Estimate the stream timings from those of each component.
     *
     * Also computes the global bitrate if possible.
     */
    
    static void av_update_stream_timings(AVFormatContext *ic)
    {
    
    int64_t start_time, start_time1, end_time, end_time1;
    int64_t duration, duration1;
    int i;
    AVStream *st;

    start_time = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
    
            if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
    
            start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
            if (start_time1 < start_time)
                start_time = start_time1;
            if (st->duration != AV_NOPTS_VALUE) {
                end_time1 = start_time1
                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                if (end_time1 > end_time)
                    end_time = end_time1;
            }
        }
            if (st->duration != AV_NOPTS_VALUE) {
                duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                if (duration1 > duration)
                    duration = duration1;
        }
    }
    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
            if (end_time != INT64_MIN) {
    
                if (end_time - start_time > duration)
                    duration = end_time - start_time;
            }
        }
        if (duration != INT64_MIN) {
            ic->duration = duration;
            if (ic->file_size > 0) {
    
                /* compute the bitrate */
    
                ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
                    (double)ic->duration;
    
            }
        }
    }
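/* Added illustration: the global bitrate derived above, i.e. total bits
 * divided by the duration in seconds (duration is in AV_TIME_BASE units).
 * For a 10 MB file lasting 80 s this gives 1 Mb/s (helper name is ours). */
static int example_bit_rate_from_size(int64_t file_size, int64_t duration)
{
    return (int)((double)file_size * 8.0 * AV_TIME_BASE / (double)duration);
}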
    
    static void fill_all_stream_timings(AVFormatContext *ic)
    {
        int i;
        AVStream *st;
    
        av_update_stream_timings(ic);
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE) {
    
                if(ic->start_time != AV_NOPTS_VALUE)
                    st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
                if(ic->duration != AV_NOPTS_VALUE)
                    st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
    
            }
        }
    }
    
    static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
    {
        int64_t filesize, duration;
        int bit_rate, i;
        AVStream *st;
    
        /* if bit_rate is already set, we believe it */
        if (ic->bit_rate == 0) {
            bit_rate = 0;
            for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            bit_rate += st->codec->bit_rate;
            }
            ic->bit_rate = bit_rate;
        }
    
        /* if duration is already set, we believe it */
    
        if (ic->duration == AV_NOPTS_VALUE &&
            ic->bit_rate != 0 &&
    
            ic->file_size != 0)  {
            filesize = ic->file_size;
            if (filesize > 0) {
                for(i = 0; i < ic->nb_streams; i++) {
                    st = ic->streams[i];
    
                    duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
    
                    if (st->duration == AV_NOPTS_VALUE)
    
                        st->duration = duration;
                }
            }
        }
    }
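/* Added illustration: the inverse estimate used above, duration from file
 * size and bitrate, expressed in a stream's time_base.  E.g. a 1 MB file at
 * 1 Mb/s with time_base 1/1000 gives av_rescale(8000000, 1000, 1000000) =
 * 8000, i.e. 8.000 s (helper name is ours). */
static int64_t example_duration_from_bit_rate(int64_t filesize, int bit_rate,
                                              AVRational tb)
{
    return av_rescale(8 * filesize, tb.den, bit_rate * (int64_t)tb.num);
}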
    
    #define DURATION_MAX_READ_SIZE 250000
    
    /* only usable for MPEG-PS streams */
    
    static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
    
    {
        AVPacket pkt1, *pkt = &pkt1;
        AVStream *st;
        int read_size, i, ret;
    
    int64_t end_time, start_time[MAX_STREAMS];
    int64_t duration, filesize, offset;

        ic->cur_st = NULL;
    
        /* flush packet queue */
        flush_packet_queue(ic);
    
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
    
            if(st->start_time != AV_NOPTS_VALUE){
                start_time[i]= st->start_time;
            }else if(st->first_dts != AV_NOPTS_VALUE){
                start_time[i]= st->first_dts;
            }else
                av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
    
    
            if (st->parser) {
                av_parser_close(st->parser);
            st->parser= NULL;
        }
    }

        /* estimate the end time (duration) */
        /* XXX: may need to support wrapping */
        filesize = ic->file_size;
        offset = filesize - DURATION_MAX_READ_SIZE;
        if (offset < 0)
            offset = 0;
    
    
        url_fseek(ic->pb, offset, SEEK_SET);
    
        read_size = 0;
        for(;;) {
            if (read_size >= DURATION_MAX_READ_SIZE)
                break;
    
            do{
                ret = av_read_packet(ic, pkt);
            }while(ret == AVERROR(EAGAIN));
    
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
    
            if (pkt->pts != AV_NOPTS_VALUE &&
    
                start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
    
                end_time = pkt->pts;
    
                duration = end_time - start_time[pkt->stream_index];
    
                if (duration > 0) {
                    if (st->duration == AV_NOPTS_VALUE ||
                        st->duration < duration)
                        st->duration = duration;
                }
            }
            av_free_packet(pkt);
        }
    
        fill_all_stream_timings(ic);
    
        url_fseek(ic->pb, old_offset, SEEK_SET);
    
        for(i=0; i<ic->nb_streams; i++){
            st= ic->streams[i];
            st->cur_dts= st->first_dts;
    
        st->last_IP_pts = AV_NOPTS_VALUE;
    }
}

    static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
    
    {
        int64_t file_size;
    
        /* get the file size, if possible */
        if (ic->iformat->flags & AVFMT_NOFILE) {
            file_size = 0;
        } else {
    
            file_size = url_fsize(ic->pb);
    
            if (file_size < 0)
                file_size = 0;
        }
        ic->file_size = file_size;
    
    
        if ((!strcmp(ic->iformat->name, "mpeg") ||
             !strcmp(ic->iformat->name, "mpegts")) &&
    
            file_size && !url_is_streamed(ic->pb)) {
    
            /* get accurate estimate from the PTSes */
    
            av_estimate_timings_from_pts(ic, old_offset);
    
        } else if (av_has_duration(ic)) {
    
            /* at least one component has timings - we use them for all
    
               the components */
            fill_all_stream_timings(ic);
        } else {
    
            av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
    
            /* less precise: use bitrate info */
    
            av_estimate_timings_from_bit_rate(ic);
        }
        av_update_stream_timings(ic);
    
    #if 0
        {
            int i;
            AVStream *st;
            for(i = 0;i < ic->nb_streams; i++) {
                st = ic->streams[i];
    
            printf("%d: start_time: %0.3f duration: %0.3f\n",
                   i, (double)st->start_time / AV_TIME_BASE,
    
            printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
                   (double)ic->start_time / AV_TIME_BASE,
    
                   (double)ic->duration / AV_TIME_BASE,
                   ic->bit_rate / 1000);
        }
    #endif
    }
    
    
    static int has_codec_parameters(AVCodecContext *enc)
    {
        int val;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
    
            val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
    
        if(!enc->frame_size &&
           (enc->codec_id == CODEC_ID_VORBIS ||
            enc->codec_id == CODEC_ID_AAC ||
            enc->codec_id == CODEC_ID_SPEEX))
            return 0;
        break;
    case CODEC_TYPE_VIDEO:
        val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
        break;
    default:
        val = 1;
        break;
    }
    
    return enc->codec_id != CODEC_ID_NONE && val != 0;
}

    static int try_decode_frame(AVStream *st, AVPacket *avpkt)
    
    {
        int16_t *samples;
        AVCodec *codec;
    
    int got_picture, data_size, ret=0;
    AVFrame picture;

        if(!st->codec->codec){
            codec = avcodec_find_decoder(st->codec->codec_id);
            if (!codec)
                return -1;
            ret = avcodec_open(st->codec, codec);
            if (ret < 0)
                return ret;
        }
    
        if(!has_codec_parameters(st->codec)){
            switch(st->codec->codec_type) {
            case CODEC_TYPE_VIDEO:
    
                avcodec_get_frame_defaults(&picture);
    
                ret = avcodec_decode_video2(st->codec, &picture,
                                            &got_picture, avpkt);
                break;
            case CODEC_TYPE_AUDIO:
                data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
                samples = av_malloc(data_size);
                if (!samples)
                    goto fail;
                ret = avcodec_decode_audio3(st->codec, samples,
                                            &data_size, avpkt);
                av_free(samples);
                break;
        default:
            break;
        }
    }
 fail:
    return ret;
}

    unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
    
    {
        while (tags->id != CODEC_ID_NONE) {
            if (tags->id == id)
                return tags->tag;
            tags++;
        }
        return 0;
    }
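/* Added illustration: a typical lookup against one AVCodecTag table, e.g.
 * mapping a codec id to its RIFF fourcc.  Which table to pass depends on the
 * container and is left to the caller here (helper name is ours). */
static unsigned int example_lookup_tag(const AVCodecTag *table)
{
    /* in the RIFF video table this is expected to yield MKTAG('M','J','P','G') */
    return ff_codec_get_tag(table, CODEC_ID_MJPEG);
}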
    
    
enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
        int i;
        for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
            if(tag == tags[i].tag)
                return tags[i].id;
        }
        for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
            if(   toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
               && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
               && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
               && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
            return tags[i].id;
    }
    return CODEC_ID_NONE;
}

unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
{
    int i;
    for(i=0; tags && tags[i]; i++){
        int tag= ff_codec_get_tag(tags[i], id);
        if(tag) return tag;
    }
    return 0;
}

enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
{
    int i;
    for(i=0; tags && tags[i]; i++){
        enum CodecID id= ff_codec_get_id(tags[i], tag);
    
            if(id!=CODEC_ID_NONE) return id;
        }
        return CODEC_ID_NONE;
    }
    
    
    static void compute_chapters_end(AVFormatContext *s)
    {
        unsigned int i;
    
        for (i=0; i+1<s->nb_chapters; i++)
            if (s->chapters[i]->end == AV_NOPTS_VALUE) {
                assert(s->chapters[i]->start <= s->chapters[i+1]->start);
                assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
                s->chapters[i]->end = s->chapters[i+1]->start;
            }
    
        if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
            assert(s->start_time != AV_NOPTS_VALUE);
            assert(s->duration > 0);
            s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,