Skip to content
Snippets Groups Projects
utils.c 79.1 KiB
Newer Older
  • Learn to ignore specific revisions
  • /**
     * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
   * This isn't supposed to be called directly by a user application, but by demuxers.
     * @param target_ts target timestamp in the time base of the given stream
     * @param stream_index stream number
     */
    
    int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts){
        /* NOTE(review): this extracted copy is corrupted -- 'st', 'index',
         * 'no_change' and 'i' are used without visible declarations and the
         * tail of the function (after the cur_dts loop) is missing.
         * Recover the full body from upstream before editing the logic. */
        AVInputFormat *avif= s->iformat;
        int64_t pos_min, pos_max, pos, pos_limit;
        int64_t ts_min, ts_max, ts;
        int64_t start_pos;
    
    #ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
    #endif
    
        ts_max=
        ts_min= AV_NOPTS_VALUE;
        pos_limit= -1; //gcc falsely says it may be uninitialized
    
        st= s->streams[stream_index];
        if(st->index_entries){
            AVIndexEntry *e;
    
            /* try to seed the lower/upper search bounds from the cached index */
            index= av_index_search_timestamp(st, target_ts);
            e= &st->index_entries[index];
    
            if(e->timestamp <= target_ts || e->pos == e->min_distance){
                pos_min= e->pos;
                ts_min= e->timestamp;
    #ifdef DEBUG_SEEK
    
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n", 
    
                   pos_min,ts_min);
    #endif
            }else{
                assert(index==0);
            }
            index++;
            if(index < st->nb_index_entries){
                e= &st->index_entries[index];
                assert(e->timestamp >= target_ts);
                pos_max= e->pos;
                ts_max= e->timestamp;
                pos_limit= pos_max - e->min_distance;
    #ifdef DEBUG_SEEK
    
            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n", 
    
                   pos_max,pos_limit, ts_max);
    #endif
            }
        }
    
        /* no lower bound from the index: read the first timestamp in the file */
        if(ts_min == AV_NOPTS_VALUE){
            pos_min = s->data_offset;
            ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
            if (ts_min == AV_NOPTS_VALUE)
                return -1;
        }
    
        /* no upper bound: scan backwards from EOF in doubling steps until a
           timestamp is found, then walk forward to the very last one */
        if(ts_max == AV_NOPTS_VALUE){
            int step= 1024;
            pos_max = url_filesize(url_fileno(&s->pb)) - 1;
            do{
                pos_max -= step;
                ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
                step += step;
            }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
            if (ts_max == AV_NOPTS_VALUE)
                return -1;
            
            for(;;){
                int64_t tmp_pos= pos_max + 1;
                int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
                if(tmp_ts == AV_NOPTS_VALUE)
                    break;
                ts_max= tmp_ts;
                pos_max= tmp_pos;
            }
            pos_limit= pos_max;
        }
    
        /* interpolated binary search between pos_min and pos_limit */
        no_change=0;
        while (pos_min < pos_limit) {
    #ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n", 
               pos_min, pos_max,
               ts_min, ts_max);
    #endif
            assert(pos_limit <= pos_max);
    
            if(no_change==0){
                int64_t approximate_keyframe_distance= pos_max - pos_limit;
                // interpolate position (better than dichotomy)
                pos = (int64_t)((double)(pos_max - pos_min) *
                                (double)(target_ts - ts_min) /
                                (double)(ts_max - ts_min)) + pos_min - approximate_keyframe_distance;
            }else if(no_change==1){
                // bisection, if interpolation failed to change min or max pos last time
                pos = (pos_min + pos_limit)>>1;
            }else{
                // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
                pos=pos_min;
            }
            if(pos <= pos_min)
                pos= pos_min + 1;
            else if(pos > pos_limit)
                pos= pos_limit;
            start_pos= pos;
    
            ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
            if(pos == pos_max)
                no_change++;
            else
                no_change=0;
    #ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
    #endif
            assert(ts != AV_NOPTS_VALUE);
            if (target_ts < ts) {
                pos_limit = start_pos - 1;
                pos_max = pos;
                ts_max = ts;
            } else {
                pos_min = pos;
                ts_min = ts;
                /* check if we are lucky */
                if (target_ts == ts)
                    break;
            }
        }
        
        pos = pos_min;
    #ifdef DEBUG_SEEK
        pos_min = pos;
        ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        pos_min++;
        ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n", 
               pos, ts_min, target_ts, ts_max);
    #endif
        /* do the seek */
        url_fseek(&s->pb, pos, SEEK_SET);
    
        /* propagate the seeked-to timestamp to every stream's cur_dts */
        ts= av_rescale(ts_min, AV_TIME_BASE*(int64_t)st->time_base.num, st->time_base.den);
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
    
            st->cur_dts = av_rescale(ts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
        /* NOTE(review): the remainder of this function (loop/function closing
           braces and the return statement) is missing from this copy. */
    
    static int av_seek_frame_generic(AVFormatContext *s, 
                                     int stream_index, int64_t timestamp)
    {
        /* Seek using the generic per-stream index: build a raw index on
         * demand, look up the keyframe entry at or before 'timestamp', then
         * byte-seek to its position.
         * NOTE(review): this copy is corrupted -- 'index' and 'i' have no
         * visible declarations and the final for-loop is never closed. */
        AVStream *st;
        AVIndexEntry *ie;
    
        if (!s->index_built) {
            if (is_raw_stream(s)) {
                av_build_index_raw(s);
            } else {
                return -1;
            }
            s->index_built = 1;
        }
    
        st = s->streams[stream_index];
    
        index = av_index_search_timestamp(st, timestamp);
    
        if (index < 0)
            return -1;
    
        /* now we have found the index, we can seek */
        ie = &st->index_entries[index];
        av_read_frame_flush(s);
        url_fseek(&s->pb, ie->pos, SEEK_SET);
    
        /* propagate the entry's timestamp to every stream's cur_dts */
        timestamp= av_rescale(ie->timestamp, AV_TIME_BASE*(int64_t)st->time_base.num, st->time_base.den);
        for(i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
    
    
            st->cur_dts = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
    
        return 0;
    }
    
    /**
     * Seek to the key frame just before the frame at timestamp
    
     * 'timestamp' in 'stream_index'.
     * @param stream_index If stream_index is (-1), a default
     * stream is selected
     * @param timestamp timestamp in AV_TIME_BASE units
     * @return >= 0 on success
    
     */
    int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp)
    {
        int ret;
    
        /* NOTE(review): corrupted copy -- 'st' is used without a visible
         * declaration and the function's closing brace is missing. */
        if(stream_index < 0){
            stream_index= av_find_default_stream_index(s);
            if(stream_index < 0)
                return -1;
        }
        st= s->streams[stream_index];
    
        /* convert from AV_TIME_BASE units into the stream's own time base */
        timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    
    
        /* first, we try the format specific seek */
        if (s->iformat->read_seek)
            ret = s->iformat->read_seek(s, stream_index, timestamp);
        else
            ret = -1;
        if (ret >= 0) {
            return 0;
        }
    
        /* fall back: binary search over read_timestamp() if the demuxer
           provides it, otherwise the generic index-based seek */
        if(s->iformat->read_timestamp)
            return av_seek_frame_binary(s, stream_index, timestamp);
        else
            return av_seek_frame_generic(s, stream_index, timestamp);
    /*******************************************************/
    
    
    /* Return TRUE if at least one stream carries both a start time and a
       duration, i.e. the container has usable timing information. */
    static int av_has_timings(AVFormatContext *ic)
    {
        int i;
    
        for (i = 0; i < ic->nb_streams; i++) {
            const AVStream *st = ic->streams[i];
            if (st->start_time != AV_NOPTS_VALUE && st->duration != AV_NOPTS_VALUE)
                return 1;
        }
        return 0;
    }
    
    /* Estimate the container-level timings from those of each component
       stream, and compute the global bit rate when the file size is known.
       Fix vs. original: 'end_time' is initialized to MININT64, so the guard
       before using it must compare against MININT64 (not MAXINT64) --
       otherwise a file in which no stream has a duration would compute a
       garbage ic->duration from the MININT64 sentinel. */
    static void av_update_stream_timings(AVFormatContext *ic)
    {
        int64_t start_time, end_time, end_time1;
        int i;
        AVStream *st;
    
        start_time = MAXINT64;
        end_time = MININT64;
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time != AV_NOPTS_VALUE) {
                if (st->start_time < start_time)
                    start_time = st->start_time;
                if (st->duration != AV_NOPTS_VALUE) {
                    end_time1 = st->start_time + st->duration;
                    if (end_time1 > end_time)
                        end_time = end_time1;
                }
            }
        }
        if (start_time != MAXINT64) {
            ic->start_time = start_time;
            /* only a real end time (some stream had a duration) is usable */
            if (end_time != MININT64) {
                ic->duration = end_time - start_time;
                if (ic->file_size > 0) {
                    /* compute the bit rate */
                    ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / 
                        (double)ic->duration;
                }
            }
        }
    }
    
    /* Propagate the container-level start time and duration (recomputed
       first) to every stream that has no start time of its own. */
    static void fill_all_stream_timings(AVFormatContext *ic)
    {
        int i;
    
        av_update_stream_timings(ic);
        for (i = 0; i < ic->nb_streams; i++) {
            AVStream *cur = ic->streams[i];
            if (cur->start_time != AV_NOPTS_VALUE)
                continue;
            cur->start_time = ic->start_time;
            cur->duration   = ic->duration;
        }
    }
    
    /* Derive missing timings from the total bit rate: when the container
       bit rate is unset, sum the per-codec bit rates; then, if a file size
       is available, estimate duration = size * 8 / bit_rate and stamp it on
       every stream that is missing start time or duration. */
    static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
    {
        int i;
    
        /* if bit_rate is already set, we believe it */
        if (ic->bit_rate == 0) {
            int total = 0;
            for (i = 0; i < ic->nb_streams; i++)
                total += ic->streams[i]->codec.bit_rate;
            ic->bit_rate = total;
        }
    
        /* if duration is already set, we believe it */
        if (ic->duration == AV_NOPTS_VALUE &&
            ic->bit_rate != 0 &&
            ic->file_size != 0) {
            int64_t filesize = ic->file_size;
            if (filesize > 0) {
                int64_t duration = (int64_t)((8 * AV_TIME_BASE * (double)filesize) / (double)ic->bit_rate);
                for (i = 0; i < ic->nb_streams; i++) {
                    AVStream *st = ic->streams[i];
                    if (st->start_time == AV_NOPTS_VALUE ||
                        st->duration == AV_NOPTS_VALUE) {
                        st->start_time = 0;
                        st->duration = duration;
                    }
                }
            }
        }
    }
    
    #define DURATION_MAX_READ_SIZE 250000
    
    /* only usable for MPEG-PS streams */
    static void av_estimate_timings_from_pts(AVFormatContext *ic)
    {
        /* Scan packets at the head of the file for the first PTS of each
         * stream, then packets near the tail for the last PTS, and derive
         * per-stream and container start_time/duration from them.
         * NOTE(review): this copy is corrupted -- the first packet-reading
         * loop below is truncated (its closing braces and av_free_packet
         * call are missing).  Recover the full body from upstream. */
        AVPacket pkt1, *pkt = &pkt1;
        AVStream *st;
        int read_size, i, ret;
        int64_t start_time, end_time, end_time1;
        int64_t filesize, offset, duration;
        
    
        /* free previous packet */
        if (ic->cur_st && ic->cur_st->parser)
            av_free_packet(&ic->cur_pkt); 
        ic->cur_st = NULL;
    
        /* flush packet queue */
        flush_packet_queue(ic);
    
    
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            if (st->parser) {
                av_parser_close(st->parser);
                st->parser= NULL;
            }
        }
        
    
        /* we read the first packets to get the first PTS (not fully
           accurate, but it is enough now) */
        url_fseek(&ic->pb, 0, SEEK_SET);
        read_size = 0;
        for(;;) {
            if (read_size >= DURATION_MAX_READ_SIZE)
                break;
            /* if all info is available, we can stop */
            for(i = 0;i < ic->nb_streams; i++) {
                st = ic->streams[i];
                if (st->start_time == AV_NOPTS_VALUE)
                    break;
            }
            if (i == ic->nb_streams)
                break;
    
            ret = av_read_packet(ic, pkt);
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            if (pkt->pts != AV_NOPTS_VALUE) {
                if (st->start_time == AV_NOPTS_VALUE)
    
                    st->start_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
    
    
        /* we compute the minimum start_time and use it as default */
        start_time = MAXINT64;
        for(i = 0; i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time != AV_NOPTS_VALUE &&
                st->start_time < start_time)
                start_time = st->start_time;
        }
        if (start_time != MAXINT64)
            ic->start_time = start_time;
        
        /* estimate the end time (duration) */
        /* XXX: may need to support wrapping */
        filesize = ic->file_size;
        offset = filesize - DURATION_MAX_READ_SIZE;
        if (offset < 0)
            offset = 0;
    
        url_fseek(&ic->pb, offset, SEEK_SET);
        read_size = 0;
        for(;;) {
            if (read_size >= DURATION_MAX_READ_SIZE)
                break;
            /* if all info is available, we can stop */
            for(i = 0;i < ic->nb_streams; i++) {
                st = ic->streams[i];
                if (st->duration == AV_NOPTS_VALUE)
                    break;
            }
            if (i == ic->nb_streams)
                break;
            
            ret = av_read_packet(ic, pkt);
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            if (pkt->pts != AV_NOPTS_VALUE) {
    
                end_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
    
                duration = end_time - st->start_time;
                if (duration > 0) {
                    if (st->duration == AV_NOPTS_VALUE ||
                        st->duration < duration)
                        st->duration = duration;
                }
            }
            av_free_packet(pkt);
        }
        
        /* estimate total duration */
        end_time = MININT64;
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->duration != AV_NOPTS_VALUE) {
                end_time1 = st->start_time + st->duration;
                if (end_time1 > end_time)
                    end_time = end_time1;
            }
        }
        
        /* update start_time (new stream may have been created, so we do
           it at the end */
        if (ic->start_time != AV_NOPTS_VALUE) {
            for(i = 0; i < ic->nb_streams; i++) {
                st = ic->streams[i];
                if (st->start_time == AV_NOPTS_VALUE)
                    st->start_time = ic->start_time;
            }
        }
    
        if (end_time != MININT64) {
            /* put dummy values for duration if needed */
            for(i = 0;i < ic->nb_streams; i++) {
                st = ic->streams[i];
                if (st->duration == AV_NOPTS_VALUE && 
                    st->start_time != AV_NOPTS_VALUE)
                    st->duration = end_time - st->start_time;
            }
            ic->duration = end_time - ic->start_time;
        }
    
        url_fseek(&ic->pb, 0, SEEK_SET);
    }
    
    /* Record the file size, then pick the best timing-estimation strategy:
       accurate PTS scanning for MPEG-PS, existing per-stream timings when
       at least one stream has them, otherwise a bit-rate based guess.
       Finishes by refreshing the container-level timings. */
    static void av_estimate_timings(AVFormatContext *ic)
    {
        int64_t size = 0;
    
        /* get the file size, if possible */
        if (!(ic->iformat->flags & AVFMT_NOFILE)) {
            URLContext *h = url_fileno(&ic->pb);
            size = url_filesize(h);
            if (size < 0)
                size = 0;
        }
        ic->file_size = size;
    
        if (ic->iformat == &mpegps_demux) {
            /* get accurate estimate from the PTSes */
            av_estimate_timings_from_pts(ic);
        } else if (av_has_timings(ic)) {
            /* at least one component has timings - use them for all
               the components */
            fill_all_stream_timings(ic);
        } else {
            /* less precise: use bit rate info */
            av_estimate_timings_from_bit_rate(ic);
        }
        av_update_stream_timings(ic);
    
    #if 0
        {
            int i;
            AVStream *st;
            for(i = 0;i < ic->nb_streams; i++) {
                st = ic->streams[i];
            printf("%d: start_time: %0.3f duration: %0.3f\n", 
                   i, (double)st->start_time / AV_TIME_BASE, 
                   (double)st->duration / AV_TIME_BASE);
            }
            printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", 
                   (double)ic->start_time / AV_TIME_BASE, 
                   (double)ic->duration / AV_TIME_BASE,
                   ic->bit_rate / 1000);
        }
    #endif
    }
    
    
    /* Check whether enough codec parameters are known to consider the
       codec context usable: sample rate for audio, width for video; any
       other codec type is accepted unconditionally. Returns 0 or 1. */
    static int has_codec_parameters(AVCodecContext *enc)
    {
        if (enc->codec_type == CODEC_TYPE_AUDIO)
            return enc->sample_rate != 0;
        if (enc->codec_type == CODEC_TYPE_VIDEO)
            return enc->width != 0;
        return 1;
    }
    
    
    /**
     * Try to open the stream's decoder and decode one frame so that missing
     * codec parameters (width, sample rate, ...) get filled in as a side
     * effect of decoding. The codec is closed again before returning.
     *
     * @param st   stream whose codec is probed
     * @param data encoded frame payload
     * @param size payload size in bytes
     * @return the decoder's return value, or a negative error code
     *
     * Fix vs. original: when the audio sample buffer allocation failed, the
     * code jumped to 'fail' while 'ret' still held the non-negative
     * avcodec_open() result, so the allocation failure was reported as
     * success; 'ret' is now set to AVERROR_NOMEM first.
     */
    static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
    {
        int16_t *samples;
        AVCodec *codec;
        int got_picture, ret;
        AVFrame picture;
        
        codec = avcodec_find_decoder(st->codec.codec_id);
        if (!codec)
            return -1;
        ret = avcodec_open(&st->codec, codec);
        if (ret < 0)
            return ret;
        switch(st->codec.codec_type) {
        case CODEC_TYPE_VIDEO:
            ret = avcodec_decode_video(&st->codec, &picture, 
                                       &got_picture, (uint8_t *)data, size);
            break;
        case CODEC_TYPE_AUDIO:
            samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
            if (!samples) {
                ret = AVERROR_NOMEM; /* report the allocation failure */
                goto fail;
            }
            ret = avcodec_decode_audio(&st->codec, samples, 
                                       &got_picture, (uint8_t *)data, size);
            av_free(samples);
            break;
        default:
            break;
        }
     fail:
        avcodec_close(&st->codec);
        return ret;
    }
    
    /* absolute maximum size we read until we abort */
    #define MAX_READ_SIZE        5000000
    
    /* maximum duration until we stop analysing the stream */
    #define MAX_STREAM_DURATION  ((int)(AV_TIME_BASE * 1.0))
    
    
    /**
     * Read the beginning of a media file to get stream information. This
     * is useful for file formats with no headers such as MPEG. This
     * function also compute the real frame rate in case of mpeg2 repeat
     * frame mode.
     *
     * @param ic media file handle
     * @return >=0 if OK. AVERROR_xxx if error.  
     */
    int av_find_stream_info(AVFormatContext *ic)
    {
        /* NOTE(review): this extracted copy is heavily corrupted -- 'st',
         * 'pkt' and 'pkt1' are used without visible declarations, several
         * braces are missing, and web-scrape artifacts ("...avatar" /
         * "...committed" lines) are embedded below.  Recover the real body
         * from upstream before making any changes. */
        int i, count, ret, read_size;
    
        AVPacketList *pktl=NULL, **ppktl;
    
        count = 0;
        read_size = 0;
        ppktl = &ic->packet_buffer;
        for(;;) {
            /* check if one codec still needs to be handled */
            for(i=0;i<ic->nb_streams;i++) {
                st = ic->streams[i];
    
                if (!has_codec_parameters(&st->codec))
    
                    break;
            }
            if (i == ic->nb_streams) {
                /* NOTE: if the format has no header, then we need to read
                   some packets to get most of the streams, so we cannot
                   stop here */
    
                if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
    
                    /* if we found the info for all the codecs, we can stop */
                    ret = count;
                    break;
                }
            } else {
                /* we did not get all the codec info, but we read too much data */
    
                if (read_size >= MAX_READ_SIZE) {
    
            /* NOTE: a new stream can be added there if no header in file
               (AVFMTCTX_NOHEADER) */
            ret = av_read_frame_internal(ic, &pkt1);
            if (ret < 0) {
                /* EOF or error */
                ret = -1; /* we could not have all the codec parameters before EOF */
                if ((ic->ctx_flags & AVFMTCTX_NOHEADER) &&
                    i == ic->nb_streams)
                    ret = 0;
                break;
            }
    
    
            pktl = av_mallocz(sizeof(AVPacketList));
            if (!pktl) {
                ret = AVERROR_NOMEM;
                break;
            }
    
            /* add the packet in the buffered packet list */
            *ppktl = pktl;
            ppktl = &pktl->next;
    
            pkt = &pktl->pkt;
    
            *pkt = pkt1;
            
            /* duplicate the packet */
            if (av_dup_packet(pkt) < 0) {
                    ret = AVERROR_NOMEM;
                    break;
    
            read_size += pkt->size;
    
            st->codec_info_duration += pkt->duration;
            if (pkt->duration != 0)
                st->codec_info_nb_frames++;
    
            /* if still no information, we try to open the codec and to
               decompress the frame. We try to avoid that in most cases as
               it takes longer and uses more memory. For MPEG4, we need to
               decompress for Quicktime. */
            if (!has_codec_parameters(&st->codec) &&
                (st->codec.codec_id == CODEC_ID_FLV1 ||
                 st->codec.codec_id == CODEC_ID_H264 ||
                 st->codec.codec_id == CODEC_ID_H263 ||
    
                 st->codec.codec_id == CODEC_ID_VORBIS ||
    
                 st->codec.codec_id == CODEC_ID_MJPEG ||
    
                 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing)))
                try_decode_frame(st, pkt->data, pkt->size);
            
            if (st->codec_info_duration >= MAX_STREAM_DURATION) {
                break;
    
            }
            count++;
        }
    
        /* set real frame rate info */
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
    
                /* compute the real frame rate for telecine */
                if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
                     st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
                    st->codec.sub_id == 2) {
                    if (st->codec_info_nb_frames >= 20) {
                        float coded_frame_rate, est_frame_rate;
                        est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) / 
                            (double)st->codec_info_duration ;
                        coded_frame_rate = (double)st->codec.frame_rate /
                            (double)st->codec.frame_rate_base;
    #if 0
                        printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n", 
                               coded_frame_rate, est_frame_rate);
    #endif
                        /* if we detect that it could be a telecine, we
                           signal it. It would be better to do it at a
                           higher level as it can change in a film */
                        if (coded_frame_rate >= 24.97 && 
                            (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
                            st->r_frame_rate = 24024;
                            st->r_frame_rate_base = 1001;
                        }
                    }
                }
                /* if no real frame rate, use the codec one */
    
                if (!st->r_frame_rate){
                    st->r_frame_rate      = st->codec.frame_rate;
                    st->r_frame_rate_base = st->codec.frame_rate_base;
                }
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
    #if 0
        /* correct DTS for b frame streams with no timestamps */
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
                if(b-frames){
                    ppktl = &ic->packet_buffer;
                    while(ppkt1){
                        if(ppkt1->stream_index != i)
                            continue;
                        if(ppkt1->pkt->dts < 0)
                            break;
                        if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
                            break;
                        ppkt1->pkt->dts -= delta;
                        ppkt1= ppkt1->next;
                    }
                    if(ppkt1)
                        continue;
                    st->cur_dts -= delta;
                }
            }
        }
    #endif
    
    /*******************************************************/
    
    /**
     * start playing a network based stream (e.g. RTSP stream) at the
     * current position 
     */
    int av_read_play(AVFormatContext *s)
    {
        AVInputFormat *fmt = s->iformat;
    
        /* only formats that implement read_play support this */
        return fmt->read_play ? fmt->read_play(s) : AVERROR_NOTSUPP;
    }
    
    /**
     * pause a network based stream (e.g. RTSP stream). Use av_read_play()
     * to resume it.
     */
    int av_read_pause(AVFormatContext *s)
    {
        AVInputFormat *fmt = s->iformat;
    
        /* only formats that implement read_pause support this */
        return fmt->read_pause ? fmt->read_pause(s) : AVERROR_NOTSUPP;
    }
    
    
    /**
     * Close a media file (but not its codecs)
     *
     * @param s media file handle
     */
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    void av_close_input_file(AVFormatContext *s)
    {
        /* NOTE(review): corrupted copy -- 'st' has no visible declaration,
         * closing braces are missing and web-scrape artifacts are embedded
         * below.  Recover the real body from upstream before editing. */
        int i, must_open_file;
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
        /* free previous packet */
        if (s->cur_st && s->cur_st->parser)
            av_free_packet(&s->cur_pkt); 
    
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        for(i=0;i<s->nb_streams;i++) {
    
            /* free all data in a stream component */
            st = s->streams[i];
    
            if (st->parser) {
                av_parser_close(st->parser);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
            }
    
            av_free(st->index_entries);
            av_free(st);
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
        }
    
        /* formats flagged AVFMT_NOFILE never opened a real file */
        must_open_file = 1;
    
        if (s->iformat->flags & AVFMT_NOFILE) {
    
            must_open_file = 0;
        }
        if (must_open_file) {
    
            url_fclose(&s->pb);
        }
    
        av_free(s);
    
    /**
     * Add a new stream to a media file. Can only be called in the
    
     * read_header function. If the flag AVFMTCTX_NOHEADER is in the
     * format context, then new streams can be added in read_packet too.
    
     * @param id file format dependent stream id 
    
     */
    AVStream *av_new_stream(AVFormatContext *s, int id)
    {
        /* NOTE(review): scrape artifacts are embedded below, and the 'id'
         * parameter is never stored anywhere visible -- presumably an
         * assignment to the stream was dropped by the extraction; verify
         * against upstream. */
        AVStream *st;
    
        if (s->nb_streams >= MAX_STREAMS)
            return NULL;
    
        st = av_mallocz(sizeof(AVStream));
        if (!st)
            return NULL;
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
        avcodec_get_context_defaults(&st->codec);
    
        if (s->iformat) {
            /* no default bitrate if decoding */
            st->codec.bit_rate = 0;
        }
    
        /* timing fields start out unknown */
        st->start_time = AV_NOPTS_VALUE;
        st->duration = AV_NOPTS_VALUE;
    
        st->cur_dts = AV_NOPTS_VALUE;
    
    
        /* default pts settings is MPEG like */
        av_set_pts_info(st, 33, 1, 90000);
    
        st->last_IP_pts = AV_NOPTS_VALUE;
    
        s->streams[s->nb_streams++] = st;
        return st;
    }
    
    /************************************************************/
    /* output media file */
    
    Fabrice Bellard's avatar
    Fabrice Bellard committed
    
    
    /**
     * Allocate the muxer's private data (if the output format declares any)
     * and let the output format validate the given parameters.
     *
     * @return 0 on success, AVERROR_NOMEM on allocation failure, or the
     *         format's set_parameters() error code
     */
    int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
    {
        s->priv_data = NULL;
        if (s->oformat->priv_data_size > 0) {
            s->priv_data = av_mallocz(s->oformat->priv_data_size);
            if (!s->priv_data)
                return AVERROR_NOMEM;
        }
    
        if (s->oformat->set_parameters) {
            int ret = s->oformat->set_parameters(s, ap);
            if (ret < 0)
                return ret;
        }
        return 0;
    }
    
    
    /**
     * allocate the stream private data and write the stream header to an
     * output media file
     *
     * @param s media file handle
     * @return 0 if OK. AVERROR_xxx if error.  
     */
    int av_write_header(AVFormatContext *s)
    {
        /* NOTE(review): truncated copy -- the video case of the switch below
         * is cut off mid-statement and the loop/function closing braces are
         * missing. */
        int ret, i;
        AVStream *st;
    
        ret = s->oformat->write_header(s);
        if (ret < 0)
            return ret;
    
        /* init PTS generation */
        for(i=0;i<s->nb_streams;i++) {
            st = s->streams[i];
    
            switch (st->codec.codec_type) {
            case CODEC_TYPE_AUDIO:
                av_frac_init(&st->pts, 0, 0, 
    
                             (int64_t)st->time_base.num * st->codec.sample_rate);
    
                break;
            case CODEC_TYPE_VIDEO:
                av_frac_init(&st->pts, 0, 0, 
    
                             (int64_t)st->time_base.num * st->codec.frame_rate);
    //FIXME merge with compute_pkt_fields
    static void compute_pkt_fields2(AVStream *st, AVPacket *pkt){
        /* Convert an outgoing packet's pts/dts/duration from AV_TIME_BASE
         * units to the stream time base, fill in missing timestamps, and
         * advance the stream's pts fraction.
         * NOTE(review): corrupted copy -- braces are missing in the audio
         * case near the end and the switch is never closed. */
        int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
        int num, den, frame_size;
    
    //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
    
        
    /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
            return -1;*/
            
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    
        /* duration field */
        pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    
        if (pkt->duration == 0) {
            compute_frame_duration(&num, &den, st, NULL, pkt);
            if (den && num) {
                pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
            }
        }
    
    
        //XXX/FIXME this is a temporary hack until all encoders output pts
        if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
            pkt->dts=
    //        pkt->pts= st->cur_dts;
            pkt->pts= st->pts.val;
        }
    
        //calculate dts from pts    
        if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
            if(b_frames){
                if(st->last_IP_pts == AV_NOPTS_VALUE){
    
                    st->last_IP_pts= -pkt->duration;
    
                }
                if(st->last_IP_pts < pkt->pts){
                    pkt->dts= st->last_IP_pts;
                    st->last_IP_pts= pkt->pts;
                }else
                    pkt->dts= pkt->pts;
            }else
                pkt->dts= pkt->pts;
        }
        
    
    //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
    
        st->cur_dts= pkt->dts;
        st->pts.val= pkt->dts;
    
    
        /* update pts */
        switch (st->codec.codec_type) {
        case CODEC_TYPE_AUDIO:
    
            frame_size = get_audio_frame_size(&st->codec, pkt->size);
    
            /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
    
               but it would be better if we had the real timestamps from the encoder */
    
            if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
    
                av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
    
            break;
        case CODEC_TYPE_VIDEO:
    
            av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
    
    }
    
    static void truncate_ts(AVStream *st, AVPacket *pkt){
        /* Wrap pts/dts into the stream's pts_wrap_bits-wide timestamp space.
         * NOTE(review): web-scrape artifact lines are embedded below. */
        int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
        
    
    Michael Niedermayer's avatar
    Michael Niedermayer committed
    //    if(pkt->dts < 0)
    //        pkt->dts= 0;  //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
    
        
        pkt->pts &= pts_mask;
        pkt->dts &= pts_mask;
    }
    
    /**
     * Write a packet to an output media file. The packet shall contain
     * one audio or video frame.
     *
     * @param s media file handle
     * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
     * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
     */
    int av_write_frame(AVFormatContext *s, AVPacket *pkt)
    {