buf->refcount++;
av_buffersrc_buffer(ost->input_video_filter, fb);
} else
av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
ist->pts, decoded_frame->sample_aspect_ratio);
if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
av_free(buffer_to_free);
return AVERROR(ENOMEM);
} else
avcodec_get_frame_defaults(ist->filtered_frame);
filtered_frame = ist->filtered_frame;
frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
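/* pull every frame the filtergraph can currently produce and encode it */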
while (frame_available) {
AVRational ist_pts_tb;
if (ost->output_video_filter)
get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
if (ost->picref)
ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
if (ost->picref->video && !ost->frame_aspect_ratio)
ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
#else
filtered_frame = decoded_frame;
#endif
do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
same_quant ? quality : ost->st->codec->global_quality);
if (vstats_filename && frame_size)
do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
#if CONFIG_AVFILTER
frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
if (ost->picref)
avfilter_unref_buffer(ost->picref);
}
#endif
}
av_free(buffer_to_free);
return ret;
}
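/* Decode one subtitle packet and send the result to every output stream that encodes this input. */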
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
{
AVSubtitle subtitle;
int i, ret = avcodec_decode_subtitle2(ist->st->codec,
&subtitle, got_output, pkt);
if (ret < 0)
return ret;
if (!*got_output)
return ret;
rate_emu_sleep(ist);
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = &output_streams[i];
if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
continue;
do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
}
avsubtitle_free(&subtitle);
return ret;
}
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(InputStream *ist,
OutputStream *ost_table, int nb_ostreams,
const AVPacket *pkt)
{
int i;
int got_output;
int64_t pkt_pts = AV_NOPTS_VALUE;
AVPacket avpkt;
if (ist->next_pts == AV_NOPTS_VALUE)
ist->next_pts = ist->pts;
if (pkt == NULL) {
/* EOF handling */
av_init_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
goto handle_eof;
} else {
avpkt = *pkt;
}
if (pkt->dts != AV_NOPTS_VALUE)
ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
if (pkt->pts != AV_NOPTS_VALUE)
pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
// while we have more to decode or while the decoder did output something on EOF
while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
int ret = 0;
handle_eof:
ist->pts = ist->next_pts;
if (avpkt.size && avpkt.size != pkt->size) {
av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
"Multiple frames in a packet from stream %d\n", pkt->stream_index);
ist->showed_multi_packet_warning = 1;
}
switch (ist->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ret = transcode_audio (ist, &avpkt, &got_output);
break;
case AVMEDIA_TYPE_VIDEO:
ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = transcode_subtitles(ist, &avpkt, &got_output);
break;
}
if (ret < 0)
return ret;
// touch data and size only if not EOF
if (pkt) {
avpkt.data += ret;
avpkt.size -= ret;
}
if (!got_output) {
continue;
}
}
/* handle stream copy */
if (!ist->decoding_needed) {
rate_emu_sleep(ist);
ist->pts = ist->next_pts;
switch (ist->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
ist->st->codec->sample_rate;
break;
case AVMEDIA_TYPE_VIDEO:
if (ist->st->codec->time_base.num != 0) {
int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
ist->next_pts += ((int64_t)AV_TIME_BASE *
ist->st->codec->time_base.num * ticks) /
ist->st->codec->time_base.den;
}
break;
}
}
for (i = 0; pkt && i < nb_ostreams; i++) {
OutputStream *ost = &ost_table[i];
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
continue;
do_streamcopy(ist, ost, pkt);
}
return 0;
}
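/* Build an SDP description covering all output files and print it to stdout. */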
static void print_sdp(OutputFile *output_files, int n)
{
char sdp[2048];
int i;
AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
if (!avc)
exit_program(1);
for (i = 0; i < n; i++)
avc[i] = output_files[i].ctx;
av_sdp_create(avc, n, sdp, sizeof(sdp));
printf("SDP:\n%s\n", sdp);
fflush(stdout);
av_freep(&avc);
}
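/* Open the decoder for one input stream (if decoding is needed) and initialize its timestamp state. */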
static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
char *error, int error_len)
{
int i;
InputStream *ist = &input_streams[ist_index];
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
if (!codec) {
snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
ist->st->codec->codec_id, ist->file_index, ist->st->index);
return AVERROR(EINVAL);
}
/* update requested sample format for the decoder based on the
corresponding encoder sample format */
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = &output_streams[i];
if (ost->source_index == ist_index) {
update_sample_fmt(ist->st->codec, codec, ost->st->codec);
break;
}
}
if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
ist->st->codec->get_buffer = codec_get_buffer;
ist->st->codec->release_buffer = codec_release_buffer;
ist->st->codec->opaque = ist;
}
if (!av_dict_get(ist->opts, "threads", NULL, 0))
av_dict_set(&ist->opts, "threads", "auto", 0);
if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
ist->file_index, ist->st->index);
return AVERROR(EINVAL);
}
assert_codec_experimental(ist->st->codec, 0);
assert_avoptions(ist->opts);
}
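/* start pts in the past to compensate for the reordering delay introduced by B-frames */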
ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
ist->next_pts = AV_NOPTS_VALUE;
init_pts_correction(&ist->pts_ctx);
ist->is_start = 1;
return 0;
}
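/* Configure every output stream (stream copy or encode), open all encoders and decoders, and write the output file headers. */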
static int transcode_init(OutputFile *output_files,
int nb_output_files,
InputFile *input_files,
int nb_input_files)
{
int ret = 0, i, j, k;
AVFormatContext *oc;
AVCodecContext *codec, *icodec;
OutputStream *ost;
InputStream *ist;
char error[1024];
int want_sdp = 1;
/* init framerate emulation */
for (i = 0; i < nb_input_files; i++) {
InputFile *ifile = &input_files[i];
if (ifile->rate_emu)
for (j = 0; j < ifile->nb_streams; j++)
input_streams[j + ifile->ist_index].start = av_gettime();
}
/* output stream init */
for (i = 0; i < nb_output_files; i++) {
oc = output_files[i].ctx;
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, i, oc->filename, 1);
av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
return AVERROR(EINVAL);
}
}
/* for each output stream, we compute the right encoding parameters */
for (i = 0; i < nb_output_streams; i++) {
ost = &output_streams[i];
oc = output_files[ost->file_index].ctx;
ist = &input_streams[ost->source_index];
if (ost->attachment_filename)
continue;
codec  = ost->st->codec;
icodec = ist->st->codec;
ost->st->disposition = ist->st->disposition;
codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
codec->chroma_sample_location = icodec->chroma_sample_location;
if (ost->stream_copy) {
uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
if (extra_size > INT_MAX) {
return AVERROR(EINVAL);
}
/* if stream_copy is selected, no need to decode or encode */
codec->codec_id = icodec->codec_id;
codec->codec_type = icodec->codec_type;
if (!codec->codec_tag) {
if (!oc->oformat->codec_tag ||
av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
codec->codec_tag = icodec->codec_tag;
}
codec->bit_rate = icodec->bit_rate;
codec->rc_max_rate = icodec->rc_max_rate;
codec->rc_buffer_size = icodec->rc_buffer_size;
codec->field_order = icodec->field_order;
codec->extradata = av_mallocz(extra_size);
if (!codec->extradata) {
return AVERROR(ENOMEM);
}
memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
codec->extradata_size = icodec->extradata_size;
if (!copy_tb) {
codec->time_base = icodec->time_base;
codec->time_base.num *= icodec->ticks_per_frame;
av_reduce(&codec->time_base.num, &codec->time_base.den,
codec->time_base.num, codec->time_base.den, INT_MAX);
} else
codec->time_base = ist->st->time_base;
switch (codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if (audio_volume != 256) {
av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
exit_program(1);
}
codec->channel_layout = icodec->channel_layout;
codec->sample_rate = icodec->sample_rate;
codec->channels = icodec->channels;
codec->frame_size = icodec->frame_size;
codec->audio_service_type = icodec->audio_service_type;
codec->block_align = icodec->block_align;
break;
case AVMEDIA_TYPE_VIDEO:
codec->pix_fmt = icodec->pix_fmt;
codec->width = icodec->width;
codec->height = icodec->height;
codec->has_b_frames = icodec->has_b_frames;
if (!codec->sample_aspect_ratio.num) {
codec->sample_aspect_ratio =
ost->st->sample_aspect_ratio =
ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
ist->st->codec->sample_aspect_ratio.num ?
ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
}
break;
case AVMEDIA_TYPE_SUBTITLE:
codec->width = icodec->width;
codec->height = icodec->height;
break;
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
break;
default:
abort();
}
} else {
if (!ost->enc)
ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
ist->decoding_needed = 1;
ost->encoding_needed = 1;
switch (codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ost->fifo = av_fifo_alloc(1024);
if (!ost->fifo) {
return AVERROR(ENOMEM);
}
ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE);
if (!codec->sample_rate)
codec->sample_rate = icodec->sample_rate;
choose_sample_rate(ost->st, ost->enc);
codec->time_base = (AVRational){ 1, codec->sample_rate };
if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
codec->sample_fmt = icodec->sample_fmt;
choose_sample_fmt(ost->st, ost->enc);
if (!codec->channels)
codec->channels = icodec->channels;
codec->channel_layout = icodec->channel_layout;
if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
codec->channel_layout = 0;
ost->audio_resample = codec-> sample_rate != icodec->sample_rate || audio_sync_method > 1;
icodec->request_channels = codec-> channels;
ost->resample_sample_fmt = icodec->sample_fmt;
ost->resample_sample_rate = icodec->sample_rate;
ost->resample_channels = icodec->channels;
break;
case AVMEDIA_TYPE_VIDEO:
if (codec->pix_fmt == PIX_FMT_NONE)
codec->pix_fmt = icodec->pix_fmt;
choose_pixel_fmt(ost->st, ost->enc);
if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
exit_program(1);
}
if (!codec->width || !codec->height) {
codec->width = icodec->width;
codec->height = icodec->height;
}
ost->video_resample = codec->width != icodec->width ||
codec->height != icodec->height ||
codec->pix_fmt != icodec->pix_fmt;
if (ost->video_resample) {
#if !CONFIG_AVFILTER
avcodec_get_frame_defaults(&ost->pict_tmp);
if (avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
codec->width, codec->height)) {
av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
exit_program(1);
}
ost->img_resample_ctx = sws_getContext(
icodec->width,
icodec->height,
icodec->pix_fmt,
codec->width,
codec->height,
codec->pix_fmt,
ost->sws_flags, NULL, NULL, NULL);
if (ost->img_resample_ctx == NULL) {
av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
exit_program(1);
}
#endif
}
ost->resample_height = icodec->height;
ost->resample_width = icodec->width;
ost->resample_pix_fmt = icodec->pix_fmt;
if (!ost->frame_rate.num)
ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
ost->frame_rate = ost->enc->supported_framerates[idx];
}
codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
#if CONFIG_AVFILTER
if (configure_video_filters(ist, ost)) {
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
exit(1);
}
#endif
break;
case AVMEDIA_TYPE_SUBTITLE:
break;
default:
abort();
break;
}
/* two pass mode */
if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
char logfilename[1024];
FILE *f;
snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
i);
if (codec->flags & CODEC_FLAG_PASS1) {
f = fopen(logfilename, "wb");
if (!f) {
av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
logfilename, strerror(errno));
exit_program(1);
}
ost->logfile = f;
} else {
char *logbuffer;
size_t logbuffer_size;
if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
logfilename);
exit_program(1);
}
codec->stats_in = logbuffer;
}
}
}
if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
int size = codec->width * codec->height;
bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 200);
}
}
if (!bit_buffer)
bit_buffer = av_malloc(bit_buffer_size);
if (!bit_buffer) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate %d bytes output buffer\n",
bit_buffer_size);
return AVERROR(ENOMEM);
}
/* open each encoder */
for (i = 0; i < nb_output_streams; i++) {
ost = &output_streams[i];
if (ost->encoding_needed) {
AVCodec *codec = ost->enc;
AVCodecContext *dec = input_streams[ost->source_index].st->codec;
if (!codec) {
snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
ost->st->codec->codec_id, ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (dec->subtitle_header) {
ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
if (!ost->st->codec->subtitle_header) {
ret = AVERROR(ENOMEM);
goto dump_format;
}
memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
}
if (!av_dict_get(ost->opts, "threads", NULL, 0))
av_dict_set(&ost->opts, "threads", "auto", 0);
if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
assert_codec_experimental(ost->st->codec, 1);
assert_avoptions(ost->opts);
if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
"It takes bits/s as argument, not kbits/s\n");
extra_size += ost->st->codec->extradata_size;
if (ost->st->codec->me_threshold)
input_streams[ost->source_index].st->codec->debug |= FF_DEBUG_MV;
}
}
/* init input streams */
for (i = 0; i < nb_input_streams; i++)
if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
goto dump_format;
/* discard unused programs */
for (i = 0; i < nb_input_files; i++) {
InputFile *ifile = &input_files[i];
for (j = 0; j < ifile->ctx->nb_programs; j++) {
AVProgram *p = ifile->ctx->programs[j];
int discard = AVDISCARD_ALL;
for (k = 0; k < p->nb_stream_indexes; k++)
if (!input_streams[ifile->ist_index + p->stream_index[k]].discard) {
discard = AVDISCARD_DEFAULT;
break;
}
p->discard = discard;
}
}
/* open files and write file headers */
for (i = 0; i < nb_output_files; i++) {
oc = output_files[i].ctx;
oc->interrupt_callback = int_cb;
if (avformat_write_header(oc, &output_files[i].opts) < 0) {
snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
ret = AVERROR(EINVAL);
goto dump_format;
}
assert_avoptions(output_files[i].opts);
if (strcmp(oc->oformat->name, "rtp")) {
want_sdp = 0;
}
}
dump_format:
/* dump the file output parameters - cannot be done before in case
of stream copy */
for (i = 0; i < nb_output_files; i++) {
av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
}
/* dump the stream mapping */
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
for (i = 0; i < nb_output_streams; i++) {
ost = &output_streams[i];
if (ost->attachment_filename) {
/* an attached file */
av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
ost->attachment_filename, ost->file_index, ost->index);
continue;
}
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
input_streams[ost->source_index].file_index,
input_streams[ost->source_index].st->index,
ost->file_index,
ost->index);
if (ost->sync_ist != &input_streams[ost->source_index])
av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
ost->sync_ist->file_index,
ost->sync_ist->st->index);
av_log(NULL, AV_LOG_INFO, " (copy)");
else
av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
input_streams[ost->source_index].dec->name : "?",
ost->enc ? ost->enc->name : "?");
av_log(NULL, AV_LOG_INFO, "\n");
}
if (ret) {
av_log(NULL, AV_LOG_ERROR, "%s\n", error);
return ret;
}
if (want_sdp) {
print_sdp(output_files, nb_output_files);
}
return 0;
}
/*
* The following code is the main loop of the file converter
*/
static int transcode(OutputFile *output_files,
int nb_output_files,
InputFile *input_files,
int nb_input_files)
{
int ret, i;
AVFormatContext *is, *os;
OutputStream *ost;
InputStream *ist;
uint8_t *no_packet;
int no_packet_count = 0;
int64_t timer_start;
if (!(no_packet = av_mallocz(nb_input_files)))
exit_program(1);
ret = transcode_init(output_files, nb_output_files, input_files, nb_input_files);
if (ret < 0)
goto fail;
av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
term_init();
timer_start = av_gettime();
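/* main demuxing loop: pick the input with the lowest output pts, read one packet and process it */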
for (; received_sigterm == 0;) {
int file_index, ist_index;
AVPacket pkt;
int64_t ipts_min;
double opts_min;
ipts_min = INT64_MAX;
opts_min = 1e100;
/* select the stream that we must read now by looking at the
smallest output pts */
file_index = -1;
for (i = 0; i < nb_output_streams; i++) {
int64_t ipts;
double opts;
OutputFile *of;
ost = &output_streams[i];
of = &output_files[ost->file_index];
os = output_files[ost->file_index].ctx;
ist = &input_streams[ost->source_index];
if (ost->is_past_recording_time || no_packet[ist->file_index] ||
(os->pb && avio_tell(os->pb) >= of->limit_filesize))
continue;
opts = ost->st->pts.val * av_q2d(ost->st->time_base);
ipts = ist->pts;
if (!input_files[ist->file_index].eof_reached) {
if (ipts < ipts_min) {
ipts_min = ipts;
if (input_sync)
file_index = ist->file_index;
}
if (opts < opts_min) {
opts_min = opts;
if (!input_sync)
file_index = ost->file_index;
}
}
if (ost->frame_number >= ost->max_frames) {
int j;
for (j = 0; j < of->ctx->nb_streams; j++)
output_streams[of->ost_index + j].is_past_recording_time = 1;
continue;
}
}
/* if none, if is finished */
if (file_index < 0) {
if (no_packet_count) {
no_packet_count = 0;
memset(no_packet, 0, nb_input_files);
usleep(10000);
continue;
}
break;
}
/* read a frame from it and output it in the fifo */
is = input_files[file_index].ctx;
ret = av_read_frame(is, &pkt);
if (ret == AVERROR(EAGAIN)) {
no_packet[file_index] = 1;
no_packet_count++;
continue;
}
if (ret < 0) {
input_files[file_index].eof_reached = 1;
if (opt_shortest)
break;
else
continue;
}
no_packet_count = 0;
memset(no_packet, 0, nb_input_files);
if (do_pkt_dump) {
av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
is->streams[pkt.stream_index]);
}
/* the following test is needed in case new streams appear
dynamically in stream : we ignore them */
if (pkt.stream_index >= input_files[file_index].nb_streams)
goto discard_packet;
ist_index = input_files[file_index].ist_index + pkt.stream_index;
ist = &input_streams[ist_index];
if (ist->discard)
goto discard_packet;
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
//fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
// ist->next_pts,
// pkt.dts, input_files[ist->file_index].ts_offset,
// ist->st->codec->codec_type);
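/* compensate for timestamp discontinuities in formats that allow them (e.g. MPEG-TS) */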
if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
&& (is->iformat->flags & AVFMT_TS_DISCONT)) {
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
int64_t delta = pkt_dts - ist->next_pts;
if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->pts) && !copy_ts) {
input_files[ist->file_index].ts_offset -= delta;
av_log(NULL, AV_LOG_DEBUG,
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, input_files[ist->file_index].ts_offset);
pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
}
// fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
ist->file_index, ist->st->index);
if (exit_on_error)
exit_program(1);
av_free_packet(&pkt);
continue;
}
discard_packet:
av_free_packet(&pkt);
/* dump report by using the output first video and audio streams */
print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
}
/* at the end of stream, we must flush the decoder buffers */
for (i = 0; i < nb_input_streams; i++) {
ist = &input_streams[i];
if (ist->decoding_needed) {
output_packet(ist, output_streams, nb_output_streams, NULL);
}
}
flush_encoders(output_streams, nb_output_streams);
term_exit();
/* write the trailer if needed and close file */
for (i = 0; i < nb_output_files; i++) {
os = output_files[i].ctx;
av_write_trailer(os);
}
/* dump report by using the first video and audio streams */
print_report(output_files, output_streams, nb_output_streams, 1, timer_start);
/* close each encoder */
for (i = 0; i < nb_output_streams; i++) {
ost = &output_streams[i];
if (ost->encoding_needed) {
av_freep(&ost->st->codec->stats_in);
avcodec_close(ost->st->codec);
}
#if CONFIG_AVFILTER
avfilter_graph_free(&ost->graph);
#endif
}
/* close each decoder */
for (i = 0; i < nb_input_streams; i++) {
ist = &input_streams[i];
if (ist->decoding_needed) {
avcodec_close(ist->st->codec);
}
}
/* finished ! */
ret = 0;
fail:
av_freep(&bit_buffer);
av_freep(&no_packet);
if (output_streams) {
for (i = 0; i < nb_output_streams; i++) {
ost = &output_streams[i];
if (ost) {
av_freep(&ost->st->codec->extradata);
if (ost->logfile) {
fclose(ost->logfile);
ost->logfile = NULL;
}
av_fifo_free(ost->fifo); /* works even if fifo is not
initialized but set to zero */
av_freep(&ost->st->codec->subtitle_header);
av_free(ost->pict_tmp.data[0]);
av_free(ost->forced_kf_pts);
if (ost->video_resample)
sws_freeContext(ost->img_resample_ctx);
if (ost->resample)
audio_resample_close(ost->resample);
if (ost->reformat_ctx)
av_audio_convert_free(ost->reformat_ctx);
av_dict_free(&ost->opts);
}
}
}
return ret;
}
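/* Parse an aspect ratio given either as "num:den" or as a floating point number. */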
static double parse_frame_aspect_ratio(const char *arg)
{
int x = 0, y = 0;
double ar = 0;
const char *p;
char *end;
p = strchr(arg, ':');
if (p) {
x = strtol(arg, &end, 10);
if (end == p)
y = strtol(end + 1, &end, 10);
if (x > 0 && y > 0)
ar = (double)x / (double)y;
} else
ar = strtod(arg, NULL);
if (!ar) {
av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
exit_program(1);
}
return ar;
}
static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
{
return parse_option(o, "codec:a", arg, options);
}
static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
{
return parse_option(o, "codec:v", arg, options);
}
static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
{
return parse_option(o, "codec:s", arg, options);
}
static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
{
return parse_option(o, "codec:d", arg, options);
}
static int opt_map(OptionsContext *o, const char *opt, const char *arg)
{
StreamMap *m = NULL;
int i, negative = 0, file_idx;
int sync_file_idx = -1, sync_stream_idx;
char *p, *sync;
char *map;
if (*arg == '-') {
negative = 1;
arg++;
}
map = av_strdup(arg);
/* parse sync stream first, just pick first matching stream */
if (sync = strchr(map, ',')) {
*sync = 0;
sync_file_idx = strtol(sync + 1, &sync, 0);
if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
exit_program(1);
}
if (*sync)
sync++;
for (i = 0; i < input_files[sync_file_idx].nb_streams; i++)
if (check_stream_specifier(input_files[sync_file_idx].ctx,
input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
sync_stream_idx = i;
break;
}
if (i == input_files[sync_file_idx].nb_streams) {
av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
"match any streams.\n", arg);
exit_program(1);
}
}
file_idx = strtol(map, &p, 0);
if (file_idx >= nb_input_files || file_idx < 0) {
av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
if (negative)
/* disable some already defined maps */
for (i = 0; i < o->nb_stream_maps; i++) {
m = &o->stream_maps[i];
if (file_idx == m->file_index &&
check_stream_specifier(input_files[m->file_index].ctx,
input_files[m->file_index].ctx->streams[m->stream_index],
*p == ':' ? p + 1 : p) > 0)
m->disabled = 1;
}
else
for (i = 0; i < input_files[file_idx].nb_streams; i++) {
if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i],
*p == ':' ? p + 1 : p) <= 0)
continue;
o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
&o->nb_stream_maps, o->nb_stream_maps + 1);
m = &o->stream_maps[o->nb_stream_maps - 1];
m->file_index = file_idx;
m->stream_index = i;
if (sync_file_idx >= 0) {
m->sync_file_index = sync_file_idx;
m->sync_stream_index = sync_stream_idx;
} else {
m->sync_file_index = file_idx;
m->sync_stream_index = i;
}
}
if (!m) {
av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
exit_program(1);
}
av_freep(&map);
return 0;
}
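/* Add a file to the list of attachments to be embedded in the output. */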
static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
{
o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
&o->nb_attachments, o->nb_attachments + 1);
o->attachments[o->nb_attachments - 1] = arg;
return 0;
}
/**
* Parse a metadata specifier in arg.
* @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
* @param index for type c/p, chapter/program index is written here
* @param stream_spec for type s, the stream specifier is written here
*/
static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)