avconv.c
1 /*
2  * avconv main
3  * Copyright (c) 2000-2011 The libav developers.
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include <unistd.h>
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/avfiltergraph.h"
53 # include "libavfilter/buffersrc.h"
54 # include "libavfilter/vsrc_buffer.h"
55 #endif
56 
57 #if HAVE_SYS_RESOURCE_H
58 #include <sys/types.h>
59 #include <sys/time.h>
60 #include <sys/resource.h>
61 #elif HAVE_GETPROCESSTIMES
62 #include <windows.h>
63 #endif
64 #if HAVE_GETPROCESSMEMORYINFO
65 #include <windows.h>
66 #include <psapi.h>
67 #endif
68 
69 #if HAVE_SYS_SELECT_H
70 #include <sys/select.h>
71 #endif
72 
73 #include <time.h>
74 
75 #include "cmdutils.h"
76 
77 #include "libavutil/avassert.h"
78 
79 #define VSYNC_AUTO -1
80 #define VSYNC_PASSTHROUGH 0
81 #define VSYNC_CFR 1
82 #define VSYNC_VFR 2
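/* Illustrative summary (not in the original source): these modes are applied in
 * do_video_out() below. AUTO picks a mode from the muxer flags, PASSTHROUGH
 * forwards timestamps untouched, CFR duplicates or drops frames to hold a
 * constant frame rate, and VFR drops frames when needed but never duplicates. */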
83 
84 const char program_name[] = "avconv";
85 const int program_birth_year = 2000;
86 
87 /* select an input stream for an output stream */
88 typedef struct StreamMap {
89  int disabled;
94 } StreamMap;
95 
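/* Presumed meaning of the fields below (the Doxygen member comments are missing
 * from this listing): 'file' is the file index, 'type' selects what the
 * metadata applies to ('g' global, 's' stream, 'c' chapter, 'p' program), and
 * 'index' is the stream/chapter/program number within that file. */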
99 typedef struct MetadataMap {
100  int file;
101  char type;
102  int index;
103 } MetadataMap;
104 
105 static const OptionDef options[];
106 
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
112 
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
127 
128 static int audio_volume = 256;
129 
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
138 
139 static float dts_delta_threshold = 10;
140 
141 static int print_stats = 1;
142 
143 static uint8_t *audio_buf;
144 static unsigned int allocated_audio_buf_size;
145 
146 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
147 
148 typedef struct FrameBuffer {
149  uint8_t *base[4];
150  uint8_t *data[4];
151  int linesize[4];
152 
153  int h, w;
155 
156  int refcount;
157  struct InputStream *ist;
158  struct FrameBuffer *next;
159 } FrameBuffer;
160 
161 typedef struct InputStream {
164  int discard; /* true if stream data should be discarded */
165  int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
169 
170  int64_t start; /* time when read started */
171  int64_t next_pts; /* synthetic pts for cases where pkt.pts
172  is not defined */
173  int64_t pts; /* current pts */
175  double ts_scale;
176  int is_start; /* is 1 at the start and after a discontinuity */
179 
180  /* a pool of free buffers for decoded data */
182 } InputStream;
183 
184 typedef struct InputFile {
186  int eof_reached; /* true if eof reached */
187  int ist_index; /* index of first stream in ist_table */
188  int buffer_size; /* current total buffer size */
189  int64_t ts_offset;
190  int nb_streams; /* number of streams that avconv is aware of; may be different
191  from ctx.nb_streams if new streams appear during av_read_frame() */
192  int rate_emu;
193 } InputFile;
194 
195 typedef struct OutputStream {
196  int file_index; /* file index */
197  int index; /* stream index in the output file */
198  int source_index; /* InputStream index */
199  AVStream *st; /* stream in the output file */
200  int encoding_needed; /* true if encoding needed for this stream */
202  /* input pts and corresponding output pts
203  for A/V sync */
204  // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
205  struct InputStream *sync_ist; /* input stream to sync against */
206  int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
209  int64_t max_frames;
211 
212  /* video only */
214  AVFrame pict_tmp; /* temporary image for resampling */
215  struct SwsContext *img_resample_ctx; /* for image resampling */
222 
224 
225  /* forced key frames */
226  int64_t *forced_kf_pts;
229 
230  /* audio only */
232  ReSampleContext *resample; /* for audio resampling */
238  AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
239  FILE *logfile;
240 
241 #if CONFIG_AVFILTER
242  AVFilterContext *output_video_filter;
243  AVFilterContext *input_video_filter;
244  AVFilterBufferRef *picref;
245  char *avfilter;
246  AVFilterGraph *graph;
247 #endif
248 
249  int64_t sws_flags;
253  const char *attachment_filename;
255 } OutputStream;
256 
257 
258 typedef struct OutputFile {
261  int ost_index; /* index of the first stream in output_streams */
262  int64_t recording_time; /* desired length of the resulting file in microseconds */
263  int64_t start_time; /* start time in microseconds */
264  uint64_t limit_filesize;
265 } OutputFile;
266 
268 static int nb_input_streams = 0;
270 static int nb_input_files = 0;
271 
273 static int nb_output_streams = 0;
275 static int nb_output_files = 0;
276 
277 typedef struct OptionsContext {
278  /* input/output options */
279  int64_t start_time;
280  const char *format;
281 
294 
295  /* input options */
297  int rate_emu;
298 
303 
304  /* output options */
307  /* first item specifies output metadata, second is input */
313  const char **attachments;
315 
317 
318  int64_t recording_time;
319  uint64_t limit_filesize;
320  float mux_preload;
322 
327 
328  /* indexed by output file stream index */
331 
364 #if CONFIG_AVFILTER
366  int nb_filters;
367 #endif
369 
370 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
371 {\
372  int i, ret;\
373  for (i = 0; i < o->nb_ ## name; i++) {\
374  char *spec = o->name[i].specifier;\
375  if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
376  outvar = o->name[i].u.type;\
377  else if (ret < 0)\
378  exit_program(1);\
379  }\
380 }
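/* Illustrative use of the macro above (assumed, based on how per-stream options
 * are declared in OptionsContext): given an AVFormatContext *oc and AVStream *st,
 * the per-stream codec name is resolved elsewhere in this file roughly as
 *
 *     char *codec_name = NULL;
 *     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, oc, st);
 *
 * i.e. the last specifier matching the stream wins, and an invalid specifier
 * aborts via exit_program(1). */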
381 
382 static void reset_options(OptionsContext *o)
383 {
384  const OptionDef *po = options;
385 
386  /* all OPT_SPEC and OPT_STRING can be freed in generic way */
387  while (po->name) {
388  void *dst = (uint8_t*)o + po->u.off;
389 
390  if (po->flags & OPT_SPEC) {
391  SpecifierOpt **so = dst;
392  int i, *count = (int*)(so + 1);
393  for (i = 0; i < *count; i++) {
394  av_freep(&(*so)[i].specifier);
395  if (po->flags & OPT_STRING)
396  av_freep(&(*so)[i].u.str);
397  }
398  av_freep(so);
399  *count = 0;
400  } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
401  av_freep(dst);
402  po++;
403  }
404 
405  av_freep(&o->stream_maps);
407  av_freep(&o->streamid_map);
408 
409  memset(o, 0, sizeof(*o));
410 
411  o->mux_max_delay = 0.7;
414  o->chapters_input_file = INT_MAX;
415 
416  uninit_opts();
417  init_opts();
418 }
419 
420 static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
421 {
422  AVCodecContext *s = ist->st->codec;
423  FrameBuffer *buf = av_mallocz(sizeof(*buf));
424  int ret;
425  const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
426  int h_chroma_shift, v_chroma_shift;
427  int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
428  int w = s->width, h = s->height;
429 
430  if (!buf)
431  return AVERROR(ENOMEM);
432 
433  if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
434  w += 2*edge;
435  h += 2*edge;
436  }
437 
438  avcodec_align_dimensions(s, &w, &h);
439  if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
440  s->pix_fmt, 32)) < 0) {
441  av_freep(&buf);
442  return ret;
443  }
444  /* XXX this shouldn't be needed, but some tests break without this line
445  * those decoders are buggy and need to be fixed.
446  * the following tests fail:
447  * bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
448  */
449  memset(buf->base[0], 128, ret);
450 
451  avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
452  for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
453  const int h_shift = i==0 ? 0 : h_chroma_shift;
454  const int v_shift = i==0 ? 0 : v_chroma_shift;
455  if (s->flags & CODEC_FLAG_EMU_EDGE)
456  buf->data[i] = buf->base[i];
457  else
458  buf->data[i] = buf->base[i] +
459  FFALIGN((buf->linesize[i]*edge >> v_shift) +
460  (pixel_size*edge >> h_shift), 32);
461  }
462  buf->w = s->width;
463  buf->h = s->height;
464  buf->pix_fmt = s->pix_fmt;
465  buf->ist = ist;
466 
467  *pbuf = buf;
468  return 0;
469 }
470 
471 static void free_buffer_pool(InputStream *ist)
472 {
473  FrameBuffer *buf = ist->buffer_pool;
474  while (buf) {
475  ist->buffer_pool = buf->next;
476  av_freep(&buf->base[0]);
477  av_free(buf);
478  buf = ist->buffer_pool;
479  }
480 }
481 
482 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
483 {
484  av_assert0(buf->refcount);
485  buf->refcount--;
486  if (!buf->refcount) {
487  buf->next = ist->buffer_pool;
488  ist->buffer_pool = buf;
489  }
490 }
491 
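/* Descriptive note (added): the two callbacks below plug the FrameBuffer pool
 * into the decoder (AVCodecContext.get_buffer/release_buffer for CODEC_CAP_DR1
 * codecs). codec_get_buffer() hands out a recycled buffer, reallocating only
 * when the frame geometry or pixel format changes; codec_release_buffer()
 * returns it to the pool; filter_release_buffer() further below does the same
 * for buffers handed on into libavfilter, so a buffer is only recycled once its
 * refcount drops to zero. */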
492 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
493 {
494  InputStream *ist = s->opaque;
495  FrameBuffer *buf;
496  int ret, i;
497 
498  if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
499  return ret;
500 
501  buf = ist->buffer_pool;
502  ist->buffer_pool = buf->next;
503  buf->next = NULL;
504  if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
505  av_freep(&buf->base[0]);
506  av_free(buf);
507  if ((ret = alloc_buffer(ist, &buf)) < 0)
508  return ret;
509  }
510  buf->refcount++;
511 
512  frame->opaque = buf;
513  frame->type = FF_BUFFER_TYPE_USER;
514  frame->extended_data = frame->data;
515  frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
516 
517  for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
518  frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
519  frame->data[i] = buf->data[i];
520  frame->linesize[i] = buf->linesize[i];
521  }
522 
523  return 0;
524 }
525 
526 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
527 {
528  InputStream *ist = s->opaque;
529  FrameBuffer *buf = frame->opaque;
530  int i;
531 
532  for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
533  frame->data[i] = NULL;
534 
535  unref_buffer(ist, buf);
536 }
537 
538 static void filter_release_buffer(AVFilterBuffer *fb)
539 {
540  FrameBuffer *buf = fb->priv;
541  av_free(fb);
542  unref_buffer(buf->ist, buf);
543 }
544 
545 #if CONFIG_AVFILTER
546 
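/* Rough sketch of the graph built below (illustrative, not from the original
 * comments):
 *
 *   buffer "src" -> [scale, only if the output size differs]
 *                -> [user chain from ost->avfilter, if any] -> avsink "out"
 *
 * The buffer source arguments are
 * "width:height:pix_fmt:timebase_num:timebase_den:sar_num:sar_den", with the
 * time base passed here as 1/AV_TIME_BASE. */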
547 static int configure_video_filters(InputStream *ist, OutputStream *ost)
548 {
549  AVFilterContext *last_filter, *filter;
551  AVCodecContext *codec = ost->st->codec;
552  AVCodecContext *icodec = ist->st->codec;
553  AVSinkContext avsink_ctx = { .pix_fmt = codec->pix_fmt };
554  AVRational sample_aspect_ratio;
555  char args[255];
556  int ret;
557 
558  ost->graph = avfilter_graph_alloc();
559 
560  if (ist->st->sample_aspect_ratio.num) {
561  sample_aspect_ratio = ist->st->sample_aspect_ratio;
562  } else
563  sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
564 
565  snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
566  ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
567  sample_aspect_ratio.num, sample_aspect_ratio.den);
568 
569  ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"),
570  "src", args, NULL, ost->graph);
571  if (ret < 0)
572  return ret;
573  ret = avfilter_graph_create_filter(&ost->output_video_filter, &avsink,
574  "out", NULL, &avsink_ctx, ost->graph);
575  if (ret < 0)
576  return ret;
577  last_filter = ost->input_video_filter;
578 
579  if (codec->width != icodec->width || codec->height != icodec->height) {
580  snprintf(args, 255, "%d:%d:flags=0x%X",
581  codec->width,
582  codec->height,
583  (unsigned)ost->sws_flags);
584  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
585  NULL, args, NULL, ost->graph)) < 0)
586  return ret;
587  if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0)
588  return ret;
589  last_filter = filter;
590  }
591 
592  snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
593  ost->graph->scale_sws_opts = av_strdup(args);
594 
595  if (ost->avfilter) {
596  AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
597  AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
598 
599  outputs->name = av_strdup("in");
600  outputs->filter_ctx = last_filter;
601  outputs->pad_idx = 0;
602  outputs->next = NULL;
603 
604  inputs->name = av_strdup("out");
605  inputs->filter_ctx = ost->output_video_filter;
606  inputs->pad_idx = 0;
607  inputs->next = NULL;
608 
609  if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
610  return ret;
611  } else {
612  if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0)
613  return ret;
614  }
615 
616  if ((ret = avfilter_graph_config(ost->graph, NULL)) < 0)
617  return ret;
618 
619  codec->width = ost->output_video_filter->inputs[0]->w;
620  codec->height = ost->output_video_filter->inputs[0]->h;
621  codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
622  ost->frame_aspect_ratio ? // overridden by the -aspect cli option
623  av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
624  ost->output_video_filter->inputs[0]->sample_aspect_ratio;
625 
626  return 0;
627 }
628 #endif /* CONFIG_AVFILTER */
629 
630 static void term_exit(void)
631 {
632  av_log(NULL, AV_LOG_QUIET, "");
633 }
634 
635 static volatile int received_sigterm = 0;
636 static volatile int received_nb_signals = 0;
637 
638 static void
639 sigterm_handler(int sig)
640 {
641  received_sigterm = sig;
642  received_nb_signals++;
643  term_exit();
644 }
645 
646 static void term_init(void)
647 {
648  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
649  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
650 #ifdef SIGXCPU
651  signal(SIGXCPU, sigterm_handler);
652 #endif
653 }
654 
655 static int decode_interrupt_cb(void *ctx)
656 {
657  return received_nb_signals > 1;
658 }
659 
660 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
661 
662 void exit_program(int ret)
663 {
664  int i;
665 
666  /* close files */
667  for (i = 0; i < nb_output_files; i++) {
668  AVFormatContext *s = output_files[i].ctx;
669  if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
670  avio_close(s->pb);
671  avformat_free_context(s);
672  av_dict_free(&output_files[i].opts);
673  }
674  for (i = 0; i < nb_output_streams; i++) {
675  AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters;
676  while (bsfc) {
677  AVBitStreamFilterContext *next = bsfc->next;
678  av_bitstream_filter_close(bsfc);
679  bsfc = next;
680  }
681  output_streams[i].bitstream_filters = NULL;
682 
683  if (output_streams[i].output_frame) {
684  AVFrame *frame = output_streams[i].output_frame;
685  if (frame->extended_data != frame->data)
686  av_freep(&frame->extended_data);
687  av_freep(&frame);
688  }
689 
690 #if CONFIG_AVFILTER
691  av_freep(&output_streams[i].avfilter);
692 #endif
693  }
694  for (i = 0; i < nb_input_files; i++) {
695  avformat_close_input(&input_files[i].ctx);
696  }
697  for (i = 0; i < nb_input_streams; i++) {
698  av_freep(&input_streams[i].decoded_frame);
699  av_freep(&input_streams[i].filtered_frame);
700  av_dict_free(&input_streams[i].opts);
701  free_buffer_pool(&input_streams[i]);
702  }
703 
704  if (vstats_file)
705  fclose(vstats_file);
707 
708  av_freep(&input_streams);
709  av_freep(&input_files);
710  av_freep(&output_streams);
711  av_freep(&output_files);
712 
713  uninit_opts();
716 
717 #if CONFIG_AVFILTER
718  avfilter_uninit();
719 #endif
721 
722  if (received_sigterm) {
723  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
724  (int) received_sigterm);
725  exit (255);
726  }
727 
728  exit(ret);
729 }
730 
731 static void assert_avoptions(AVDictionary *m)
732 {
733  AVDictionaryEntry *t;
734  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
735  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
736  exit_program(1);
737  }
738 }
739 
740 static void assert_codec_experimental(AVCodecContext *c, int encoder)
741 {
742  const char *codec_string = encoder ? "encoder" : "decoder";
743  AVCodec *codec;
744  if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
745  c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
746  av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
747  "results.\nAdd '-strict experimental' if you want to use it.\n",
748  codec_string, c->codec->name);
749  codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
750  if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
751  av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
752  codec_string, codec->name);
753  exit_program(1);
754  }
755 }
756 
757 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
758 {
759  if (codec && codec->sample_fmts) {
760  const enum AVSampleFormat *p = codec->sample_fmts;
761  for (; *p != -1; p++) {
762  if (*p == st->codec->sample_fmt)
763  break;
764  }
765  if (*p == -1) {
767  "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
769  codec->name,
771  st->codec->sample_fmt = codec->sample_fmts[0];
772  }
773  }
774 }
775 
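/* Assumed intent of update_sample_fmt() below (its Doxygen comment is missing
 * from this listing): when the decoder can emit more than one sample format,
 * ask it via request_sample_fmt for exactly what the encoder wants, or failing
 * that for the supported format closest in quality, so the later conversion in
 * do_audio_out() is as cheap as possible. */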
783 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
784  AVCodecContext *enc)
785 {
786  /* if sample formats match or a decoder sample format has already been
787  requested, just return */
788  if (enc->sample_fmt == dec->sample_fmt ||
789  dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
790  return;
791 
792  /* if decoder supports more than one output format */
793  if (dec_codec && dec_codec->sample_fmts &&
794  dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
795  dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
796  const enum AVSampleFormat *p;
797  int min_dec = -1, min_inc = -1;
798 
799  /* find a matching sample format in the encoder */
800  for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
801  if (*p == enc->sample_fmt) {
802  dec->request_sample_fmt = *p;
803  return;
804  } else if (*p > enc->sample_fmt) {
805  min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
806  } else
807  min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
808  }
809 
810  /* if none match, provide the one that matches quality closest */
811  dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
812  enc->sample_fmt - min_dec;
813  }
814 }
815 
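/* Worked example for choose_sample_rate() below (illustrative): if the encoder
 * only supported { 48000, 32000, 22050 } and the input were 44100 Hz, the
 * distances would be 3900, 12100 and 22050, so 48000 Hz would be selected and
 * the warning about the closest supported rate printed. */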
816 static void choose_sample_rate(AVStream *st, AVCodec *codec)
817 {
818  if (codec && codec->supported_samplerates) {
819  const int *p = codec->supported_samplerates;
820  int best = 0;
821  int best_dist = INT_MAX;
822  for (; *p; p++) {
823  int dist = abs(st->codec->sample_rate - *p);
824  if (dist < best_dist) {
825  best_dist = dist;
826  best = *p;
827  }
828  }
829  if (best_dist) {
830  av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported, using closest supported rate (%d)\n", best);
831  }
832  st->codec->sample_rate = best;
833  }
834 }
835 
836 static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
837 {
838  if (codec && codec->pix_fmts) {
839  const enum PixelFormat *p = codec->pix_fmts;
841  if (st->codec->codec_id == CODEC_ID_MJPEG) {
843  } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
846  }
847  }
848  for (; *p != PIX_FMT_NONE; p++) {
849  if (*p == st->codec->pix_fmt)
850  break;
851  }
852  if (*p == PIX_FMT_NONE) {
853  if (st->codec->pix_fmt != PIX_FMT_NONE)
855  "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
857  codec->name,
859  st->codec->pix_fmt = codec->pix_fmts[0];
860  }
861  }
862 }
863 
864 static double
865 get_sync_ipts(const OutputStream *ost)
866 {
867  const InputStream *ist = ost->sync_ist;
868  OutputFile *of = &output_files[ost->file_index];
869  return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
870 }
871 
872 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
873 {
874  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
875  AVCodecContext *avctx = ost->st->codec;
876  int ret;
877 
878  /*
879  * Audio encoders may split the packets -- #frames in != #packets out.
880  * But there is no reordering, so we can limit the number of output packets
881  * by simply dropping them here.
882  * Counting encoded video frames needs to be done separately because of
883  * reordering, see do_video_out()
884  */
885  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
886  if (ost->frame_number >= ost->max_frames)
887  return;
888  ost->frame_number++;
889  }
890 
891  while (bsfc) {
892  AVPacket new_pkt = *pkt;
893  int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
894  &new_pkt.data, &new_pkt.size,
895  pkt->data, pkt->size,
896  pkt->flags & AV_PKT_FLAG_KEY);
897  if (a > 0) {
898  av_free_packet(pkt);
899  new_pkt.destruct = av_destruct_packet;
900  } else if (a < 0) {
901  av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
902  bsfc->filter->name, pkt->stream_index,
903  avctx->codec ? avctx->codec->name : "copy");
904  print_error("", a);
905  if (exit_on_error)
906  exit_program(1);
907  }
908  *pkt = new_pkt;
909 
910  bsfc = bsfc->next;
911  }
912 
913  ret = av_interleaved_write_frame(s, pkt);
914  if (ret < 0) {
915  print_error("av_interleaved_write_frame()", ret);
916  exit_program(1);
917  }
918 }
919 
920 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
921 {
922  int fill_char = 0x00;
923  if (sample_fmt == AV_SAMPLE_FMT_U8)
924  fill_char = 0x80;
925  memset(buf, fill_char, size);
926 }
927 
928 static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
929  const uint8_t *buf, int buf_size)
930 {
931  AVCodecContext *enc = ost->st->codec;
932  AVFrame *frame = NULL;
933  AVPacket pkt;
934  int ret, got_packet;
935 
936  av_init_packet(&pkt);
937  pkt.data = NULL;
938  pkt.size = 0;
939 
940  if (buf) {
941  if (!ost->output_frame) {
943  if (!ost->output_frame) {
944  av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
945  exit_program(1);
946  }
947  }
948  frame = ost->output_frame;
949  if (frame->extended_data != frame->data)
950  av_freep(&frame->extended_data);
952 
953  frame->nb_samples = buf_size /
955  if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
956  buf, buf_size, 1)) < 0) {
957  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
958  exit_program(1);
959  }
960  }
961 
962  got_packet = 0;
963  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
964  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
965  exit_program(1);
966  }
967 
968  if (got_packet) {
969  pkt.stream_index = ost->index;
970  if (pkt.pts != AV_NOPTS_VALUE)
971  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
972  if (pkt.duration > 0)
973  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
974 
975  write_frame(s, &pkt, ost);
976 
977  audio_size += pkt.size;
978  }
979 
980  if (frame)
981  ost->sync_opts += frame->nb_samples;
982 
983  return pkt.size;
984 }
985 
986 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
987  InputStream *ist, AVFrame *decoded_frame)
988 {
989  uint8_t *buftmp;
990  int64_t audio_buf_size;
991 
992  int size_out, frame_bytes, resample_changed;
993  AVCodecContext *enc = ost->st->codec;
994  AVCodecContext *dec = ist->st->codec;
995  int osize = av_get_bytes_per_sample(enc->sample_fmt);
996  int isize = av_get_bytes_per_sample(dec->sample_fmt);
997  uint8_t *buf = decoded_frame->data[0];
998  int size = decoded_frame->nb_samples * dec->channels * isize;
999  int64_t allocated_for_size = size;
1000 
1001 need_realloc:
1002  audio_buf_size = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
1003  audio_buf_size = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
1004  audio_buf_size = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
1005  audio_buf_size = FFMAX(audio_buf_size, enc->frame_size);
1006  audio_buf_size *= osize * enc->channels;
1007 
1008  if (audio_buf_size > INT_MAX) {
1009  av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
1010  exit_program(1);
1011  }
1012 
1014  if (!audio_buf) {
1015  av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
1016  exit_program(1);
1017  }
1018 
1019  if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate)
1020  ost->audio_resample = 1;
1021 
1022  resample_changed = ost->resample_sample_fmt != dec->sample_fmt ||
1023  ost->resample_channels != dec->channels ||
1024  ost->resample_sample_rate != dec->sample_rate;
1025 
1026  if ((ost->audio_resample && !ost->resample) || resample_changed) {
1027  if (resample_changed) {
1028  av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
1029  ist->file_index, ist->st->index,
1032  ost->resample_sample_fmt = dec->sample_fmt;
1033  ost->resample_channels = dec->channels;
1034  ost->resample_sample_rate = dec->sample_rate;
1035  if (ost->resample)
1036  audio_resample_close(ost->resample);
1037  }
1038  /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
1039  if (audio_sync_method <= 1 &&
1040  ost->resample_sample_fmt == enc->sample_fmt &&
1041  ost->resample_channels == enc->channels &&
1042  ost->resample_sample_rate == enc->sample_rate) {
1043  ost->resample = NULL;
1044  ost->audio_resample = 0;
1045  } else if (ost->audio_resample) {
1046  if (dec->sample_fmt != AV_SAMPLE_FMT_S16)
1047  av_log(NULL, AV_LOG_WARNING, "Using s16 intermediate sample format for resampling\n");
1048  ost->resample = av_audio_resample_init(enc->channels, dec->channels,
1049  enc->sample_rate, dec->sample_rate,
1050  enc->sample_fmt, dec->sample_fmt,
1051  16, 10, 0, 0.8);
1052  if (!ost->resample) {
1053  av_log(NULL, AV_LOG_FATAL, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
1054  dec->channels, dec->sample_rate,
1055  enc->channels, enc->sample_rate);
1056  exit_program(1);
1057  }
1058  }
1059  }
1060 
1061 #define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
1062  if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
1063  MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
1064  if (ost->reformat_ctx)
1067  dec->sample_fmt, 1, NULL, 0);
1068  if (!ost->reformat_ctx) {
1069  av_log(NULL, AV_LOG_FATAL, "Cannot convert %s sample format to %s sample format\n",
1072  exit_program(1);
1073  }
1075  }
1076 
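/* Illustrative note (not in the original source): "delta" below is measured in
 * output samples and compares where the stream should be according to the
 * input timestamps (get_sync_ipts() * sample_rate) with what has actually been
 * produced (sync_opts plus whatever is still queued in the fifo). A positive
 * delta means too few samples exist, so silence is inserted (or the resampler
 * is asked to compensate); a negative delta drops input samples. */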
1077  if (audio_sync_method) {
1078  double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts -
1079  av_fifo_size(ost->fifo) / (enc->channels * osize);
1080  int idelta = delta * dec->sample_rate / enc->sample_rate;
1081  int byte_delta = idelta * isize * dec->channels;
1082 
1083  // FIXME resample delay
1084  if (fabs(delta) > 50) {
1085  if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
1086  if (byte_delta < 0) {
1087  byte_delta = FFMAX(byte_delta, -size);
1088  size += byte_delta;
1089  buf -= byte_delta;
1090  av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
1091  -byte_delta / (isize * dec->channels));
1092  if (!size)
1093  return;
1094  ist->is_start = 0;
1095  } else {
1096  static uint8_t *input_tmp = NULL;
1097  input_tmp = av_realloc(input_tmp, byte_delta + size);
1098 
1099  if (byte_delta > allocated_for_size - size) {
1100  allocated_for_size = byte_delta + (int64_t)size;
1101  goto need_realloc;
1102  }
1103  ist->is_start = 0;
1104 
1105  generate_silence(input_tmp, dec->sample_fmt, byte_delta);
1106  memcpy(input_tmp + byte_delta, buf, size);
1107  buf = input_tmp;
1108  size += byte_delta;
1109  av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
1110  }
1111  } else if (audio_sync_method > 1) {
1112  int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
1113  av_assert0(ost->audio_resample);
1114  av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
1115  delta, comp, enc->sample_rate);
1116 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
1117  av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
1118  }
1119  }
1120  } else
1121  ost->sync_opts = lrintf(get_sync_ipts(ost) * enc->sample_rate) -
1122  av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
1123 
1124  if (ost->audio_resample) {
1125  buftmp = audio_buf;
1126  size_out = audio_resample(ost->resample,
1127  (short *)buftmp, (short *)buf,
1128  size / (dec->channels * isize));
1129  size_out = size_out * enc->channels * osize;
1130  } else {
1131  buftmp = buf;
1132  size_out = size;
1133  }
1134 
1135  if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
1136  const void *ibuf[6] = { buftmp };
1137  void *obuf[6] = { audio_buf };
1138  int istride[6] = { isize };
1139  int ostride[6] = { osize };
1140  int len = size_out / istride[0];
1141  if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
1142  printf("av_audio_convert() failed\n");
1143  if (exit_on_error)
1144  exit_program(1);
1145  return;
1146  }
1147  buftmp = audio_buf;
1148  size_out = len * osize;
1149  }
1150 
1151  /* now encode as many frames as possible */
1153  /* output resampled raw samples */
1154  if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
1155  av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
1156  exit_program(1);
1157  }
1158  av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
1159 
1160  frame_bytes = enc->frame_size * osize * enc->channels;
1161 
1162  while (av_fifo_size(ost->fifo) >= frame_bytes) {
1163  av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1164  encode_audio_frame(s, ost, audio_buf, frame_bytes);
1165  }
1166  } else {
1167  encode_audio_frame(s, ost, buftmp, size_out);
1168  }
1169 }
1170 
1171 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1172 {
1173  AVCodecContext *dec;
1174  AVPicture *picture2;
1175  AVPicture picture_tmp;
1176  uint8_t *buf = 0;
1177 
1178  dec = ist->st->codec;
1179 
1180  /* deinterlace : must be done before any resize */
1181  if (do_deinterlace) {
1182  int size;
1183 
1184  /* create temporary picture */
1185  size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1186  buf = av_malloc(size);
1187  if (!buf)
1188  return;
1189 
1190  picture2 = &picture_tmp;
1191  avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1192 
1193  if (avpicture_deinterlace(picture2, picture,
1194  dec->pix_fmt, dec->width, dec->height) < 0) {
1195  /* if error, do not deinterlace */
1196  av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
1197  av_free(buf);
1198  buf = NULL;
1199  picture2 = picture;
1200  }
1201  } else {
1202  picture2 = picture;
1203  }
1204 
1205  if (picture != picture2)
1206  *picture = *picture2;
1207  *bufp = buf;
1208 }
1209 
1210 static void do_subtitle_out(AVFormatContext *s,
1211  OutputStream *ost,
1212  InputStream *ist,
1213  AVSubtitle *sub,
1214  int64_t pts)
1215 {
1216  static uint8_t *subtitle_out = NULL;
1217  int subtitle_out_max_size = 1024 * 1024;
1218  int subtitle_out_size, nb, i;
1219  AVCodecContext *enc;
1220  AVPacket pkt;
1221 
1222  if (pts == AV_NOPTS_VALUE) {
1223  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1224  if (exit_on_error)
1225  exit_program(1);
1226  return;
1227  }
1228 
1229  enc = ost->st->codec;
1230 
1231  if (!subtitle_out) {
1232  subtitle_out = av_malloc(subtitle_out_max_size);
1233  }
1234 
1235  /* Note: DVB subtitles need one packet to draw them and another
1236  packet to clear them */
1237  /* XXX: signal it in the codec context ? */
1238  if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1239  nb = 2;
1240  else
1241  nb = 1;
1242 
1243  for (i = 0; i < nb; i++) {
1244  sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1245  // start_display_time is required to be 0
1246  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1247  sub->end_display_time -= sub->start_display_time;
1248  sub->start_display_time = 0;
1249  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1250  subtitle_out_max_size, sub);
1251  if (subtitle_out_size < 0) {
1252  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1253  exit_program(1);
1254  }
1255 
1256  av_init_packet(&pkt);
1257  pkt.stream_index = ost->index;
1258  pkt.data = subtitle_out;
1259  pkt.size = subtitle_out_size;
1260  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1261  if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1262  /* XXX: the pts correction is handled here. Maybe handling
1263  it in the codec would be better */
1264  if (i == 0)
1265  pkt.pts += 90 * sub->start_display_time;
1266  else
1267  pkt.pts += 90 * sub->end_display_time;
1268  }
1269  write_frame(s, &pkt, ost);
1270  }
1271 }
1272 
1273 static int bit_buffer_size = 1024 * 256;
1274 static uint8_t *bit_buffer = NULL;
1275 
1276 #if !CONFIG_AVFILTER
1277 static void do_video_resample(OutputStream *ost,
1278  InputStream *ist,
1279  AVFrame *in_picture,
1280  AVFrame **out_picture)
1281 {
1282  int resample_changed = 0;
1283  *out_picture = in_picture;
1284 
1285  resample_changed = ost->resample_width != in_picture->width ||
1286  ost->resample_height != in_picture->height ||
1287  ost->resample_pix_fmt != in_picture->format;
1288 
1289  if (resample_changed) {
1291  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1292  ist->file_index, ist->st->index,
1294  in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format));
1295  if (!ost->video_resample)
1296  ost->video_resample = 1;
1297  }
1298 
1299  if (ost->video_resample) {
1300  *out_picture = &ost->pict_tmp;
1301  if (resample_changed) {
1302  /* initialize a new scaler context */
1305  ist->st->codec->width,
1306  ist->st->codec->height,
1307  ist->st->codec->pix_fmt,
1308  ost->st->codec->width,
1309  ost->st->codec->height,
1310  ost->st->codec->pix_fmt,
1311  ost->sws_flags, NULL, NULL, NULL);
1312  if (ost->img_resample_ctx == NULL) {
1313  av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
1314  exit_program(1);
1315  }
1316  }
1317  sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize,
1318  0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize);
1319  }
1320  if (resample_changed) {
1321  ost->resample_width = in_picture->width;
1322  ost->resample_height = in_picture->height;
1323  ost->resample_pix_fmt = in_picture->format;
1324  }
1325 }
1326 #endif
1327 
1328 
1329 static void do_video_out(AVFormatContext *s,
1330  OutputStream *ost,
1331  InputStream *ist,
1332  AVFrame *in_picture,
1333  int *frame_size, float quality)
1334 {
1335  int nb_frames, i, ret, format_video_sync;
1336  AVFrame *final_picture;
1337  AVCodecContext *enc;
1338  double sync_ipts;
1339 
1340  enc = ost->st->codec;
1341 
1342  sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base);
1343 
1344  /* by default, we output a single frame */
1345  nb_frames = 1;
1346 
1347  *frame_size = 0;
1348 
1349  format_video_sync = video_sync_method;
1350  if (format_video_sync == VSYNC_AUTO)
1351  format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1353 
1354  if (format_video_sync != VSYNC_PASSTHROUGH) {
1355  double vdelta = sync_ipts - ost->sync_opts;
1356  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1357  if (vdelta < -1.1)
1358  nb_frames = 0;
1359  else if (format_video_sync == VSYNC_VFR) {
1360  if (vdelta <= -0.6) {
1361  nb_frames = 0;
1362  } else if (vdelta > 0.6)
1363  ost->sync_opts = lrintf(sync_ipts);
1364  } else if (vdelta > 1.1)
1365  nb_frames = lrintf(vdelta);
1366 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
1367  if (nb_frames == 0) {
1368  ++nb_frames_drop;
1369  av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1370  } else if (nb_frames > 1) {
1371  nb_frames_dup += nb_frames - 1;
1372  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1373  }
1374  } else
1375  ost->sync_opts = lrintf(sync_ipts);
1376 
1377  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1378  if (nb_frames <= 0)
1379  return;
1380 
1381 #if !CONFIG_AVFILTER
1382  do_video_resample(ost, ist, in_picture, &final_picture);
1383 #else
1384  final_picture = in_picture;
1385 #endif
1386 
1387  /* duplicates frame if needed */
1388  for (i = 0; i < nb_frames; i++) {
1389  AVPacket pkt;
1390  av_init_packet(&pkt);
1391  pkt.stream_index = ost->index;
1392 
1393  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1394  enc->codec->id == CODEC_ID_RAWVIDEO) {
1395  /* raw pictures are written as an AVPicture structure to
1396  avoid any copies. We temporarily support the older
1397  method. */
1398  enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1399  enc->coded_frame->top_field_first = in_picture->top_field_first;
1400  pkt.data = (uint8_t *)final_picture;
1401  pkt.size = sizeof(AVPicture);
1402  pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1403  pkt.flags |= AV_PKT_FLAG_KEY;
1404 
1405  write_frame(s, &pkt, ost);
1406  } else {
1407  AVFrame big_picture;
1408 
1409  big_picture = *final_picture;
1410  /* better than nothing: use input picture interlaced
1411  settings */
1412  big_picture.interlaced_frame = in_picture->interlaced_frame;
1414  if (ost->top_field_first == -1)
1415  big_picture.top_field_first = in_picture->top_field_first;
1416  else
1417  big_picture.top_field_first = !!ost->top_field_first;
1418  }
1419 
1420  /* handles same_quant here. This is not correct because it may
1421  not be a global option */
1422  big_picture.quality = quality;
1423  if (!enc->me_threshold)
1424  big_picture.pict_type = 0;
1425 // big_picture.pts = AV_NOPTS_VALUE;
1426  big_picture.pts = ost->sync_opts;
1427 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
1428 // av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
1429  if (ost->forced_kf_index < ost->forced_kf_count &&
1430  big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1431  big_picture.pict_type = AV_PICTURE_TYPE_I;
1432  ost->forced_kf_index++;
1433  }
1434  ret = avcodec_encode_video(enc,
1435  bit_buffer, bit_buffer_size,
1436  &big_picture);
1437  if (ret < 0) {
1438  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1439  exit_program(1);
1440  }
1441 
1442  if (ret > 0) {
1443  pkt.data = bit_buffer;
1444  pkt.size = ret;
1445  if (enc->coded_frame->pts != AV_NOPTS_VALUE)
1446  pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1447 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
1448  pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
1449  pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
1450 
1451  if (enc->coded_frame->key_frame)
1452  pkt.flags |= AV_PKT_FLAG_KEY;
1453  write_frame(s, &pkt, ost);
1454  *frame_size = ret;
1455  video_size += ret;
1456  // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
1457  // enc->frame_number-1, ret, enc->pict_type);
1458  /* if two pass, output log */
1459  if (ost->logfile && enc->stats_out) {
1460  fprintf(ost->logfile, "%s", enc->stats_out);
1461  }
1462  }
1463  }
1464  ost->sync_opts++;
1465  /*
1466  * For video, number of frames in == number of packets out.
1467  * But there may be reordering, so we can't throw away frames on encoder
1468  * flush, we need to limit them here, before they go into encoder.
1469  */
1470  ost->frame_number++;
1471  }
1472 }
1473 
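/* psnr() below maps a normalized mean squared error to decibels,
 * -10 * log10(d); e.g. d = 1e-4 gives 40 dB. */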
1474 static double psnr(double d)
1475 {
1476  return -10.0 * log(d) / log(10.0);
1477 }
1478 
1479 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1480  int frame_size)
1481 {
1482  AVCodecContext *enc;
1483  int frame_number;
1484  double ti1, bitrate, avg_bitrate;
1485 
1486  /* this is executed just the first time do_video_stats is called */
1487  if (!vstats_file) {
1488  vstats_file = fopen(vstats_filename, "w");
1489  if (!vstats_file) {
1490  perror("fopen");
1491  exit_program(1);
1492  }
1493  }
1494 
1495  enc = ost->st->codec;
1496  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1497  frame_number = ost->frame_number;
1498  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1499  if (enc->flags&CODEC_FLAG_PSNR)
1500  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1501 
1502  fprintf(vstats_file,"f_size= %6d ", frame_size);
1503  /* compute pts value */
1504  ti1 = ost->sync_opts * av_q2d(enc->time_base);
1505  if (ti1 < 0.01)
1506  ti1 = 0.01;
1507 
1508  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1509  avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1510  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1511  (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1513  }
1514 }
1515 
1516 static void print_report(OutputFile *output_files,
1517  OutputStream *ost_table, int nb_ostreams,
1518  int is_last_report, int64_t timer_start)
1519 {
1520  char buf[1024];
1521  OutputStream *ost;
1522  AVFormatContext *oc;
1523  int64_t total_size;
1524  AVCodecContext *enc;
1525  int frame_number, vid, i;
1526  double bitrate, ti1, pts;
1527  static int64_t last_time = -1;
1528  static int qp_histogram[52];
1529 
1530  if (!print_stats && !is_last_report)
1531  return;
1532 
1533  if (!is_last_report) {
1534  int64_t cur_time;
1535  /* display the report every 0.5 seconds */
1536  cur_time = av_gettime();
1537  if (last_time == -1) {
1538  last_time = cur_time;
1539  return;
1540  }
1541  if ((cur_time - last_time) < 500000)
1542  return;
1543  last_time = cur_time;
1544  }
1545 
1546 
1547  oc = output_files[0].ctx;
1548 
1549  total_size = avio_size(oc->pb);
1550  if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1551  total_size = avio_tell(oc->pb);
1552 
1553  buf[0] = '\0';
1554  ti1 = 1e10;
1555  vid = 0;
1556  for (i = 0; i < nb_ostreams; i++) {
1557  float q = -1;
1558  ost = &ost_table[i];
1559  enc = ost->st->codec;
1560  if (!ost->stream_copy && enc->coded_frame)
1561  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1562  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1563  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1564  }
1565  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1566  float t = (av_gettime() - timer_start) / 1000000.0;
1567 
1568  frame_number = ost->frame_number;
1569  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1570  frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1571  if (is_last_report)
1572  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1573  if (qp_hist) {
1574  int j;
1575  int qp = lrintf(q);
1576  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1577  qp_histogram[qp]++;
1578  for (j = 0; j < 32; j++)
1579  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1580  }
1581  if (enc->flags&CODEC_FLAG_PSNR) {
1582  int j;
1583  double error, error_sum = 0;
1584  double scale, scale_sum = 0;
1585  char type[3] = { 'Y','U','V' };
1586  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1587  for (j = 0; j < 3; j++) {
1588  if (is_last_report) {
1589  error = enc->error[j];
1590  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1591  } else {
1592  error = enc->coded_frame->error[j];
1593  scale = enc->width * enc->height * 255.0 * 255.0;
1594  }
1595  if (j)
1596  scale /= 4;
1597  error_sum += error;
1598  scale_sum += scale;
1599  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1600  }
1601  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1602  }
1603  vid = 1;
1604  }
1605  /* compute min output value */
1606  pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1607  if ((pts < ti1) && (pts > 0))
1608  ti1 = pts;
1609  }
1610  if (ti1 < 0.01)
1611  ti1 = 0.01;
1612 
1613  bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1614 
1615  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1616  "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1617  (double)total_size / 1024, ti1, bitrate);
1618 
1619  if (nb_frames_dup || nb_frames_drop)
1620  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1621  nb_frames_dup, nb_frames_drop);
1622 
1623  av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1624 
1625  fflush(stderr);
1626 
1627  if (is_last_report) {
1628  int64_t raw= audio_size + video_size + extra_size;
1629  av_log(NULL, AV_LOG_INFO, "\n");
1630  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1631  video_size / 1024.0,
1632  audio_size / 1024.0,
1633  extra_size / 1024.0,
1634  100.0 * (total_size - raw) / raw
1635  );
1636  }
1637 }
1638 
1639 static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1640 {
1641  int i, ret;
1642 
1643  for (i = 0; i < nb_ostreams; i++) {
1644  OutputStream *ost = &ost_table[i];
1645  AVCodecContext *enc = ost->st->codec;
1646  AVFormatContext *os = output_files[ost->file_index].ctx;
1647  int stop_encoding = 0;
1648 
1649  if (!ost->encoding_needed)
1650  continue;
1651 
1652  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1653  continue;
1655  continue;
1656 
1657  for (;;) {
1658  AVPacket pkt;
1659  int fifo_bytes;
1660  av_init_packet(&pkt);
1661  pkt.data = NULL;
1662  pkt.size = 0;
1663 
1664  switch (ost->st->codec->codec_type) {
1665  case AVMEDIA_TYPE_AUDIO:
1666  fifo_bytes = av_fifo_size(ost->fifo);
1667  if (fifo_bytes > 0) {
1668  /* encode any samples remaining in fifo */
1669  int frame_bytes = fifo_bytes;
1670 
1671  av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
1672 
1673  /* pad last frame with silence if needed */
1674  if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
1675  frame_bytes = enc->frame_size * enc->channels *
1676  av_get_bytes_per_sample(enc->sample_fmt);
1677  if (allocated_audio_buf_size < frame_bytes)
1678  exit_program(1);
1679  generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
1680  }
1681  encode_audio_frame(os, ost, audio_buf, frame_bytes);
1682  } else {
1683  /* flush encoder with NULL frames until it is done
1684  returning packets */
1685  if (encode_audio_frame(os, ost, NULL, 0) == 0) {
1686  stop_encoding = 1;
1687  break;
1688  }
1689  }
1690  break;
1691  case AVMEDIA_TYPE_VIDEO:
1692  ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1693  if (ret < 0) {
1694  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1695  exit_program(1);
1696  }
1697  video_size += ret;
1698  if (enc->coded_frame && enc->coded_frame->key_frame)
1699  pkt.flags |= AV_PKT_FLAG_KEY;
1700  if (ost->logfile && enc->stats_out) {
1701  fprintf(ost->logfile, "%s", enc->stats_out);
1702  }
1703  if (ret <= 0) {
1704  stop_encoding = 1;
1705  break;
1706  }
1707  pkt.stream_index = ost->index;
1708  pkt.data = bit_buffer;
1709  pkt.size = ret;
1710  if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1711  pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1712  write_frame(os, &pkt, ost);
1713  break;
1714  default:
1715  stop_encoding = 1;
1716  }
1717  if (stop_encoding)
1718  break;
1719  }
1720  }
1721 }
1722 
1723 /*
1724  * Check whether a packet from ist should be written into ost at this time
1725  */
1726 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1727 {
1728  OutputFile *of = &output_files[ost->file_index];
1729  int ist_index = ist - input_streams;
1730 
1731  if (ost->source_index != ist_index)
1732  return 0;
1733 
1734  if (of->start_time && ist->pts < of->start_time)
1735  return 0;
1736 
1737  if (of->recording_time != INT64_MAX &&
1738  av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
1739  (AVRational){ 1, 1000000 }) >= 0) {
1740  ost->is_past_recording_time = 1;
1741  return 0;
1742  }
1743 
1744  return 1;
1745 }
1746 
1747 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1748 {
1749  OutputFile *of = &output_files[ost->file_index];
1750  int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1751  AVPacket opkt;
1752 
1753  av_init_packet(&opkt);
1754 
1755  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1756  !ost->copy_initial_nonkeyframes)
1757  return;
1758 
1759  /* force the input stream PTS */
1760  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1761  audio_size += pkt->size;
1762  else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1763  video_size += pkt->size;
1764  ost->sync_opts++;
1765  }
1766 
1767  opkt.stream_index = ost->index;
1768  if (pkt->pts != AV_NOPTS_VALUE)
1769  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1770  else
1771  opkt.pts = AV_NOPTS_VALUE;
1772 
1773  if (pkt->dts == AV_NOPTS_VALUE)
1774  opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
1775  else
1776  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1777  opkt.dts -= ost_tb_start_time;
1778 
1779  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1780  opkt.flags = pkt->flags;
1781 
1782  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1783  if ( ost->st->codec->codec_id != CODEC_ID_H264
1784  && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1785  && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1786  ) {
1787  if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1788  opkt.destruct = av_destruct_packet;
1789  } else {
1790  opkt.data = pkt->data;
1791  opkt.size = pkt->size;
1792  }
1793 
1794  write_frame(of->ctx, &opkt, ost);
1795  ost->st->codec->frame_number++;
1796  av_free_packet(&opkt);
1797 }
1798 
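/* Illustrative note: rate_emu_sleep() below implements -re style rate
 * emulation. If the current stream pts corresponds to, say, 2.0 s but only
 * 1.5 s of wall-clock time has passed since reading started, the thread sleeps
 * for the remaining ~0.5 s so the input is consumed at roughly real-time
 * speed. */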
1799 static void rate_emu_sleep(InputStream *ist)
1800 {
1801  if (input_files[ist->file_index].rate_emu) {
1802  int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
1803  int64_t now = av_gettime() - ist->start;
1804  if (pts > now)
1805  usleep(pts - now);
1806  }
1807 }
1808 
1809 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1810 {
1811  AVFrame *decoded_frame;
1812  AVCodecContext *avctx = ist->st->codec;
1813  int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
1814  int i, ret;
1815 
1816  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1817  return AVERROR(ENOMEM);
1818  else
1819  avcodec_get_frame_defaults(ist->decoded_frame);
1820  decoded_frame = ist->decoded_frame;
1821 
1822  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1823  if (ret < 0) {
1824  return ret;
1825  }
1826 
1827  if (!*got_output) {
1828  /* no audio frame */
1829  return ret;
1830  }
1831 
1832  /* if the decoder provides a pts, use it instead of the last packet pts.
1833  the decoder could be delaying output by a packet or more. */
1834  if (decoded_frame->pts != AV_NOPTS_VALUE)
1835  ist->next_pts = decoded_frame->pts;
1836 
1837  /* increment next_pts to use for the case where the input stream does not
1838  have timestamps or there are multiple frames in the packet */
1839  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1840  avctx->sample_rate;
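 /* e.g. (illustrative) a 1024-sample frame at 44100 Hz advances next_pts by
  * 1024 * AV_TIME_BASE / 44100, i.e. roughly 23.2 ms in AV_TIME_BASE units. */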
1841 
1842  // preprocess audio (volume)
1843  if (audio_volume != 256) {
1844  int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
1845  void *samples = decoded_frame->data[0];
1846  switch (avctx->sample_fmt) {
1847  case AV_SAMPLE_FMT_U8:
1848  {
1849  uint8_t *volp = samples;
1850  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1851  int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
1852  *volp++ = av_clip_uint8(v);
1853  }
1854  break;
1855  }
1856  case AV_SAMPLE_FMT_S16:
1857  {
1858  int16_t *volp = samples;
1859  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1860  int v = ((*volp) * audio_volume + 128) >> 8;
1861  *volp++ = av_clip_int16(v);
1862  }
1863  break;
1864  }
1865  case AV_SAMPLE_FMT_S32:
1866  {
1867  int32_t *volp = samples;
1868  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1869  int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
1870  *volp++ = av_clipl_int32(v);
1871  }
1872  break;
1873  }
1874  case AV_SAMPLE_FMT_FLT:
1875  {
1876  float *volp = samples;
1877  float scale = audio_volume / 256.f;
1878  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1879  *volp++ *= scale;
1880  }
1881  break;
1882  }
1883  case AV_SAMPLE_FMT_DBL:
1884  {
1885  double *volp = samples;
1886  double scale = audio_volume / 256.;
1887  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1888  *volp++ *= scale;
1889  }
1890  break;
1891  }
1892  default:
1894  "Audio volume adjustment on sample format %s is not supported.\n",
1896  exit_program(1);
1897  }
1898  }
1899 
1900  rate_emu_sleep(ist);
1901 
1902  for (i = 0; i < nb_output_streams; i++) {
1903  OutputStream *ost = &output_streams[i];
1904 
1905  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1906  continue;
1907  do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
1908  }
1909 
1910  return ret;
1911 }
1912 
1913 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
1914 {
1915  AVFrame *decoded_frame, *filtered_frame = NULL;
1916  void *buffer_to_free = NULL;
1917  int i, ret = 0;
1918  float quality;
1919 #if CONFIG_AVFILTER
1920  int frame_available = 1;
1921 #endif
1922 
1923  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1924  return AVERROR(ENOMEM);
1925  else
1926  avcodec_get_frame_defaults(ist->decoded_frame);
1927  decoded_frame = ist->decoded_frame;
1928  pkt->pts = *pkt_pts;
1929  pkt->dts = ist->pts;
1930  *pkt_pts = AV_NOPTS_VALUE;
1931 
1932  ret = avcodec_decode_video2(ist->st->codec,
1933  decoded_frame, got_output, pkt);
1934  if (ret < 0)
1935  return ret;
1936 
1937  quality = same_quant ? decoded_frame->quality : 0;
1938  if (!*got_output) {
1939  /* no picture yet */
1940  return ret;
1941  }
1942  ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1943  decoded_frame->pkt_dts);
1944  if (pkt->duration)
1945  ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1946  else if (ist->st->codec->time_base.num != 0) {
1947  int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1948  ist->st->codec->ticks_per_frame;
1949  ist->next_pts += ((int64_t)AV_TIME_BASE *
1950  ist->st->codec->time_base.num * ticks) /
1951  ist->st->codec->time_base.den;
1952  }
1953  pkt->size = 0;
1954  pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1955 
1956  rate_emu_sleep(ist);
1957 
1958  for (i = 0; i < nb_output_streams; i++) {
1959  OutputStream *ost = &output_streams[i];
1960  int frame_size, resample_changed;
1961 
1962  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1963  continue;
1964 
1965 #if CONFIG_AVFILTER
1966  resample_changed = ost->resample_width != decoded_frame->width ||
1967  ost->resample_height != decoded_frame->height ||
1968  ost->resample_pix_fmt != decoded_frame->format;
1969  if (resample_changed) {
1971  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1972  ist->file_index, ist->st->index,
1974  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1975 
1976  avfilter_graph_free(&ost->graph);
1977  if (configure_video_filters(ist, ost)) {
1978  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1979  exit_program(1);
1980  }
1981 
1982  ost->resample_width = decoded_frame->width;
1983  ost->resample_height = decoded_frame->height;
1984  ost->resample_pix_fmt = decoded_frame->format;
1985  }
1986 
1987  if (ist->st->sample_aspect_ratio.num)
1988  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
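/* Two ways of feeding the decoded frame into the filtergraph: for DR1 decoders the
 * frame data is wrapped into a buffer reference and handed over without copying
 * (the underlying FrameBuffer refcount is bumped); otherwise the data is copied
 * by av_vsrc_buffer_add_frame(). */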
1989  if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
 1990  FrameBuffer *buf = decoded_frame->opaque;
 1991  AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
 1992  decoded_frame->data, decoded_frame->linesize,
 1993  AV_PERM_READ | AV_PERM_PRESERVE,
 1994  ist->st->codec->width, ist->st->codec->height,
 1995  ist->st->codec->pix_fmt);
 1996 
 1997  avfilter_copy_frame_props(fb, decoded_frame);
 1998  fb->pts = ist->pts;
 1999  fb->buf->priv = buf;
 2000  fb->buf->free = filter_release_buffer;
 2001 
2002  buf->refcount++;
2003  av_buffersrc_buffer(ost->input_video_filter, fb);
2004  } else
2005  av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
2006  ist->pts, decoded_frame->sample_aspect_ratio);
2007 
2008  if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
2009  av_free(buffer_to_free);
2010  return AVERROR(ENOMEM);
2011  } else
 2012  avcodec_get_frame_defaults(ist->filtered_frame);
 2013  filtered_frame = ist->filtered_frame;
2014 
2015  frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
2016  while (frame_available) {
2017  AVRational ist_pts_tb;
2018  if (ost->output_video_filter)
2019  get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
2020  if (ost->picref)
2021  ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
2022  if (ost->picref->video && !ost->frame_aspect_ratio)
2023  ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
2024 #else
2025  filtered_frame = decoded_frame;
2026 #endif
2027 
2028  do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
2029  same_quant ? quality : ost->st->codec->global_quality);
2030  if (vstats_filename && frame_size)
2031  do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
2032 #if CONFIG_AVFILTER
2033  frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
2034  if (ost->picref)
2035  avfilter_unref_buffer(ost->picref);
2036  }
2037 #endif
2038  }
2039 
2040  av_free(buffer_to_free);
2041  return ret;
2042 }
2043 
2044 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2045 {
2046  AVSubtitle subtitle;
2047  int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2048  &subtitle, got_output, pkt);
2049  if (ret < 0)
2050  return ret;
2051  if (!*got_output)
2052  return ret;
2053 
2054  rate_emu_sleep(ist);
2055 
2056  for (i = 0; i < nb_output_streams; i++) {
2057  OutputStream *ost = &output_streams[i];
2058 
2059  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2060  continue;
2061 
2062  do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
2063  }
2064 
2065  avsubtitle_free(&subtitle);
2066  return ret;
2067 }
2068 
2069 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2070 static int output_packet(InputStream *ist,
2071  OutputStream *ost_table, int nb_ostreams,
2072  const AVPacket *pkt)
2073 {
2074  int i;
2075  int got_output;
2076  int64_t pkt_pts = AV_NOPTS_VALUE;
2077  AVPacket avpkt;
2078 
2079  if (ist->next_pts == AV_NOPTS_VALUE)
2080  ist->next_pts = ist->pts;
2081 
2082  if (pkt == NULL) {
2083  /* EOF handling */
2084  av_init_packet(&avpkt);
2085  avpkt.data = NULL;
2086  avpkt.size = 0;
2087  goto handle_eof;
2088  } else {
2089  avpkt = *pkt;
2090  }
2091 
2092  if (pkt->dts != AV_NOPTS_VALUE)
2093  ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2094  if (pkt->pts != AV_NOPTS_VALUE)
2095  pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2096 
2097  // while we have more to decode or while the decoder did output something on EOF
2098  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2099  int ret = 0;
2100  handle_eof:
2101 
2102  ist->pts = ist->next_pts;
2103 
2104  if (avpkt.size && avpkt.size != pkt->size) {
 2105  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
 2106  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2107  ist->showed_multi_packet_warning = 1;
2108  }
2109 
2110  switch (ist->st->codec->codec_type) {
2111  case AVMEDIA_TYPE_AUDIO:
2112  ret = transcode_audio (ist, &avpkt, &got_output);
2113  break;
2114  case AVMEDIA_TYPE_VIDEO:
2115  ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
2116  break;
2117  case AVMEDIA_TYPE_SUBTITLE:
2118  ret = transcode_subtitles(ist, &avpkt, &got_output);
2119  break;
2120  default:
2121  return -1;
2122  }
2123 
2124  if (ret < 0)
2125  return ret;
2126  // touch data and size only if not EOF
2127  if (pkt) {
2128  avpkt.data += ret;
2129  avpkt.size -= ret;
2130  }
2131  if (!got_output) {
2132  continue;
2133  }
2134  }
2135 
2136  /* handle stream copy */
2137  if (!ist->decoding_needed) {
2138  rate_emu_sleep(ist);
2139  ist->pts = ist->next_pts;
2140  switch (ist->st->codec->codec_type) {
2141  case AVMEDIA_TYPE_AUDIO:
2142  ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2143  ist->st->codec->sample_rate;
2144  break;
2145  case AVMEDIA_TYPE_VIDEO:
2146  if (ist->st->codec->time_base.num != 0) {
2147  int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2148  ist->next_pts += ((int64_t)AV_TIME_BASE *
2149  ist->st->codec->time_base.num * ticks) /
2150  ist->st->codec->time_base.den;
2151  }
2152  break;
2153  }
2154  }
2155  for (i = 0; pkt && i < nb_ostreams; i++) {
2156  OutputStream *ost = &ost_table[i];
2157 
2158  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2159  continue;
2160 
2161  do_streamcopy(ist, ost, pkt);
2162  }
2163 
2164  return 0;
2165 }
2166 
2167 static void print_sdp(OutputFile *output_files, int n)
2168 {
2169  char sdp[2048];
2170  int i;
2171  AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
2172 
2173  if (!avc)
2174  exit_program(1);
2175  for (i = 0; i < n; i++)
2176  avc[i] = output_files[i].ctx;
2177 
2178  av_sdp_create(avc, n, sdp, sizeof(sdp));
2179  printf("SDP:\n%s\n", sdp);
2180  fflush(stdout);
2181  av_freep(&avc);
2182 }
2183 
2184 static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
2185  char *error, int error_len)
2186 {
2187  int i;
2188  InputStream *ist = &input_streams[ist_index];
2189  if (ist->decoding_needed) {
2190  AVCodec *codec = ist->dec;
2191  if (!codec) {
2192  snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2193  ist->st->codec->codec_id, ist->file_index, ist->st->index);
2194  return AVERROR(EINVAL);
2195  }
2196 
2197  /* update requested sample format for the decoder based on the
2198  corresponding encoder sample format */
2199  for (i = 0; i < nb_output_streams; i++) {
2200  OutputStream *ost = &output_streams[i];
2201  if (ost->source_index == ist_index) {
2202  update_sample_fmt(ist->st->codec, codec, ost->st->codec);
2203  break;
2204  }
2205  }
2206 
2207  if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
 2208  ist->st->codec->get_buffer = codec_get_buffer;
 2209  ist->st->codec->release_buffer = codec_release_buffer;
 2210  ist->st->codec->opaque = ist;
2211  }
2212 
2213  if (!av_dict_get(ist->opts, "threads", NULL, 0))
2214  av_dict_set(&ist->opts, "threads", "auto", 0);
2215  if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2216  snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2217  ist->file_index, ist->st->index);
2218  return AVERROR(EINVAL);
2219  }
2221  assert_avoptions(ist->opts);
2222  }
2223 
2224  ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2225  ist->next_pts = AV_NOPTS_VALUE;
 2226  init_pts_correction(&ist->pts_ctx);
 2227  ist->is_start = 1;
2228 
2229  return 0;
2230 }
2231 
2232 static int transcode_init(OutputFile *output_files,
2233  int nb_output_files,
2234  InputFile *input_files,
2235  int nb_input_files)
2236 {
2237  int ret = 0, i, j, k;
2238  AVFormatContext *oc;
2239  AVCodecContext *codec, *icodec;
2240  OutputStream *ost;
2241  InputStream *ist;
2242  char error[1024];
2243  int want_sdp = 1;
2244 
2245  /* init framerate emulation */
2246  for (i = 0; i < nb_input_files; i++) {
2247  InputFile *ifile = &input_files[i];
2248  if (ifile->rate_emu)
2249  for (j = 0; j < ifile->nb_streams; j++)
2250  input_streams[j + ifile->ist_index].start = av_gettime();
2251  }
2252 
2253  /* output stream init */
2254  for (i = 0; i < nb_output_files; i++) {
2255  oc = output_files[i].ctx;
2256  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2257  av_dump_format(oc, i, oc->filename, 1);
2258  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2259  return AVERROR(EINVAL);
2260  }
2261  }
2262 
2263  /* for each output stream, we compute the right encoding parameters */
2264  for (i = 0; i < nb_output_streams; i++) {
2265  ost = &output_streams[i];
2266  oc = output_files[ost->file_index].ctx;
2267  ist = &input_streams[ost->source_index];
2268 
2269  if (ost->attachment_filename)
2270  continue;
2271 
2272  codec = ost->st->codec;
2273  icodec = ist->st->codec;
2274 
2275  ost->st->disposition = ist->st->disposition;
2276  codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
 2277  codec->chroma_sample_location = icodec->chroma_sample_location;
 2278 
2279  if (ost->stream_copy) {
2280  uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2281 
2282  if (extra_size > INT_MAX) {
2283  return AVERROR(EINVAL);
2284  }
2285 
2286  /* if stream_copy is selected, no need to decode or encode */
2287  codec->codec_id = icodec->codec_id;
2288  codec->codec_type = icodec->codec_type;
2289 
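/* Prefer the input codec tag, but only when the output container either has no tag
 * table, maps the input tag to the same codec id, or has no tag of its own for this
 * codec id; otherwise leave it unset so the muxer can pick a valid tag. */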
2290  if (!codec->codec_tag) {
2291  if (!oc->oformat->codec_tag ||
2292  av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2293  av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2294  codec->codec_tag = icodec->codec_tag;
2295  }
2296 
2297  codec->bit_rate = icodec->bit_rate;
2298  codec->rc_max_rate = icodec->rc_max_rate;
2299  codec->rc_buffer_size = icodec->rc_buffer_size;
2300  codec->field_order = icodec->field_order;
2301  codec->extradata = av_mallocz(extra_size);
2302  if (!codec->extradata) {
2303  return AVERROR(ENOMEM);
2304  }
2305  memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2306  codec->extradata_size = icodec->extradata_size;
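/* With -copytb (copy_tb = 1, the default here) the output time base is taken from the
 * input stream; otherwise it is derived from the codec time base scaled by
 * ticks_per_frame and then reduced. */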
2307  if (!copy_tb) {
2308  codec->time_base = icodec->time_base;
2309  codec->time_base.num *= icodec->ticks_per_frame;
2310  av_reduce(&codec->time_base.num, &codec->time_base.den,
2311  codec->time_base.num, codec->time_base.den, INT_MAX);
2312  } else
2313  codec->time_base = ist->st->time_base;
2314 
2315  switch (codec->codec_type) {
2316  case AVMEDIA_TYPE_AUDIO:
2317  if (audio_volume != 256) {
2318  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2319  exit_program(1);
2320  }
2321  codec->channel_layout = icodec->channel_layout;
2322  codec->sample_rate = icodec->sample_rate;
2323  codec->channels = icodec->channels;
2324  codec->frame_size = icodec->frame_size;
2325  codec->audio_service_type = icodec->audio_service_type;
2326  codec->block_align = icodec->block_align;
2327  break;
2328  case AVMEDIA_TYPE_VIDEO:
2329  codec->pix_fmt = icodec->pix_fmt;
2330  codec->width = icodec->width;
2331  codec->height = icodec->height;
2332  codec->has_b_frames = icodec->has_b_frames;
2333  if (!codec->sample_aspect_ratio.num) {
2334  codec->sample_aspect_ratio =
2335  ost->st->sample_aspect_ratio =
 2336  ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
 2337  ist->st->codec->sample_aspect_ratio.num ?
2338  ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2339  }
2340  break;
2341  case AVMEDIA_TYPE_SUBTITLE:
2342  codec->width = icodec->width;
2343  codec->height = icodec->height;
2344  break;
2345  case AVMEDIA_TYPE_DATA:
 2346  case AVMEDIA_TYPE_ATTACHMENT:
 2347  break;
2348  default:
2349  abort();
2350  }
2351  } else {
2352  if (!ost->enc)
2353  ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
2354 
2355  ist->decoding_needed = 1;
2356  ost->encoding_needed = 1;
2357 
2358  switch (codec->codec_type) {
2359  case AVMEDIA_TYPE_AUDIO:
2360  ost->fifo = av_fifo_alloc(1024);
2361  if (!ost->fifo) {
2362  return AVERROR(ENOMEM);
2363  }
 2364  ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NONE);
 2365 
2366  if (!codec->sample_rate)
2367  codec->sample_rate = icodec->sample_rate;
2368  choose_sample_rate(ost->st, ost->enc);
2369  codec->time_base = (AVRational){ 1, codec->sample_rate };
2370 
2371  if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
2372  codec->sample_fmt = icodec->sample_fmt;
2373  choose_sample_fmt(ost->st, ost->enc);
2374 
2375  if (!codec->channels)
2376  codec->channels = icodec->channels;
2377  codec->channel_layout = icodec->channel_layout;
 2378  if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
 2379  codec->channel_layout = 0;
2380 
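/* Audio needs resampling when the input and output sample rates differ or when
 * -async > 1 may have to insert or drop samples to correct drift. */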
2381  ost->audio_resample = codec-> sample_rate != icodec->sample_rate || audio_sync_method > 1;
2382  icodec->request_channels = codec-> channels;
2383  ost->resample_sample_fmt = icodec->sample_fmt;
2384  ost->resample_sample_rate = icodec->sample_rate;
2385  ost->resample_channels = icodec->channels;
2386  break;
2387  case AVMEDIA_TYPE_VIDEO:
2388  if (codec->pix_fmt == PIX_FMT_NONE)
2389  codec->pix_fmt = icodec->pix_fmt;
2390  choose_pixel_fmt(ost->st, ost->enc);
2391 
2392  if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
2393  av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
2394  exit_program(1);
2395  }
2396 
2397  if (!codec->width || !codec->height) {
2398  codec->width = icodec->width;
2399  codec->height = icodec->height;
2400  }
2401 
2402  ost->video_resample = codec->width != icodec->width ||
2403  codec->height != icodec->height ||
2404  codec->pix_fmt != icodec->pix_fmt;
2405  if (ost->video_resample) {
2406 #if !CONFIG_AVFILTER
 2407  avcodec_get_frame_defaults(&ost->pict_tmp);
 2408  if (avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
 2409  codec->width, codec->height)) {
 2410  av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
 2411  exit_program(1);
 2412  }
 2413  ost->img_resample_ctx = sws_getContext(
 2414  icodec->width,
2415  icodec->height,
2416  icodec->pix_fmt,
2417  codec->width,
2418  codec->height,
2419  codec->pix_fmt,
2420  ost->sws_flags, NULL, NULL, NULL);
2421  if (ost->img_resample_ctx == NULL) {
2422  av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
2423  exit_program(1);
2424  }
2425 #endif
2426  codec->bits_per_raw_sample = 0;
2427  }
2428 
2429  ost->resample_height = icodec->height;
2430  ost->resample_width = icodec->width;
2431  ost->resample_pix_fmt = icodec->pix_fmt;
2432 
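/* Default to the input stream's frame rate (25 fps as a last resort) and, unless
 * -force_fps was given, snap to the nearest rate the encoder supports. */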
2433  if (!ost->frame_rate.num)
2434  ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
2435  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
 2436  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
 2437  ost->frame_rate = ost->enc->supported_framerates[idx];
2438  }
2439  codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2440 
2441 #if CONFIG_AVFILTER
2442  if (configure_video_filters(ist, ost)) {
2443  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2444  exit(1);
2445  }
2446 #endif
2447  break;
2448  case AVMEDIA_TYPE_SUBTITLE:
2449  break;
2450  default:
2451  abort();
2452  break;
2453  }
2454  /* two pass mode */
2455  if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2456  char logfilename[1024];
2457  FILE *f;
2458 
2459  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
 2460  pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
 2461  i);
2462  if (!strcmp(ost->enc->name, "libx264")) {
2463  av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2464  } else {
2465  if (codec->flags & CODEC_FLAG_PASS1) {
2466  f = fopen(logfilename, "wb");
2467  if (!f) {
2468  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2469  logfilename, strerror(errno));
2470  exit_program(1);
2471  }
2472  ost->logfile = f;
2473  } else {
2474  char *logbuffer;
2475  size_t logbuffer_size;
2476  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2477  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2478  logfilename);
2479  exit_program(1);
2480  }
2481  codec->stats_in = logbuffer;
2482  }
2483  }
2484  }
2485  }
2486  if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2487  int size = codec->width * codec->height;
2488  bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 200);
2489  }
2490  }
2491 
2492  if (!bit_buffer)
 2493  bit_buffer = av_malloc(bit_buffer_size);
 2494  if (!bit_buffer) {
2495  av_log(NULL, AV_LOG_ERROR, "Cannot allocate %d bytes output buffer\n",
2496  bit_buffer_size);
2497  return AVERROR(ENOMEM);
2498  }
2499 
2500  /* open each encoder */
2501  for (i = 0; i < nb_output_streams; i++) {
2502  ost = &output_streams[i];
2503  if (ost->encoding_needed) {
2504  AVCodec *codec = ost->enc;
2505  AVCodecContext *dec = input_streams[ost->source_index].st->codec;
2506  if (!codec) {
2507  snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
2508  ost->st->codec->codec_id, ost->file_index, ost->index);
2509  ret = AVERROR(EINVAL);
2510  goto dump_format;
2511  }
2512  if (dec->subtitle_header) {
 2513  ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
 2514  if (!ost->st->codec->subtitle_header) {
 2515  ret = AVERROR(ENOMEM);
 2516  goto dump_format;
 2517  }
 2518  memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
 2519  ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
 2520  }
2521  if (!av_dict_get(ost->opts, "threads", NULL, 0))
2522  av_dict_set(&ost->opts, "threads", "auto", 0);
2523  if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2524  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2525  ost->file_index, ost->index);
2526  ret = AVERROR(EINVAL);
2527  goto dump_format;
2528  }
2530  assert_avoptions(ost->opts);
2531  if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2532  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2533  "It takes bits/s as argument, not kbits/s\n");
2534  extra_size += ost->st->codec->extradata_size;
2535 
2536  if (ost->st->codec->me_threshold)
2537  input_streams[ost->source_index].st->codec->debug |= FF_DEBUG_MV;
2538  }
2539  }
2540 
2541  /* init input streams */
2542  for (i = 0; i < nb_input_streams; i++)
2543  if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
2544  goto dump_format;
2545 
2546  /* discard unused programs */
2547  for (i = 0; i < nb_input_files; i++) {
2548  InputFile *ifile = &input_files[i];
2549  for (j = 0; j < ifile->ctx->nb_programs; j++) {
2550  AVProgram *p = ifile->ctx->programs[j];
2551  int discard = AVDISCARD_ALL;
2552 
2553  for (k = 0; k < p->nb_stream_indexes; k++)
2554  if (!input_streams[ifile->ist_index + p->stream_index[k]].discard) {
2555  discard = AVDISCARD_DEFAULT;
2556  break;
2557  }
2558  p->discard = discard;
2559  }
2560  }
2561 
2562  /* open files and write file headers */
2563  for (i = 0; i < nb_output_files; i++) {
2564  oc = output_files[i].ctx;
2565  oc->interrupt_callback = int_cb;
2566  if (avformat_write_header(oc, &output_files[i].opts) < 0) {
2567  snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2568  ret = AVERROR(EINVAL);
2569  goto dump_format;
2570  }
2571  assert_avoptions(output_files[i].opts);
2572  if (strcmp(oc->oformat->name, "rtp")) {
2573  want_sdp = 0;
2574  }
2575  }
2576 
2577  dump_format:
2578  /* dump the file output parameters - cannot be done before in case
2579  of stream copy */
2580  for (i = 0; i < nb_output_files; i++) {
2581  av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
2582  }
2583 
2584  /* dump the stream mapping */
2585  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2586  for (i = 0; i < nb_output_streams; i++) {
2587  ost = &output_streams[i];
2588 
2589  if (ost->attachment_filename) {
2590  /* an attached file */
2591  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2592  ost->attachment_filename, ost->file_index, ost->index);
2593  continue;
2594  }
2595  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2596  input_streams[ost->source_index].file_index,
2597  input_streams[ost->source_index].st->index,
2598  ost->file_index,
2599  ost->index);
2600  if (ost->sync_ist != &input_streams[ost->source_index])
2601  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2602  ost->sync_ist->file_index,
2603  ost->sync_ist->st->index);
2604  if (ost->stream_copy)
2605  av_log(NULL, AV_LOG_INFO, " (copy)");
2606  else
2607  av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
2608  input_streams[ost->source_index].dec->name : "?",
2609  ost->enc ? ost->enc->name : "?");
2610  av_log(NULL, AV_LOG_INFO, "\n");
2611  }
2612 
2613  if (ret) {
2614  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2615  return ret;
2616  }
2617 
2618  if (want_sdp) {
2619  print_sdp(output_files, nb_output_files);
2620  }
2621 
2622  return 0;
2623 }
2624 
2625 /*
2626  * The following code is the main loop of the file converter
2627  */
2628 static int transcode(OutputFile *output_files,
2629  int nb_output_files,
2630  InputFile *input_files,
2631  int nb_input_files)
2632 {
2633  int ret, i;
2634  AVFormatContext *is, *os;
2635  OutputStream *ost;
2636  InputStream *ist;
2637  uint8_t *no_packet;
2638  int no_packet_count = 0;
2639  int64_t timer_start;
2640 
2641  if (!(no_packet = av_mallocz(nb_input_files)))
2642  exit_program(1);
2643 
2644  ret = transcode_init(output_files, nb_output_files, input_files, nb_input_files);
2645  if (ret < 0)
2646  goto fail;
2647 
2648  av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2649  term_init();
2650 
2651  timer_start = av_gettime();
2652 
2653  for (; received_sigterm == 0;) {
2654  int file_index, ist_index;
2655  AVPacket pkt;
2656  int64_t ipts_min;
2657  double opts_min;
2658 
2659  ipts_min = INT64_MAX;
2660  opts_min = 1e100;
2661 
2662  /* select the stream that we must read now by looking at the
2663  smallest output pts */
2664  file_index = -1;
2665  for (i = 0; i < nb_output_streams; i++) {
2666  OutputFile *of;
2667  int64_t ipts;
2668  double opts;
2669  ost = &output_streams[i];
2670  of = &output_files[ost->file_index];
2671  os = output_files[ost->file_index].ctx;
2672  ist = &input_streams[ost->source_index];
2673  if (ost->is_past_recording_time || no_packet[ist->file_index] ||
2674  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2675  continue;
2676  opts = ost->st->pts.val * av_q2d(ost->st->time_base);
2677  ipts = ist->pts;
2678  if (!input_files[ist->file_index].eof_reached) {
2679  if (ipts < ipts_min) {
2680  ipts_min = ipts;
2681  if (input_sync)
2682  file_index = ist->file_index;
2683  }
2684  if (opts < opts_min) {
2685  opts_min = opts;
2686  if (!input_sync) file_index = ist->file_index;
2687  }
2688  }
2689  if (ost->frame_number >= ost->max_frames) {
2690  int j;
2691  for (j = 0; j < of->ctx->nb_streams; j++)
2692  output_streams[of->ost_index + j].is_past_recording_time = 1;
2693  continue;
2694  }
2695  }
 2696  /* if no stream was selected, the inputs are finished */
2697  if (file_index < 0) {
2698  if (no_packet_count) {
2699  no_packet_count = 0;
2700  memset(no_packet, 0, nb_input_files);
2701  usleep(10000);
2702  continue;
2703  }
2704  break;
2705  }
2706 
2707  /* read a frame from it and output it in the fifo */
2708  is = input_files[file_index].ctx;
2709  ret = av_read_frame(is, &pkt);
2710  if (ret == AVERROR(EAGAIN)) {
2711  no_packet[file_index] = 1;
2712  no_packet_count++;
2713  continue;
2714  }
2715  if (ret < 0) {
2716  input_files[file_index].eof_reached = 1;
2717  if (opt_shortest)
2718  break;
2719  else
2720  continue;
2721  }
2722 
2723  no_packet_count = 0;
2724  memset(no_packet, 0, nb_input_files);
2725 
2726  if (do_pkt_dump) {
 2727  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
 2728  is->streams[pkt.stream_index]);
2729  }
 2730  /* the following test is needed in case new streams appear
 2731  dynamically in the stream; we ignore them */
2732  if (pkt.stream_index >= input_files[file_index].nb_streams)
2733  goto discard_packet;
2734  ist_index = input_files[file_index].ist_index + pkt.stream_index;
2735  ist = &input_streams[ist_index];
2736  if (ist->discard)
2737  goto discard_packet;
2738 
2739  if (pkt.dts != AV_NOPTS_VALUE)
2740  pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2741  if (pkt.pts != AV_NOPTS_VALUE)
2742  pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2743 
2744  if (pkt.pts != AV_NOPTS_VALUE)
2745  pkt.pts *= ist->ts_scale;
2746  if (pkt.dts != AV_NOPTS_VALUE)
2747  pkt.dts *= ist->ts_scale;
2748 
2749  //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
2750  // ist->next_pts,
2751  // pkt.dts, input_files[ist->file_index].ts_offset,
2752  // ist->st->codec->codec_type);
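/* For formats flagged AVFMT_TS_DISCONT (e.g. MPEG-TS), fold large jumps between the
 * packet dts and the predicted next pts into the file's ts_offset so that downstream
 * timestamps stay continuous; skipped when -copyts is in effect. */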
2753  if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
2754  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2755  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2756  int64_t delta = pkt_dts - ist->next_pts;
2757  if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->pts) && !copy_ts) {
2758  input_files[ist->file_index].ts_offset -= delta;
 2759  av_log(NULL, AV_LOG_DEBUG,
 2760  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2761  delta, input_files[ist->file_index].ts_offset);
2762  pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2763  if (pkt.pts != AV_NOPTS_VALUE)
2764  pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2765  }
2766  }
2767 
2768  // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
2769  if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
2770 
2771  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2772  ist->file_index, ist->st->index);
2773  if (exit_on_error)
2774  exit_program(1);
2775  av_free_packet(&pkt);
2776  continue;
2777  }
2778 
2779  discard_packet:
2780  av_free_packet(&pkt);
2781 
2782  /* dump report by using the output first video and audio streams */
2783  print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
2784  }
2785 
2786  /* at the end of stream, we must flush the decoder buffers */
2787  for (i = 0; i < nb_input_streams; i++) {
2788  ist = &input_streams[i];
2789  if (ist->decoding_needed) {
2790  output_packet(ist, output_streams, nb_output_streams, NULL);
2791  }
2792  }
2793  flush_encoders(output_streams, nb_output_streams);
2794 
2795  term_exit();
2796 
2797  /* write the trailer if needed and close file */
2798  for (i = 0; i < nb_output_files; i++) {
2799  os = output_files[i].ctx;
2800  av_write_trailer(os);
2801  }
2802 
2803  /* dump report by using the first video and audio streams */
2804  print_report(output_files, output_streams, nb_output_streams, 1, timer_start);
2805 
2806  /* close each encoder */
2807  for (i = 0; i < nb_output_streams; i++) {
2808  ost = &output_streams[i];
2809  if (ost->encoding_needed) {
2810  av_freep(&ost->st->codec->stats_in);
2811  avcodec_close(ost->st->codec);
2812  }
2813 #if CONFIG_AVFILTER
2814  avfilter_graph_free(&ost->graph);
2815 #endif
2816  }
2817 
2818  /* close each decoder */
2819  for (i = 0; i < nb_input_streams; i++) {
2820  ist = &input_streams[i];
2821  if (ist->decoding_needed) {
2822  avcodec_close(ist->st->codec);
2823  }
2824  }
2825 
2826  /* finished ! */
2827  ret = 0;
2828 
2829  fail:
2830  av_freep(&bit_buffer);
2831  av_freep(&no_packet);
2832 
2833  if (output_streams) {
2834  for (i = 0; i < nb_output_streams; i++) {
2835  ost = &output_streams[i];
2836  if (ost) {
2837  if (ost->stream_copy)
2838  av_freep(&ost->st->codec->extradata);
2839  if (ost->logfile) {
2840  fclose(ost->logfile);
2841  ost->logfile = NULL;
2842  }
2843  av_fifo_free(ost->fifo); /* works even if fifo is not
2844  initialized but set to zero */
2845  av_freep(&ost->st->codec->subtitle_header);
2846  av_free(ost->pict_tmp.data[0]);
2847  av_free(ost->forced_kf_pts);
 2848  if (ost->video_resample)
 2849  sws_freeContext(ost->img_resample_ctx);
 2850  if (ost->resample)
 2851  audio_resample_close(ost->resample);
 2852  if (ost->reformat_ctx)
 2853  av_audio_convert_free(ost->reformat_ctx);
2854  av_dict_free(&ost->opts);
2855  }
2856  }
2857  }
2858  return ret;
2859 }
2860 
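/* Parse the -aspect argument: either "x:y" (e.g. 16:9) or a decimal value (e.g. 1.7778). */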
2861 static double parse_frame_aspect_ratio(const char *arg)
2862 {
2863  int x = 0, y = 0;
2864  double ar = 0;
2865  const char *p;
2866  char *end;
2867 
2868  p = strchr(arg, ':');
2869  if (p) {
2870  x = strtol(arg, &end, 10);
2871  if (end == p)
2872  y = strtol(end + 1, &end, 10);
2873  if (x > 0 && y > 0)
2874  ar = (double)x / (double)y;
2875  } else
2876  ar = strtod(arg, NULL);
2877 
2878  if (!ar) {
2879  av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
2880  exit_program(1);
2881  }
2882  return ar;
2883 }
2884 
2885 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
2886 {
2887  return parse_option(o, "codec:a", arg, options);
2888 }
2889 
2890 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
2891 {
2892  return parse_option(o, "codec:v", arg, options);
2893 }
2894 
2895 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
2896 {
2897  return parse_option(o, "codec:s", arg, options);
2898 }
2899 
2900 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
2901 {
2902  return parse_option(o, "codec:d", arg, options);
2903 }
2904 
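/* Parse a -map argument of the form [-]file_index[:stream_specifier][,sync_file[:sync_spec]];
 * a leading '-' disables previously created mappings that match the specifier. */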
2905 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
2906 {
2907  StreamMap *m = NULL;
2908  int i, negative = 0, file_idx;
2909  int sync_file_idx = -1, sync_stream_idx;
2910  char *p, *sync;
2911  char *map;
2912 
2913  if (*arg == '-') {
2914  negative = 1;
2915  arg++;
2916  }
2917  map = av_strdup(arg);
2918 
2919  /* parse sync stream first, just pick first matching stream */
2920  if (sync = strchr(map, ',')) {
2921  *sync = 0;
2922  sync_file_idx = strtol(sync + 1, &sync, 0);
2923  if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
2924  av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
2925  exit_program(1);
2926  }
2927  if (*sync)
2928  sync++;
2929  for (i = 0; i < input_files[sync_file_idx].nb_streams; i++)
2930  if (check_stream_specifier(input_files[sync_file_idx].ctx,
2931  input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
2932  sync_stream_idx = i;
2933  break;
2934  }
2935  if (i == input_files[sync_file_idx].nb_streams) {
2936  av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
2937  "match any streams.\n", arg);
2938  exit_program(1);
2939  }
2940  }
2941 
2942 
2943  file_idx = strtol(map, &p, 0);
2944  if (file_idx >= nb_input_files || file_idx < 0) {
2945  av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
2946  exit_program(1);
2947  }
2948  if (negative)
2949  /* disable some already defined maps */
2950  for (i = 0; i < o->nb_stream_maps; i++) {
2951  m = &o->stream_maps[i];
2952  if (file_idx == m->file_index &&
2953  check_stream_specifier(input_files[m->file_index].ctx,
2954  input_files[m->file_index].ctx->streams[m->stream_index],
2955  *p == ':' ? p + 1 : p) > 0)
2956  m->disabled = 1;
2957  }
2958  else
2959  for (i = 0; i < input_files[file_idx].nb_streams; i++) {
2960  if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i],
2961  *p == ':' ? p + 1 : p) <= 0)
2962  continue;
2963  o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
2964  &o->nb_stream_maps, o->nb_stream_maps + 1);
2965  m = &o->stream_maps[o->nb_stream_maps - 1];
2966 
2967  m->file_index = file_idx;
2968  m->stream_index = i;
2969 
2970  if (sync_file_idx >= 0) {
2971  m->sync_file_index = sync_file_idx;
2972  m->sync_stream_index = sync_stream_idx;
2973  } else {
2974  m->sync_file_index = file_idx;
2975  m->sync_stream_index = i;
2976  }
2977  }
2978 
2979  if (!m) {
2980  av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
2981  exit_program(1);
2982  }
2983 
2984  av_freep(&map);
2985  return 0;
2986 }
2987 
2988 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
2989 {
2990  o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
2991  &o->nb_attachments, o->nb_attachments + 1);
2992  o->attachments[o->nb_attachments - 1] = arg;
2993  return 0;
2994 }
2995 
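/* Parse a metadata specifier: 'g' (global), 's[:stream_spec]' (stream),
 * 'c[:index]' (chapter) or 'p[:index]' (program); an empty specifier means global. */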
3002 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
3003 {
3004  if (*arg) {
3005  *type = *arg;
3006  switch (*arg) {
3007  case 'g':
3008  break;
3009  case 's':
3010  if (*(++arg) && *arg != ':') {
3011  av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3012  exit_program(1);
3013  }
3014  *stream_spec = *arg == ':' ? arg + 1 : "";
3015  break;
3016  case 'c':
3017  case 'p':
3018  if (*(++arg) == ':')
3019  *index = strtol(++arg, NULL, 0);
3020  break;
3021  default:
3022  av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
3023  exit_program(1);
3024  }
3025  } else
3026  *type = 'g';
3027 }
3028 
3029 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3030 {
3031  AVDictionary **meta_in = NULL;
3032  AVDictionary **meta_out;
3033  int i, ret = 0;
3034  char type_in, type_out;
3035  const char *istream_spec = NULL, *ostream_spec = NULL;
3036  int idx_in = 0, idx_out = 0;
3037 
3038  parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3039  parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
3040 
3041  if (type_in == 'g' || type_out == 'g')
3042  o->metadata_global_manual = 1;
3043  if (type_in == 's' || type_out == 's')
3044  o->metadata_streams_manual = 1;
3045  if (type_in == 'c' || type_out == 'c')
3046  o->metadata_chapters_manual = 1;
3047 
3048 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3049  if ((index) < 0 || (index) >= (nb_elems)) {\
3050  av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
3051  (desc), (index));\
3052  exit_program(1);\
3053  }
3054 
3055 #define SET_DICT(type, meta, context, index)\
3056  switch (type) {\
3057  case 'g':\
3058  meta = &context->metadata;\
3059  break;\
3060  case 'c':\
3061  METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3062  meta = &context->chapters[index]->metadata;\
3063  break;\
3064  case 'p':\
3065  METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3066  meta = &context->programs[index]->metadata;\
3067  break;\
3068  }\
3069 
3070  SET_DICT(type_in, meta_in, ic, idx_in);
3071  SET_DICT(type_out, meta_out, oc, idx_out);
3072 
3073  /* for input streams choose first matching stream */
3074  if (type_in == 's') {
3075  for (i = 0; i < ic->nb_streams; i++) {
3076  if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3077  meta_in = &ic->streams[i]->metadata;
3078  break;
3079  } else if (ret < 0)
3080  exit_program(1);
3081  }
3082  if (!meta_in) {
3083  av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
3084  exit_program(1);
3085  }
3086  }
3087 
3088  if (type_out == 's') {
3089  for (i = 0; i < oc->nb_streams; i++) {
3090  if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3091  meta_out = &oc->streams[i]->metadata;
3092  av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3093  } else if (ret < 0)
3094  exit_program(1);
3095  }
3096  } else
3097  av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3098 
3099  return 0;
3100 }
3101 
3102 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3103 {
3104  const char *codec_string = encoder ? "encoder" : "decoder";
3105  AVCodec *codec;
3106 
3107  codec = encoder ?
 3108  avcodec_find_encoder_by_name(name) :
 3109  avcodec_find_decoder_by_name(name);
 3110  if (!codec) {
3111  av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3112  exit_program(1);
3113  }
3114  if (codec->type != type) {
3115  av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
3116  exit_program(1);
3117  }
3118  return codec;
3119 }
3120 
 3121 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
 3122 {
3123  char *codec_name = NULL;
3124 
3125  MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3126  if (codec_name) {
3127  AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3128  st->codec->codec_id = codec->id;
3129  return codec;
3130  } else
3131  return avcodec_find_decoder(st->codec->codec_id);
3132 }
3133 
 3138 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 3139 {
3140  int i;
3141 
3142  for (i = 0; i < ic->nb_streams; i++) {
3143  AVStream *st = ic->streams[i];
3144  AVCodecContext *dec = st->codec;
3145  InputStream *ist;
3146 
3147  input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3148  ist = &input_streams[nb_input_streams - 1];
3149  ist->st = st;
3150  ist->file_index = nb_input_files;
3151  ist->discard = 1;
3152  ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3153 
3154  ist->ts_scale = 1.0;
3155  MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3156 
3157  ist->dec = choose_decoder(o, ic, st);
3158 
3159  switch (dec->codec_type) {
3160  case AVMEDIA_TYPE_AUDIO:
3161  if (o->audio_disable)
3162  st->discard = AVDISCARD_ALL;
3163  break;
3164  case AVMEDIA_TYPE_VIDEO:
3165  if (dec->lowres) {
3166  dec->flags |= CODEC_FLAG_EMU_EDGE;
3167  dec->height >>= dec->lowres;
3168  dec->width >>= dec->lowres;
3169  }
3170 
3171  if (o->video_disable)
3172  st->discard = AVDISCARD_ALL;
3173  else if (video_discard)
3174  st->discard = video_discard;
3175  break;
3176  case AVMEDIA_TYPE_DATA:
3177  break;
3178  case AVMEDIA_TYPE_SUBTITLE:
3179  if (o->subtitle_disable)
3180  st->discard = AVDISCARD_ALL;
3181  break;
 3182  case AVMEDIA_TYPE_ATTACHMENT:
 3183  case AVMEDIA_TYPE_UNKNOWN:
3184  break;
3185  default:
3186  abort();
3187  }
3188  }
3189 }
3190 
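/* Unless -y was given, prompt before overwriting an existing output file; when input is
 * read from stdin (so there is no way to prompt), refuse and exit instead. */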
3191 static void assert_file_overwrite(const char *filename)
3192 {
3193  if (!file_overwrite &&
3194  (strchr(filename, ':') == NULL || filename[1] == ':' ||
3195  av_strstart(filename, "file:", NULL))) {
3196  if (avio_check(filename, 0) == 0) {
3197  if (!using_stdin) {
3198  fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3199  fflush(stderr);
3200  if (!read_yesno()) {
3201  fprintf(stderr, "Not overwriting - exiting\n");
3202  exit_program(1);
3203  }
3204  }
3205  else {
3206  fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
3207  exit_program(1);
3208  }
3209  }
3210  }
3211 }
3212 
3213 static void dump_attachment(AVStream *st, const char *filename)
3214 {
3215  int ret;
3216  AVIOContext *out = NULL;
3217  AVDictionaryEntry *e;
3218 
3219  if (!st->codec->extradata_size) {
3220  av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3221  nb_input_files - 1, st->index);
3222  return;
3223  }
3224  if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3225  filename = e->value;
3226  if (!*filename) {
3227  av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3228  "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3229  exit_program(1);
3230  }
3231 
3232  assert_file_overwrite(filename);
3233 
3234  if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3235  av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3236  filename);
3237  exit_program(1);
3238  }
3239 
3240  avio_write(out, st->codec->extradata, st->codec->extradata_size);
3241  avio_flush(out);
3242  avio_close(out);
3243 }
3244 
3245 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3246 {
3247  AVFormatContext *ic;
 3248  AVInputFormat *file_iformat = NULL;
 3249  int err, i, ret;
3250  int64_t timestamp;
3251  uint8_t buf[128];
3252  AVDictionary **opts;
3253  int orig_nb_streams; // number of streams before avformat_find_stream_info
3254 
3255  if (o->format) {
3256  if (!(file_iformat = av_find_input_format(o->format))) {
3257  av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3258  exit_program(1);
3259  }
3260  }
3261 
3262  if (!strcmp(filename, "-"))
3263  filename = "pipe:";
3264 
3265  using_stdin |= !strncmp(filename, "pipe:", 5) ||
3266  !strcmp(filename, "/dev/stdin");
3267 
3268  /* get default parameters from command line */
3269  ic = avformat_alloc_context();
3270  if (!ic) {
3271  print_error(filename, AVERROR(ENOMEM));
3272  exit_program(1);
3273  }
3274  if (o->nb_audio_sample_rate) {
3275  snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3276  av_dict_set(&format_opts, "sample_rate", buf, 0);
3277  }
3278  if (o->nb_audio_channels) {
3279  snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
3280  av_dict_set(&format_opts, "channels", buf, 0);
3281  }
3282  if (o->nb_frame_rates) {
3283  av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3284  }
3285  if (o->nb_frame_sizes) {
3286  av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3287  }
3288  if (o->nb_frame_pix_fmts)
3289  av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3290 
3291  ic->flags |= AVFMT_FLAG_NONBLOCK;
3292  ic->interrupt_callback = int_cb;
3293 
3294  /* open the input file with generic libav function */
3295  err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3296  if (err < 0) {
3297  print_error(filename, err);
3298  exit_program(1);
3299  }
 3300  assert_avoptions(format_opts);
 3301 
3302  /* apply forced codec ids */
3303  for (i = 0; i < ic->nb_streams; i++)
3304  choose_decoder(o, ic, ic->streams[i]);
3305 
3306  /* Set AVCodecContext options for avformat_find_stream_info */
 3307  opts = setup_find_stream_info_opts(ic, codec_opts);
 3308  orig_nb_streams = ic->nb_streams;
3309 
 3310  /* If there is not enough information to get the stream parameters, decode the
 3311  first frames to obtain them (used for MPEG streams, for example). */
3312  ret = avformat_find_stream_info(ic, opts);
3313  if (ret < 0) {
3314  av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3315  avformat_close_input(&ic);
3316  exit_program(1);
3317  }
3318 
3319  timestamp = o->start_time;
3320  /* add the stream start time */
3321  if (ic->start_time != AV_NOPTS_VALUE)
3322  timestamp += ic->start_time;
3323 
3324  /* if seeking requested, we execute it */
3325  if (o->start_time != 0) {
3326  ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3327  if (ret < 0) {
3328  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3329  filename, (double)timestamp / AV_TIME_BASE);
3330  }
3331  }
3332 
 3333  /* update the current parameters so that they match those of the input streams */
3334  add_input_streams(o, ic);
3335 
3336  /* dump the file content */
3337  av_dump_format(ic, nb_input_files, filename, 0);
3338 
3339  input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3340  input_files[nb_input_files - 1].ctx = ic;
3341  input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams;
3342  input_files[nb_input_files - 1].ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3343  input_files[nb_input_files - 1].nb_streams = ic->nb_streams;
3344  input_files[nb_input_files - 1].rate_emu = o->rate_emu;
3345 
3346  for (i = 0; i < o->nb_dump_attachment; i++) {
3347  int j;
3348 
3349  for (j = 0; j < ic->nb_streams; j++) {
3350  AVStream *st = ic->streams[j];
3351 
3352  if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3353  dump_attachment(st, o->dump_attachment[i].u.str);
3354  }
3355  }
3356 
3357  for (i = 0; i < orig_nb_streams; i++)
3358  av_dict_free(&opts[i]);
3359  av_freep(&opts);
3360 
3361  reset_options(o);
3362  return 0;
3363 }
3364 
3365 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3366  AVCodecContext *avctx)
3367 {
3368  char *p;
3369  int n = 1, i;
3370  int64_t t;
3371 
3372  for (p = kf; *p; p++)
3373  if (*p == ',')
3374  n++;
3375  ost->forced_kf_count = n;
3376  ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3377  if (!ost->forced_kf_pts) {
3378  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3379  exit_program(1);
3380  }
3381  for (i = 0; i < n; i++) {
3382  p = i ? strchr(p, ',') + 1 : kf;
3383  t = parse_time_or_die("force_key_frames", p, 1);
3384  ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3385  }
3386 }
3387 
3388 static uint8_t *get_line(AVIOContext *s)
3389 {
3390  AVIOContext *line;
3391  uint8_t *buf;
3392  char c;
3393 
3394  if (avio_open_dyn_buf(&line) < 0) {
3395  av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3396  exit_program(1);
3397  }
3398 
3399  while ((c = avio_r8(s)) && c != '\n')
3400  avio_w8(line, c);
3401  avio_w8(line, 0);
3402  avio_close_dyn_buf(line, &buf);
3403 
3404  return buf;
3405 }
3406 
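/* Look for a preset file in $AVCONV_DATADIR, then $HOME/.avconv, then the build-time
 * AVCONV_DATADIR; a codec-specific "<codec>-<preset>.avpreset" is preferred over the
 * generic "<preset>.avpreset". */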
3407 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3408 {
3409  int i, ret = 1;
3410  char filename[1000];
3411  const char *base[3] = { getenv("AVCONV_DATADIR"),
3412  getenv("HOME"),
3413  AVCONV_DATADIR,
3414  };
3415 
3416  for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3417  if (!base[i])
3418  continue;
3419  if (codec_name) {
3420  snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3421  i != 1 ? "" : "/.avconv", codec_name, preset_name);
3422  ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3423  }
3424  if (ret) {
3425  snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3426  i != 1 ? "" : "/.avconv", preset_name);
3427  ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3428  }
3429  }
3430  return ret;
3431 }
3432 
 3433 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
 3434 {
3435  char *codec_name = NULL;
3436 
3437  MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3438  if (!codec_name) {
3439  ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3440  NULL, ost->st->codec->codec_type);
3441  ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3442  } else if (!strcmp(codec_name, "copy"))
3443  ost->stream_copy = 1;
3444  else {
3445  ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3446  ost->st->codec->codec_id = ost->enc->id;
3447  }
3448 }
3449 
 3450 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
 3451 {
3452  OutputStream *ost;
3453  AVStream *st = avformat_new_stream(oc, NULL);
3454  int idx = oc->nb_streams - 1, ret = 0;
3455  char *bsf = NULL, *next, *codec_tag = NULL;
3456  AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3457  double qscale = -1;
3458  char *buf = NULL, *arg = NULL, *preset = NULL;
3459  AVIOContext *s = NULL;
3460 
3461  if (!st) {
3462  av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
3463  exit_program(1);
3464  }
3465 
3466  if (oc->nb_streams - 1 < o->nb_streamid_map)
3467  st->id = o->streamid_map[oc->nb_streams - 1];
3468 
3469  output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3470  nb_output_streams + 1);
3471  ost = &output_streams[nb_output_streams - 1];
3472  ost->file_index = nb_output_files;
3473  ost->index = idx;
3474  ost->st = st;
3475  st->codec->codec_type = type;
3476  choose_encoder(o, oc, ost);
3477  if (ost->enc) {
3478  ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3479  }
3480 
 3481  avcodec_get_context_defaults3(st->codec, ost->enc);
 3482  st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
3483 
3484  MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3485  if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3486  do {
3487  buf = get_line(s);
3488  if (!buf[0] || buf[0] == '#') {
3489  av_free(buf);
3490  continue;
3491  }
3492  if (!(arg = strchr(buf, '='))) {
3493  av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3494  exit_program(1);
3495  }
3496  *arg++ = 0;
3497  av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3498  av_free(buf);
3499  } while (!s->eof_reached);
3500  avio_close(s);
3501  }
3502  if (ret) {
 3503  av_log(NULL, AV_LOG_FATAL,
 3504  "Preset %s specified for stream %d:%d, but could not be opened.\n",
3505  preset, ost->file_index, ost->index);
3506  exit_program(1);
3507  }
3508 
3509  ost->max_frames = INT64_MAX;
3510  MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
3511 
3512  MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3513  while (bsf) {
3514  if (next = strchr(bsf, ','))
3515  *next++ = 0;
3516  if (!(bsfc = av_bitstream_filter_init(bsf))) {
3517  av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3518  exit_program(1);
3519  }
3520  if (bsfc_prev)
3521  bsfc_prev->next = bsfc;
3522  else
3523  ost->bitstream_filters = bsfc;
3524 
3525  bsfc_prev = bsfc;
3526  bsf = next;
3527  }
3528 
3529  MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3530  if (codec_tag) {
3531  uint32_t tag = strtol(codec_tag, &next, 0);
3532  if (*next)
3533  tag = AV_RL32(codec_tag);
3534  st->codec->codec_tag = tag;
3535  }
3536 
3537  MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3538  if (qscale >= 0 || same_quant) {
3539  st->codec->flags |= CODEC_FLAG_QSCALE;
3540  st->codec->global_quality = FF_QP2LAMBDA * qscale;
3541  }
3542 
3543  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
 3544  st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
 3545 
3546  av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3547  return ost;
3548 }
3549 
3550 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3551 {
3552  int i;
3553  const char *p = str;
3554  for (i = 0;; i++) {
3555  dest[i] = atoi(p);
3556  if (i == 63)
3557  break;
3558  p = strchr(p, ',');
3559  if (!p) {
3560  av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
3561  exit_program(1);
3562  }
3563  p++;
3564  }
3565 }
3566 
 3567 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
 3568 {
3569  AVStream *st;
3570  OutputStream *ost;
3571  AVCodecContext *video_enc;
3572 
3573  ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3574  st = ost->st;
3575  video_enc = st->codec;
3576 
3577  if (!ost->stream_copy) {
3578  const char *p = NULL;
3579  char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
 3580  char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
 3581  char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3582  int i;
3583 
3584  MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3585  if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3586  av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3587  exit_program(1);
3588  }
3589 
3590  MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3591  if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3592  av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3593  exit_program(1);
3594  }
3595 
3596  MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3597  if (frame_aspect_ratio)
3598  ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3599 
3600  MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3601  if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3602  av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3603  exit_program(1);
3604  }
3605  st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
3606 
3607  MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3608  if (intra_matrix) {
3609  if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3610  av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3611  exit_program(1);
3612  }
3613  parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3614  }
3615  MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3616  if (inter_matrix) {
3617  if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3618  av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3619  exit_program(1);
3620  }
3621  parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
3622  }
3623 
3624  MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3625  for (i = 0; p; i++) {
3626  int start, end, q;
3627  int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3628  if (e != 3) {
3629  av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3630  exit_program(1);
3631  }
3632  video_enc->rc_override =
3633  av_realloc(video_enc->rc_override,
3634  sizeof(RcOverride) * (i + 1));
3635  video_enc->rc_override[i].start_frame = start;
3636  video_enc->rc_override[i].end_frame = end;
3637  if (q > 0) {
3638  video_enc->rc_override[i].qscale = q;
3639  video_enc->rc_override[i].quality_factor = 1.0;
3640  }
3641  else {
3642  video_enc->rc_override[i].qscale = 0;
3643  video_enc->rc_override[i].quality_factor = -q/100.0;
3644  }
3645  p = strchr(p, '/');
3646  if (p) p++;
3647  }
3648  video_enc->rc_override_count = i;
3649  if (!video_enc->rc_initial_buffer_occupancy)
3650  video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
3651  video_enc->intra_dc_precision = intra_dc_precision - 8;
3652 
3653  /* two pass mode */
3654  if (do_pass) {
3655  if (do_pass == 1) {
3656  video_enc->flags |= CODEC_FLAG_PASS1;
3657  } else {
3658  video_enc->flags |= CODEC_FLAG_PASS2;
3659  }
3660  }
3661 
3662  MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
3663  if (forced_key_frames)
3664  parse_forced_key_frames(forced_key_frames, ost, video_enc);
3665 
3666  MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
3667 
3668  ost->top_field_first = -1;
 3669  MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
 3670 
3671 #if CONFIG_AVFILTER
3672  MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3673  if (filters)
3674  ost->avfilter = av_strdup(filters);
3675 #endif
3676  } else {
 3677  MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc, st);
 3678  }
3679 
3680  return ost;
3681 }
3682 
 3683 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
 3684 {
3685  AVStream *st;
3686  OutputStream *ost;
3687  AVCodecContext *audio_enc;
3688 
3689  ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
3690  st = ost->st;
3691 
3692  audio_enc = st->codec;
3693  audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
3694 
3695  if (!ost->stream_copy) {
3696  char *sample_fmt = NULL;
3697 
3698  MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
3699 
3700  MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
3701  if (sample_fmt &&
3702  (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
3703  av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
3704  exit_program(1);
3705  }
3706 
3707  MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
3708  }
3709 
3710  return ost;
3711 }
3712 
 3713 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
 3714 {
3715  OutputStream *ost;
3716 
3717  ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
3718  if (!ost->stream_copy) {
3719  av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
3720  exit_program(1);
3721  }
3722 
3723  return ost;
3724 }
3725 
 3726 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
 3727 {
 3728  OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
 3729  ost->stream_copy = 1;
3730  return ost;
3731 }
3732 
 3733 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
 3734 {
3735  AVStream *st;
3736  OutputStream *ost;
3737  AVCodecContext *subtitle_enc;
3738 
 3739  ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
 3740  st = ost->st;
3741  subtitle_enc = st->codec;
3742 
3743  subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
3744 
3745  return ost;
3746 }
3747 
3748 /* arg format is "output-stream-index:streamid-value". */
3749 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
3750 {
3751  int idx;
3752  char *p;
3753  char idx_str[16];
3754 
3755  av_strlcpy(idx_str, arg, sizeof(idx_str));
3756  p = strchr(idx_str, ':');
3757  if (!p) {
 3758  av_log(NULL, AV_LOG_FATAL,
 3759  "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
3760  arg, opt);
3761  exit_program(1);
3762  }
3763  *p++ = '\0';
3764  idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
3765  o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
3766  o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
3767  return 0;
3768 }
3769 
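/* Copy chapters from the input file, shifting them by the output start time and
 * clipping them to the requested recording time; chapters that end before the
 * output starts are dropped. */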
3770 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
3771 {
3772  AVFormatContext *is = ifile->ctx;
3773  AVFormatContext *os = ofile->ctx;
3774  int i;
3775 
3776  for (i = 0; i < is->nb_chapters; i++) {
3777  AVChapter *in_ch = is->chapters[i], *out_ch;
3778  int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
3779  AV_TIME_BASE_Q, in_ch->time_base);
3780  int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
 3781  av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
 3782 
3783 
3784  if (in_ch->end < ts_off)
3785  continue;
3786  if (rt != INT64_MAX && in_ch->start > rt + ts_off)
3787  break;
3788 
3789  out_ch = av_mallocz(sizeof(AVChapter));
3790  if (!out_ch)
3791  return AVERROR(ENOMEM);
3792 
3793  out_ch->id = in_ch->id;
3794  out_ch->time_base = in_ch->time_base;
3795  out_ch->start = FFMAX(0, in_ch->start - ts_off);
3796  out_ch->end = FFMIN(rt, in_ch->end - ts_off);
3797 
3798  if (copy_metadata)
3799  av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
3800 
3801  os->nb_chapters++;
3802  os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
3803  if (!os->chapters)
3804  return AVERROR(ENOMEM);
3805  os->chapters[os->nb_chapters - 1] = out_ch;
3806  }
3807  return 0;
3808 }
3809 
3810 static void opt_output_file(void *optctx, const char *filename)
3811 {
3812  OptionsContext *o = optctx;
3813  AVFormatContext *oc;
3814  int i, err;
3815  AVOutputFormat *file_oformat;
3816  OutputStream *ost;
3817  InputStream *ist;
3818 
3819  if (!strcmp(filename, "-"))
3820  filename = "pipe:";
3821 
3822  oc = avformat_alloc_context();
3823  if (!oc) {
3824  print_error(filename, AVERROR(ENOMEM));
3825  exit_program(1);
3826  }
3827 
3828  if (o->format) {
3829  file_oformat = av_guess_format(o->format, NULL, NULL);
3830  if (!file_oformat) {
3831  av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
3832  exit_program(1);
3833  }
3834  } else {
3835  file_oformat = av_guess_format(NULL, filename, NULL);
3836  if (!file_oformat) {
3837  av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
3838  filename);
3839  exit_program(1);
3840  }
3841  }
3842 
3843  oc->oformat = file_oformat;
3844  oc->interrupt_callback = int_cb;
3845  av_strlcpy(oc->filename, filename, sizeof(oc->filename));
3846 
3847  if (!o->nb_stream_maps) {
3848  /* pick the "best" stream of each type */
3849 #define NEW_STREAM(type, index)\
3850  if (index >= 0) {\
3851  ost = new_ ## type ## _stream(o, oc);\
3852  ost->source_index = index;\
3853  ost->sync_ist = &input_streams[index];\
3854  input_streams[index].discard = 0;\
3855  }
3856 
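 /* NEW_STREAM() creates an output stream of the given type fed from input
  * stream 'index', uses that stream for sync, and clears its discard flag
  * so its packets are actually processed. */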
3857  /* video: highest resolution */
3858  if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
3859  int area = 0, idx = -1;
3860  for (i = 0; i < nb_input_streams; i++) {
3861  ist = &input_streams[i];
3862  if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
3863  ist->st->codec->width * ist->st->codec->height > area) {
3864  area = ist->st->codec->width * ist->st->codec->height;
3865  idx = i;
3866  }
3867  }
3868  NEW_STREAM(video, idx);
3869  }
3870 
3871  /* audio: most channels */
3872  if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
3873  int channels = 0, idx = -1;
3874  for (i = 0; i < nb_input_streams; i++) {
3875  ist = &input_streams[i];
3876  if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
3877  ist->st->codec->channels > channels) {
3878  channels = ist->st->codec->channels;
3879  idx = i;
3880  }
3881  }
3882  NEW_STREAM(audio, idx);
3883  }
3884 
3885  /* subtitles: pick first */
3886  if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
3887  for (i = 0; i < nb_input_streams; i++)
3888  if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3889  NEW_STREAM(subtitle, i);
3890  break;
3891  }
3892  }
3893  /* do something with data? */
3894  } else {
3895  for (i = 0; i < o->nb_stream_maps; i++) {
3896  StreamMap *map = &o->stream_maps[i];
3897 
3898  if (map->disabled)
3899  continue;
3900 
3901  ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index];
3902  switch (ist->st->codec->codec_type) {
3903  case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
3904  case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
3905  case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
3906  case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
3907  case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
3908  default:
3909  av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
3910  map->file_index, map->stream_index);
3911  exit_program(1);
3912  }
3913 
3914  ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
3915  ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
3916  map->sync_stream_index];
3917  ist->discard = 0;
3918  }
3919  }
3920 
3921  /* handle attached files */
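 /* Each -attach file is read entirely into memory and stored as the
  * extradata of a new attachment stream; its basename is recorded in the
  * stream's "filename" metadata tag (used e.g. for Matroska attachments). */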
3922  for (i = 0; i < o->nb_attachments; i++) {
3923  AVIOContext *pb;
3924  uint8_t *attachment;
3925  const char *p;
3926  int64_t len;
3927 
3928  if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
3929  av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
3930  o->attachments[i]);
3931  exit_program(1);
3932  }
3933  if ((len = avio_size(pb)) <= 0) {
3934  av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
3935  o->attachments[i]);
3936  exit_program(1);
3937  }
3938  if (!(attachment = av_malloc(len))) {
3939  av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
3940  o->attachments[i]);
3941  exit_program(1);
3942  }
3943  avio_read(pb, attachment, len);
3944 
3945  ost = new_attachment_stream(o, oc);
3946  ost->stream_copy = 0;
3947  ost->source_index = -1;
3948  ost->attachment_filename = o->attachments[i];
3949  ost->st->codec->extradata = attachment;
3950  ost->st->codec->extradata_size = len;
3951 
3952  p = strrchr(o->attachments[i], '/');
3953  av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
3954  avio_close(pb);
3955  }
3956 
3957  output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
3958  output_files[nb_output_files - 1].ctx = oc;
3959  output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams;
3960  output_files[nb_output_files - 1].recording_time = o->recording_time;
3961  output_files[nb_output_files - 1].start_time = o->start_time;
3962  output_files[nb_output_files - 1].limit_filesize = o->limit_filesize;
3963  av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0);
3964 
3965  /* check filename in case an image number is expected */
3966  if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
3967  if (!av_filename_number_test(oc->filename)) {
3968  print_error(oc->filename, AVERROR(EINVAL));
3969  exit_program(1);
3970  }
3971  }
3972 
3973  if (!(oc->oformat->flags & AVFMT_NOFILE)) {
3974  /* test if it already exists to avoid losing precious files */
3975  assert_file_overwrite(filename);
3976 
3977  /* open the file */
3978  if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
3979  &oc->interrupt_callback,
3980  &output_files[nb_output_files - 1].opts)) < 0) {
3981  print_error(filename, err);
3982  exit_program(1);
3983  }
3984  }
3985 
3986  if (o->mux_preload) {
3987  uint8_t buf[64];
3988  snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
3989  av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
3990  }
3991  oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
3992  oc->flags |= AVFMT_FLAG_NONBLOCK;
3993 
3994  /* copy metadata */
3995  for (i = 0; i < o->nb_metadata_map; i++) {
3996  char *p;
3997  int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
3998 
3999  if (in_file_index < 0)
4000  continue;
4001  if (in_file_index >= nb_input_files) {
4002  av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4003  exit_program(1);
4004  }
4005  copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index].ctx, o);
4006  }
4007 
4008  /* copy chapters */
4009  if (o->chapters_input_file >= nb_input_files) {
4010  if (o->chapters_input_file == INT_MAX) {
4011  /* copy chapters from the first input file that has them */
4012  o->chapters_input_file = -1;
4013  for (i = 0; i < nb_input_files; i++)
4014  if (input_files[i].ctx->nb_chapters) {
4015  o->chapters_input_file = i;
4016  break;
4017  }
4018  } else {
4019  av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4020  o->chapters_input_file);
4021  exit_program(1);
4022  }
4023  }
4024  if (o->chapters_input_file >= 0)
4025  copy_chapters(&input_files[o->chapters_input_file], &output_files[nb_output_files - 1],
4026  !o->metadata_chapters_manual);
4027 
4028  /* copy global metadata by default */
4029  if (!o->metadata_global_manual && nb_input_files)
4030  av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
4031  AV_DICT_DONT_OVERWRITE);
4032  if (!o->metadata_streams_manual)
4033  for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
4034  InputStream *ist;
4035  if (output_streams[i].source_index < 0) /* this is true e.g. for attached files */
4036  continue;
4037  ist = &input_streams[output_streams[i].source_index];
4038  av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4039  }
4040 
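 /* -metadata values take an optional specifier: 'g' (or none) applies to
  * the output file globally, 's[:stream_spec]' to matching streams, and
  * 'c:<index>' to a chapter; an empty value deletes the tag. */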
4041  /* process manually set metadata */
4042  for (i = 0; i < o->nb_metadata; i++) {
4043  AVDictionary **m;
4044  char type, *val;
4045  const char *stream_spec;
4046  int index = 0, j, ret;
4047 
4048  val = strchr(o->metadata[i].u.str, '=');
4049  if (!val) {
4050  av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4051  o->metadata[i].u.str);
4052  exit_program(1);
4053  }
4054  *val++ = 0;
4055 
4056  parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4057  if (type == 's') {
4058  for (j = 0; j < oc->nb_streams; j++) {
4059  if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4060  av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
4061  } else if (ret < 0)
4062  exit_program(1);
4063  }
4065  }
4066  else {
4067  switch (type) {
4068  case 'g':
4069  m = &oc->metadata;
4070  break;
4071  case 'c':
4072  if (index < 0 || index >= oc->nb_chapters) {
4073  av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4074  exit_program(1);
4075  }
4076  m = &oc->chapters[index]->metadata;
4077  break;
4078  default:
4079  av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4080  exit_program(1);
4081  }
4082  av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4083  }
4084  }
4085 
4086  reset_options(o);
4087 }
4088 
4089 /* same option as mencoder */
4090 static int opt_pass(const char *opt, const char *arg)
4091 {
4092  do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
4093  return 0;
4094 }
4095 
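 /* getutime()/getmaxrss() below report CPU user time and peak memory use
  * for -benchmark, via getrusage() where available, the Win32 process APIs
  * otherwise, and falling back to wall-clock time / 0. */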
4096 static int64_t getutime(void)
4097 {
4098 #if HAVE_GETRUSAGE
4099  struct rusage rusage;
4100 
4101  getrusage(RUSAGE_SELF, &rusage);
4102  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4103 #elif HAVE_GETPROCESSTIMES
4104  HANDLE proc;
4105  FILETIME c, e, k, u;
4106  proc = GetCurrentProcess();
4107  GetProcessTimes(proc, &c, &e, &k, &u);
4108  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4109 #else
4110  return av_gettime();
4111 #endif
4112 }
4113 
4114 static int64_t getmaxrss(void)
4115 {
4116 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4117  struct rusage rusage;
4118  getrusage(RUSAGE_SELF, &rusage);
4119  return (int64_t)rusage.ru_maxrss * 1024;
4120 #elif HAVE_GETPROCESSMEMORYINFO
4121  HANDLE proc;
4122  PROCESS_MEMORY_COUNTERS memcounters;
4123  proc = GetCurrentProcess();
4124  memcounters.cb = sizeof(memcounters);
4125  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4126  return memcounters.PeakPagefileUsage;
4127 #else
4128  return 0;
4129 #endif
4130 }
4131 
4132 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4133 {
4134  return parse_option(o, "q:a", arg, options);
4135 }
4136 
4137 static void show_usage(void)
4138 {
4139  printf("Hyper fast Audio and Video encoder\n");
4140  printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
4141  printf("\n");
4142 }
4143 
4144 static void show_help(void)
4145 {
4146  int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4147  av_log_set_callback(log_callback_help);
4148  show_usage();
4149  show_help_options(options, "Main options:\n",
4150  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4151  show_help_options(options, "\nAdvanced options:\n",
4152  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4153  OPT_EXPERT);
4154  show_help_options(options, "\nVideo options:\n",
4155  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4156  OPT_VIDEO);
4157  show_help_options(options, "\nAdvanced Video options:\n",
4158  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4159  OPT_VIDEO | OPT_EXPERT);
4160  show_help_options(options, "\nAudio options:\n",
4161  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4162  OPT_AUDIO);
4163  show_help_options(options, "\nAdvanced Audio options:\n",
4164  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4165  OPT_AUDIO | OPT_EXPERT);
4166  show_help_options(options, "\nSubtitle options:\n",
4167  OPT_SUBTITLE | OPT_GRAB,
4168  OPT_SUBTITLE);
4169  show_help_options(options, "\nAudio/Video grab options:\n",
4170  OPT_GRAB,
4171  OPT_GRAB);
4172  printf("\n");
4173  show_help_children(avcodec_get_class(), flags);
4174  show_help_children(avformat_get_class(), flags);
4175  show_help_children(sws_get_class(), flags);
4176 }
4177 
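 /* -target presets VCD/SVCD/DVD/DV parameters. The PAL/NTSC/FILM norm is
  * taken from a "pal-"/"ntsc-"/"film-" prefix, or guessed from the frame
  * rate of the input video streams (25 -> PAL, 29.97 or 23.976 -> NTSC). */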
4178 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4179 {
4180  enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4181  static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4182 
4183  if (!strncmp(arg, "pal-", 4)) {
4184  norm = PAL;
4185  arg += 4;
4186  } else if (!strncmp(arg, "ntsc-", 5)) {
4187  norm = NTSC;
4188  arg += 5;
4189  } else if (!strncmp(arg, "film-", 5)) {
4190  norm = FILM;
4191  arg += 5;
4192  } else {
4193  /* Try to determine PAL/NTSC by peeking in the input files */
4194  if (nb_input_files) {
4195  int i, j, fr;
4196  for (j = 0; j < nb_input_files; j++) {
4197  for (i = 0; i < input_files[j].nb_streams; i++) {
4198  AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
4199  if (c->codec_type != AVMEDIA_TYPE_VIDEO)
4200  continue;
4201  fr = c->time_base.den * 1000 / c->time_base.num;
4202  if (fr == 25000) {
4203  norm = PAL;
4204  break;
4205  } else if ((fr == 29970) || (fr == 23976)) {
4206  norm = NTSC;
4207  break;
4208  }
4209  }
4210  if (norm != UNKNOWN)
4211  break;
4212  }
4213  }
4214  if (norm != UNKNOWN)
4215  av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4216  }
4217 
4218  if (norm == UNKNOWN) {
4219  av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4220  av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4221  av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
4222  exit_program(1);
4223  }
4224 
4225  if (!strcmp(arg, "vcd")) {
4226  opt_video_codec(o, "c:v", "mpeg1video");
4227  opt_audio_codec(o, "c:a", "mp2");
4228  parse_option(o, "f", "vcd", options);
4229 
4230  parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4231  parse_option(o, "r", frame_rates[norm], options);
4232  opt_default("g", norm == PAL ? "15" : "18");
4233 
4234  opt_default("b", "1150000");
4235  opt_default("maxrate", "1150000");
4236  opt_default("minrate", "1150000");
4237  opt_default("bufsize", "327680"); // 40*1024*8;
4238 
4239  opt_default("b:a", "224000");
4240  parse_option(o, "ar", "44100", options);
4241  parse_option(o, "ac", "2", options);
4242 
4243  opt_default("packetsize", "2324");
4244  opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4245 
4246  /* We have to offset the PTS, so that it is consistent with the SCR.
4247  SCR starts at 36000, but the first two packs contain only padding
4248  and the first pack from the other stream, respectively, may also have
4249  been written before.
4250  So the real data starts at SCR 36000+3*1200. */
4251  o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
4252  } else if (!strcmp(arg, "svcd")) {
4253 
4254  opt_video_codec(o, "c:v", "mpeg2video");
4255  opt_audio_codec(o, "c:a", "mp2");
4256  parse_option(o, "f", "svcd", options);
4257 
4258  parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4259  parse_option(o, "r", frame_rates[norm], options);
4260  opt_default("g", norm == PAL ? "15" : "18");
4261 
4262  opt_default("b", "2040000");
4263  opt_default("maxrate", "2516000");
4264  opt_default("minrate", "0"); // 1145000;
4265  opt_default("bufsize", "1835008"); // 224*1024*8;
4266  opt_default("flags", "+scan_offset");
4267 
4268 
4269  opt_default("b:a", "224000");
4270  parse_option(o, "ar", "44100", options);
4271 
4272  opt_default("packetsize", "2324");
4273 
4274  } else if (!strcmp(arg, "dvd")) {
4275 
4276  opt_video_codec(o, "c:v", "mpeg2video");
4277  opt_audio_codec(o, "c:a", "ac3");
4278  parse_option(o, "f", "dvd", options);
4279 
4280  parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4281  parse_option(o, "r", frame_rates[norm], options);
4282  opt_default("g", norm == PAL ? "15" : "18");
4283 
4284  opt_default("b", "6000000");
4285  opt_default("maxrate", "9000000");
4286  opt_default("minrate", "0"); // 1500000;
4287  opt_default("bufsize", "1835008"); // 224*1024*8;
4288 
4289  opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4290  opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4291 
4292  opt_default("b:a", "448000");
4293  parse_option(o, "ar", "48000", options);
4294 
4295  } else if (!strncmp(arg, "dv", 2)) {
4296 
4297  parse_option(o, "f", "dv", options);
4298 
4299  parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4300  parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4301  norm == PAL ? "yuv420p" : "yuv411p", options);
4302  parse_option(o, "r", frame_rates[norm], options);
4303 
4304  parse_option(o, "ar", "48000", options);
4305  parse_option(o, "ac", "2", options);
4306 
4307  } else {
4308  av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4309  return AVERROR(EINVAL);
4310  }
4311  return 0;
4312 }
4313 
4314 static int opt_vstats_file(const char *opt, const char *arg)
4315 {
4316  av_free(vstats_filename);
4317  vstats_filename = av_strdup (arg);
4318  return 0;
4319 }
4320 
4321 static int opt_vstats(const char *opt, const char *arg)
4322 {
4323  char filename[40];
4324  time_t today2 = time(NULL);
4325  struct tm *today = localtime(&today2);
4326 
4327  snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4328  today->tm_sec);
4329  return opt_vstats_file(opt, filename);
4330 }
4331 
4332 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4333 {
4334  return parse_option(o, "frames:v", arg, options);
4335 }
4336 
4337 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4338 {
4339  return parse_option(o, "frames:a", arg, options);
4340 }
4341 
4342 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4343 {
4344  return parse_option(o, "frames:d", arg, options);
4345 }
4346 
4347 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4348 {
4349  return parse_option(o, "tag:v", arg, options);
4350 }
4351 
4352 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4353 {
4354  return parse_option(o, "tag:a", arg, options);
4355 }
4356 
4357 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4358 {
4359  return parse_option(o, "tag:s", arg, options);
4360 }
4361 
4362 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4363 {
4364  return parse_option(o, "filter:v", arg, options);
4365 }
4366 
4367 static int opt_vsync(const char *opt, const char *arg)
4368 {
4369  if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4370  else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4371  else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4372 
4373  if (video_sync_method == VSYNC_AUTO)
4374  video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
4375  return 0;
4376 }
4377 
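 /* The option table below maps command-line flags to global variables,
  * callbacks (OPT_FUNC2), or fields of OptionsContext (OPT_OFFSET /
  * OPT_SPEC via the OFFSET() macro); OPT_SPEC entries may carry a
  * per-stream specifier such as "-c:v". */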
4378 #define OFFSET(x) offsetof(OptionsContext, x)
4379 static const OptionDef options[] = {
4380  /* main options */
4381 #include "cmdutils_common_opts.h"
4382  { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4383  { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4384  { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4385  { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4386  { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4387  { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4388  { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4389  { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4390  "outfile[,metadata]:infile[,metadata]" },
4391  { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4392  { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4393  { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4394  { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4395  { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4396  { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4397  { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4398  { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4399  { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4400  "add timings for benchmarking" },
4401  { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4402  { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4403  "dump each input packet" },
4404  { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4405  "when dumping packets, also dump the payload" },
4406  { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4407  { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4408  { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4409  { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4410  { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4411  { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
4412  { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
4413  { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4414  { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4415  { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4416  { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4417  { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4418  { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4419  { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4420  { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4421 #if CONFIG_AVFILTER
4422  { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4423 #endif
4424  { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4425  { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4426  { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4427 
4428  /* video options */
4429  { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4430  { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4431  { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4432  { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4433  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4434  { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4435  { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4436  { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4437  { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4438  { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4439  "use same quantizer as source (implies VBR)" },
4440  { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4441  { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4442  { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace},
4443  "deinterlace pictures" },
4444  { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4445  { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4446 #if CONFIG_AVFILTER
4447  { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4448 #endif
4449  { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4450  { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4451  { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4452  { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4453  { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4454  { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4455  { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4456  { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4457  { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
4458 
4459  /* audio options */
4460  { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4461  { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4462  { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4463  { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4464  { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4465  { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4466  { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4467  { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4468  { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4469 
4470  /* subtitle options */
4471  { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4472  { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4473  { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
4474 
4475  /* grab options */
4476  { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
4477 
4478  /* muxer options */
4479  { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
4480  { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
4481 
4482  { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
4483 
4484  /* data codec support */
4485  { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
4486 
4487  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
4488  { NULL, },
4489 };
4490 
4491 int main(int argc, char **argv)
4492 {
4493  OptionsContext o = { 0 };
4494  int64_t ti;
4495 
4496  reset_options(&o);
4497 
4498  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4499  parse_loglevel(argc, argv, options);
4500 
4501  avcodec_register_all();
4502 #if CONFIG_AVDEVICE
4503  avdevice_register_all();
4504 #endif
4505 #if CONFIG_AVFILTER
4506  avfilter_register_all();
4507 #endif
4508  av_register_all();
4509  avformat_network_init();
4510 
4511  show_banner();
4512 
4513  /* parse options */
4514  parse_options(&o, argc, argv, options, opt_output_file);
4515 
4516  if (nb_output_files <= 0 && nb_input_files == 0) {
4517  show_usage();
4518  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4519  exit_program(1);
4520  }
4521 
4522  /* file converter / grab */
4523  if (nb_output_files <= 0) {
4524  fprintf(stderr, "At least one output file must be specified\n");
4525  exit_program(1);
4526  }
4527 
4528  if (nb_input_files == 0) {
4529  av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4530  exit_program(1);
4531  }
4532 
4533  ti = getutime();
4534  if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0)
4535  exit_program(1);
4536  ti = getutime() - ti;
4537  if (do_benchmark) {
4538  int maxrss = getmaxrss() / 1024;
4539  printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
4540  }
4541 
4542  exit_program(0);
4543  return 0;
4544 }