FFmpeg  4.4.5
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity consumed by cmdutils.c for the banner and -version. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Destination for -vstats logging; NULL until first use. */
112 static FILE *vstats_file;
113 
/* Variable names usable inside -force_key_frames expressions;
 * NULL-terminated for av_expr_parse(). */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
176 
/* Reset ist->sub2video.frame to a zeroed (fully transparent) canvas.
 * Dimensions come from the decoder when it reports them, otherwise from
 * the configured sub2video size. Returns 0 or a negative AVERROR from
 * av_frame_get_buffer().
 * NOTE(review): the listing dropped original lines 177 (function
 * signature), 182 and 185 — restore those from upstream ffmpeg.c. */
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
/* zero the first plane -> transparent canvas (assumes a 4-byte packed
 * pixel format with alpha; set on a line missing here — TODO confirm) */
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
/* Stamp the prepared sub2video frame with 'pts' and push a reference to
 * it into every filtergraph input fed by this stream.
 * NOTE(review): the listing dropped original lines 223 (signature) and
 * 232-234 (the av_buffersrc_add_frame_flags() call that sets 'ret') —
 * restore from upstream ffmpeg.c. */
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
/* Render 'sub' (or a blank/heartbeat frame when sub == NULL) onto the
 * sub2video canvas and push it into the filtergraphs. Also records the
 * interval [pts, end_pts) during which the subpicture is displayed. */
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
/* NOTE(review): dst is passed to sub2video_copy_rect(), which takes
 * uint8_t * — this probably lost a 'u' in extraction; verify upstream. */
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
/* subtitle display times are in ms; convert to the stream time base */
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
/* NOTE(review): line 268 (first half of the av_log() error call) was
 * lost in extraction. */
267  if (sub2video_get_blank_frame(ist) < 0) {
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276  sub2video_push_ref(ist, pts);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
/* Re-send the current subpicture to sibling sub2video streams of the
 * same input file whenever a frame is read, so filters waiting on a
 * subtitle input do not stall the graph.
 * NOTE(review): line 281 (signature, expected
 * `static void sub2video_heartbeat(InputStream *ist, int64_t pts)`)
 * was lost in extraction — confirm against upstream. */
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlayed subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
/* only push a reference if a buffersrc actually failed a request */
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
/* Async-signal-safe half of terminal teardown: restore the tty mode we
 * saved in term_init(), if we ever changed it. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty)
        tcsetattr(0, TCSANOW, &oldtty);
#endif
}
336 
/* Public terminal teardown: silence the logger's progress line, then
 * restore the tty.
 * NOTE(review): line 340 (expected `term_exit_sigsafe();`) was lost in
 * extraction — without it the tty is never restored; confirm upstream. */
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
341 }
342 
/* Signal bookkeeping shared with the handlers below. volatile because
 * they are written from signal context; NOTE(review): lines 345 and 348
 * (additional state, likely the atomic transcode_init_done) were lost
 * in extraction. */
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
/* Signal handler: record which signal arrived and, after more than
 * three signals, bypass cleanup and hard-exit with status 123.
 * NOTE(review): lines 351 (rest of the signature, `sigterm_handler(int
 * sig)`) and 355-356 (expected `received_nb_signals++;` and the
 * sig-safe terminal restore) were lost in extraction. */
350 static void
352 {
353  int ret;
354  received_sigterm = sig;
357  if(received_nb_signals > 3) {
/* write(2) is async-signal-safe, unlike fprintf */
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
/* Install a signal handler. On Linux go through sigaction() using the
 * 'action' struct prepared in the caller's scope (term_init), so the
 * SA_RESTART flag and full signal mask apply; elsewhere fall back to
 * plain signal(). */
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
/* Configure terminal raw mode (so single keypresses are readable) and
 * install all signal / console handlers. */
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
/* NOTE(review): line 422 was lost in extraction — upstream has an
 * `if (!run_as_daemon && stdin_interaction) {` guard here, which the
 * stray closing brace at listing line 440 terminates. */
421 #if HAVE_TERMIOS_H
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
/* raw-ish mode: no line buffering, no echo, 8-bit chars, return after
 * one byte (VMIN=1) with no timeout (VTIME=0) */
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  if (read(0, &ch, 1) == 1)
496  return ch;
497  return 0;
498  }else{
499  return -1;
500  }
501  }
502 # endif
503  if(kbhit())
504  return(getch());
505 #endif
506  return -1;
507 }
508 
/* AVIOInterruptCB callback polled by lavf during blocking I/O; a
 * non-zero return aborts the operation.
 * NOTE(review): line 511 (the return statement, upstream
 * `return received_nb_signals > atomic_load(&transcode_init_done);`)
 * was lost in extraction — as shown, a non-void function falls off the
 * end, which is undefined behavior. Restore before building. */
509 static int decode_interrupt_cb(void *ctx)
510 {
512 }
513 
515 
/* Global teardown registered with exit_program(): frees filtergraphs,
 * output/input files and streams, closes the vstats file and restores
 * the terminal. 'ret' is the pending exit status (used only for the
 * final log message).
 * NOTE(review): many lines of the original listing are missing from
 * this extraction (527, 534, 536, 542, 544, 546, 566, 568, 570, 575,
 * 581, 584, 587, 617, 622, 628-630, 635-636, 639-640, 645, 647, 652,
 * 656, 658-661, 665-666) — mostly frees, declarations and av_log
 * calls. Diff against upstream ffmpeg.c 4.4 before building. */
516 static void ffmpeg_cleanup(int ret)
517 {
518  int i, j;
519 
520  if (do_benchmark) {
521  int maxrss = getmaxrss() / 1024;
522  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
523  }
524 
/* free every filtergraph: queued frames, queued subtitles, inputs,
 * outputs, then the graph description itself */
525  for (i = 0; i < nb_filtergraphs; i++) {
526  FilterGraph *fg = filtergraphs[i];
528  for (j = 0; j < fg->nb_inputs; j++) {
529  InputFilter *ifilter = fg->inputs[j];
530  struct InputStream *ist = ifilter->ist;
531 
532  while (av_fifo_size(ifilter->frame_queue)) {
533  AVFrame *frame;
535  sizeof(frame), NULL);
537  }
538  av_fifo_freep(&ifilter->frame_queue);
539  if (ist->sub2video.sub_queue) {
540  while (av_fifo_size(ist->sub2video.sub_queue)) {
541  AVSubtitle sub;
543  &sub, sizeof(sub), NULL);
545  }
547  }
548  av_buffer_unref(&ifilter->hw_frames_ctx);
549  av_freep(&ifilter->name);
550  av_freep(&fg->inputs[j]);
551  }
552  av_freep(&fg->inputs);
553  for (j = 0; j < fg->nb_outputs; j++) {
554  OutputFilter *ofilter = fg->outputs[j];
555 
556  avfilter_inout_free(&ofilter->out_tmp);
557  av_freep(&ofilter->name);
558  av_freep(&ofilter->formats);
559  av_freep(&ofilter->channel_layouts);
560  av_freep(&ofilter->sample_rates);
561  av_freep(&fg->outputs[j]);
562  }
563  av_freep(&fg->outputs);
564  av_freep(&fg->graph_desc);
565 
567  }
569 
571 
572  /* close files */
573  for (i = 0; i < nb_output_files; i++) {
574  OutputFile *of = output_files[i];
576  if (!of)
577  continue;
578  s = of->ctx;
579  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
580  avio_closep(&s->pb);
582  av_dict_free(&of->opts);
583 
585  }
/* release per-output-stream state: bsf, frames, packets, dicts,
 * encoder context and any still-queued mux packets */
586  for (i = 0; i < nb_output_streams; i++) {
588 
589  if (!ost)
590  continue;
591 
592  av_bsf_free(&ost->bsf_ctx);
593 
594  av_frame_free(&ost->filtered_frame);
595  av_frame_free(&ost->last_frame);
596  av_packet_free(&ost->pkt);
597  av_dict_free(&ost->encoder_opts);
598 
599  av_freep(&ost->forced_keyframes);
600  av_expr_free(ost->forced_keyframes_pexpr);
601  av_freep(&ost->avfilter);
602  av_freep(&ost->logfile_prefix);
603 
604  av_freep(&ost->audio_channels_map);
605  ost->audio_channels_mapped = 0;
606 
607  av_dict_free(&ost->sws_dict);
608  av_dict_free(&ost->swr_opts);
609 
610  avcodec_free_context(&ost->enc_ctx);
611  avcodec_parameters_free(&ost->ref_par);
612 
613  if (ost->muxing_queue) {
614  while (av_fifo_size(ost->muxing_queue)) {
615  AVPacket *pkt;
616  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
618  }
619  av_fifo_freep(&ost->muxing_queue);
620  }
621 
623  }
624 #if HAVE_THREADS
625  free_input_threads();
626 #endif
627  for (i = 0; i < nb_input_files; i++) {
631  }
632  for (i = 0; i < nb_input_streams; i++) {
633  InputStream *ist = input_streams[i];
634 
637  av_packet_free(&ist->pkt);
638  av_dict_free(&ist->decoder_opts);
641  av_freep(&ist->filters);
642  av_freep(&ist->hwaccel_device);
643  av_freep(&ist->dts_buffer);
644 
646 
648  }
649 
650  if (vstats_file) {
651  if (fclose(vstats_file))
653  "Error closing vstats file, loss of information possible: %s\n",
654  av_err2str(AVERROR(errno)));
655  }
657 
662 
663  uninit_opts();
664 
666 
667  if (received_sigterm) {
668  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
669  (int) received_sigterm);
670  } else if (ret && atomic_load(&transcode_init_done)) {
671  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
672  }
673  term_exit();
/* let the Windows CtrlHandler spin-wait know cleanup is done */
674  ffmpeg_exited = 1;
675 }
676 
/* Remove from dictionary 'a' every key that also appears in 'b'.
 * NOTE(review): lines 677 (signature, upstream `static void
 * remove_avoptions(AVDictionary **a, AVDictionary *b)`) and 682 (the
 * `av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);` loop body) were
 * lost in extraction. */
678 {
679  AVDictionaryEntry *t = NULL;
680 
681  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
683  }
684 }
685 
/* Abort if any option in 'm' was not consumed — an unconsumed entry
 * means the user passed an option no component recognized.
 * NOTE(review): lines 686 (signature) and 688 (the `AVDictionaryEntry
 * *t;` declaration) were lost in extraction. */
687 {
689  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
690  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
691  exit_program(1);
692  }
693 }
694 
695 static void abort_codec_experimental(const AVCodec *c, int encoder)
696 {
697  exit_program(1);
698 }
699 
/* With -benchmark_all, log per-step user/sys/real time deltas since the
 * previous call; always refreshes the 'current_time' reference point.
 * NOTE(review): lines 703 (expected `BenchmarkTimeStamps t =
 * get_benchmark_time_stamps();`), 711 and 713-714 (the av_log call's
 * first arguments and the user/sys deltas) were lost in extraction. */
700 static void update_benchmark(const char *fmt, ...)
701 {
702  if (do_benchmark_all) {
704  va_list va;
705  char buf[1024];
706 
707  if (fmt) {
708  va_start(va, fmt);
709  vsnprintf(buf, sizeof(buf), fmt, va);
710  va_end(va);
712  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
715  t.real_usec - current_time.real_usec, buf);
716  }
717  current_time = t;
718  }
719 }
720 
/* Mark every output stream finished: 'this_stream' flags go to 'ost'
 * itself, 'others' flags to all remaining streams.
 * NOTE(review): line 721 (signature, upstream `static void
 * close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
 * OSTFinished others)`) was lost in extraction. */
722 {
723  int i;
724  for (i = 0; i < nb_output_streams; i++) {
725  OutputStream *ost2 = output_streams[i];
726  ost2->finished |= ost == ost2 ? this_stream : others;
727  }
728 }
729 
/* Hand one packet to the muxer for 'ost': enforces -frames limits,
 * buffers packets while the muxer header is not yet written, rescales
 * timestamps to the stream time base, repairs invalid/non-monotonic
 * DTS, and finally interleaves the packet into the output file.
 * 'unqueue' is non-zero when re-emitting a previously buffered packet
 * (so it is not counted twice against max_frames).
 * NOTE(review): several lines of the listing are missing (746, 773,
 * 785-786, 789, 791, 825, 855, 861-862, 867, 871, 873) — among them
 * the close_output_stream() call, an av_fifo_space check, the
 * side-data fetch, and the av_interleaved_write_frame() call that sets
 * 'ret'. Diff against upstream before building. */
730 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
731 {
732  AVFormatContext *s = of->ctx;
733  AVStream *st = ost->st;
734  int ret;
735 
736  /*
737  * Audio encoders may split the packets -- #frames in != #packets out.
738  * But there is no reordering, so we can limit the number of output packets
739  * by simply dropping them here.
740  * Counting encoded video frames needs to be done separately because of
741  * reordering, see do_video_out().
742  * Do not count the packet when unqueued because it has been counted when queued.
743  */
744  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
745  if (ost->frame_number >= ost->max_frames) {
747  return;
748  }
749  ost->frame_number++;
750  }
751 
752  if (!of->header_written) {
753  AVPacket *tmp_pkt;
754  /* the muxer is not initialized yet, buffer the packet */
755  if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue; growth is capped once past the data-size threshold */
756  unsigned int are_we_over_size =
757  (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
758  int new_size = are_we_over_size ?
759  FFMIN(2 * av_fifo_size(ost->muxing_queue),
760  ost->max_muxing_queue_size) :
761  2 * av_fifo_size(ost->muxing_queue);
762 
763  if (new_size <= av_fifo_size(ost->muxing_queue)) {
765  "Too many packets buffered for output stream %d:%d.\n",
766  ost->file_index, ost->st->index);
767  exit_program(1);
768  }
769  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
770  if (ret < 0)
771  exit_program(1);
772  }
774  if (ret < 0)
775  exit_program(1);
776  tmp_pkt = av_packet_alloc();
777  if (!tmp_pkt)
778  exit_program(1);
779  av_packet_move_ref(tmp_pkt, pkt);
780  ost->muxing_queue_data_size += tmp_pkt->size;
781  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
782  return;
783  }
784 
787  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
788 
/* video-only branch: extract encoder quality/error stats from packet
 * side data (fetch of 'sd' is on a line missing here) */
790  int i;
792  NULL);
793  ost->quality = sd ? AV_RL32(sd) : -1;
794  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
795 
796  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
797  if (sd && i < sd[5])
798  ost->error[i] = AV_RL64(sd + 8 + 8*i);
799  else
800  ost->error[i] = -1;
801  }
802 
803  if (ost->frame_rate.num && ost->is_cfr) {
804  if (pkt->duration > 0)
805  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
806  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
807  ost->mux_timebase);
808  }
809  }
810 
811  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
812 
813  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
814  if (pkt->dts != AV_NOPTS_VALUE &&
815  pkt->pts != AV_NOPTS_VALUE &&
816  pkt->dts > pkt->pts) {
817  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
818  pkt->dts, pkt->pts,
819  ost->file_index, ost->st->index);
/* replace both stamps with the median of {pts, dts, last_dts+1} */
820  pkt->pts =
821  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
822  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
823  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
824  }
826  pkt->dts != AV_NOPTS_VALUE &&
827  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
828  ost->last_mux_dts != AV_NOPTS_VALUE) {
829  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
830  if (pkt->dts < max) {
831  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
832  if (exit_on_error)
833  loglevel = AV_LOG_ERROR;
834  av_log(s, loglevel, "Non-monotonous DTS in output stream "
835  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
836  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
837  if (exit_on_error) {
838  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
839  exit_program(1);
840  }
841  av_log(s, loglevel, "changing to %"PRId64". This may result "
842  "in incorrect timestamps in the output file.\n",
843  max);
844  if (pkt->pts >= pkt->dts)
845  pkt->pts = FFMAX(pkt->pts, max);
846  pkt->dts = max;
847  }
848  }
849  }
850  ost->last_mux_dts = pkt->dts;
851 
852  ost->data_size += pkt->size;
853  ost->packets_written++;
854 
856 
857  if (debug_ts) {
858  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
859  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
860  av_get_media_type_string(ost->enc_ctx->codec_type),
863  pkt->size
864  );
865  }
866 
868  if (ret < 0) {
869  print_error("av_interleaved_write_frame()", ret);
870  main_return_code = 1;
872  }
874 }
875 
/* Mark this output stream's encoder finished; with -shortest, also
 * clamp the file's recording time to this stream's current end.
 * NOTE(review): line 876 (signature, upstream `static void
 * close_output_stream(OutputStream *ost)`) was lost in extraction. */
877 {
878  OutputFile *of = output_files[ost->file_index];
879 
880  ost->finished |= ENCODER_FINISHED;
881  if (of->shortest) {
882  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
883  of->recording_time = FFMIN(of->recording_time, end);
884  }
885 }
886 
887 /*
888  * Send a single packet to the output, applying any bitstream filters
889  * associated with the output stream. This may result in any number
890  * of packets actually being written, depending on what bitstream
891  * filters are applied. The supplied packet is consumed and will be
892  * blank (as if newly-allocated) when this function returns.
893  *
894  * If eof is set, instead indicate EOF to all bitstream filters and
895  * therefore flush any delayed packets to the output. A blank packet
896  * must be supplied in this case.
897  */
899  OutputStream *ost, int eof)
900 {
901  int ret = 0;
902 
903  /* apply the output bitstream filters */
904  if (ost->bsf_ctx) {
905  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
906  if (ret < 0)
907  goto finish;
908  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
909  write_packet(of, pkt, ost, 0);
910  if (ret == AVERROR(EAGAIN))
911  ret = 0;
912  } else if (!eof)
913  write_packet(of, pkt, ost, 0);
914 
915 finish:
916  if (ret < 0 && ret != AVERROR_EOF) {
917  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
918  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
919  if(exit_on_error)
920  exit_program(1);
921  }
922 }
923 
/* Return 1 while the stream is still inside the file's -t recording
 * window, 0 once that window is exhausted.
 * NOTE(review): lines 924 (signature, upstream `static int
 * check_recording_time(OutputStream *ost)`) and 931 (expected
 * `close_output_stream(ost);`) were lost in extraction. */
925 {
926  OutputFile *of = output_files[ost->file_index];
927 
928  if (of->recording_time != INT64_MAX &&
929  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
930  AV_TIME_BASE_Q) >= 0) {
932  return 0;
933  }
934  return 1;
935 }
936 
/* Rescale a filtered frame's pts from the buffersink time base to the
 * encoder time base and return the same value as a double with extra
 * fractional precision (used by the fps/vsync logic).
 * NOTE(review): lines 937 (first half of the signature), 949-950 (the
 * filter_tb fetch via av_buffersink_get_time_base()), 957 and 964 (the
 * start-time terms subtracted from both pts computations) were lost in
 * extraction. */
938  AVFrame *frame)
939 {
940  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
941  AVCodecContext *enc = ost->enc_ctx;
942  if (!frame || frame->pts == AV_NOPTS_VALUE ||
943  !enc || !ost->filter || !ost->filter->graph->graph)
944  goto early_exit;
945 
946  {
947  AVFilterContext *filter = ost->filter->filter;
948 
/* widen the target time base so the double keeps sub-tick precision */
951  AVRational tb = enc->time_base;
952  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
953 
954  tb.den <<= extra_bits;
955  float_pts =
956  av_rescale_q(frame->pts, filter_tb, tb) -
958  float_pts /= 1 << extra_bits;
959  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
960  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
961 
962  frame->pts =
963  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
965  }
966 
967 early_exit:
968 
969  if (debug_ts) {
970  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
971  frame ? av_ts2str(frame->pts) : "NULL",
972  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
973  float_pts,
974  enc ? enc->time_base.num : -1,
975  enc ? enc->time_base.den : -1);
976  }
977 
978  return float_pts;
979 }
980 
982  char *error, int error_len);
983 
/* Initialize an output stream exactly once, logging (and with
 * fatal != 0 aborting) on failure. Returns 0 on success or when
 * already initialized, otherwise the init_output_stream() error.
 * NOTE(review): line 984 (first half of the signature, upstream
 * `static int init_output_stream_wrapper(OutputStream *ost,
 * AVFrame *frame,`) was lost in extraction. */
985  unsigned int fatal)
986 {
987  int ret = AVERROR_BUG;
988  char error[1024] = {0};
989 
990  if (ost->initialized)
991  return 0;
992 
993  ret = init_output_stream(ost, frame, error, sizeof(error));
994  if (ret < 0) {
995  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
996  ost->file_index, ost->index, error);
997 
998  if (fatal)
999  exit_program(1);
1000  }
1001 
1002  return ret;
1003 }
1004 
/* Encode one audio frame and mux the resulting packet(s), advancing
 * sync_opts by the number of samples consumed.
 * NOTE(review): lines 1005 (first half of the signature), 1012, 1023,
 * 1027, 1036 and 1050-1051 (an init/assert, adjust-pts call, debug-ts
 * arguments and an av_packet_unref) were lost in extraction. */
1006  AVFrame *frame)
1007 {
1008  AVCodecContext *enc = ost->enc_ctx;
1009  AVPacket *pkt = ost->pkt;
1010  int ret;
1011 
1013 
1014  if (!check_recording_time(ost))
1015  return;
1016 
/* with -async disabled or missing pts, stamp frames back-to-back */
1017  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1018  frame->pts = ost->sync_opts;
1019  ost->sync_opts = frame->pts + frame->nb_samples;
1020  ost->samples_encoded += frame->nb_samples;
1021  ost->frames_encoded++;
1022 
1024  if (debug_ts) {
1025  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1026  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1028  enc->time_base.num, enc->time_base.den);
1029  }
1030 
1031  ret = avcodec_send_frame(enc, frame);
1032  if (ret < 0)
1033  goto error;
1034 
/* drain every packet the encoder has ready */
1035  while (1) {
1037  ret = avcodec_receive_packet(enc, pkt);
1038  if (ret == AVERROR(EAGAIN))
1039  break;
1040  if (ret < 0)
1041  goto error;
1042 
1043  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1044 
1045  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1046 
1047  if (debug_ts) {
1048  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1049  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1052  }
1053 
1054  output_packet(of, pkt, ost, 0);
1055  }
1056 
1057  return;
1058 error:
1059  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1060  exit_program(1);
1061 }
1062 
/* Encode one AVSubtitle and mux it. DVB subtitles are encoded twice:
 * once to draw the subpicture and once (with num_rects forced to 0) to
 * clear it, with the clear packet stamped at end_display_time.
 * NOTE(review): line 1128 (expected packet (re)init before pkt->data is
 * assigned) was lost in extraction. */
1063 static void do_subtitle_out(OutputFile *of,
1064  OutputStream *ost,
1065  AVSubtitle *sub)
1066 {
1067  int subtitle_out_max_size = 1024 * 1024;
1068  int subtitle_out_size, nb, i;
1069  AVCodecContext *enc;
1070  AVPacket *pkt = ost->pkt;
1071  int64_t pts;
1072 
1073  if (sub->pts == AV_NOPTS_VALUE) {
1074  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1075  if (exit_on_error)
1076  exit_program(1);
1077  return;
1078  }
1079 
1080  enc = ost->enc_ctx;
1081 
/* lazily allocate the shared 1 MiB encode buffer */
1082  if (!subtitle_out) {
1083  subtitle_out = av_malloc(subtitle_out_max_size);
1084  if (!subtitle_out) {
1085  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1086  exit_program(1);
1087  }
1088  }
1089 
1090  /* Note: DVB subtitle need one packet to draw them and one other
1091  packet to clear them */
1092  /* XXX: signal it in the codec context ? */
1093  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1094  nb = 2;
1095  else
1096  nb = 1;
1097 
1098  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1099  pts = sub->pts;
1100  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1101  pts -= output_files[ost->file_index]->start_time;
1102  for (i = 0; i < nb; i++) {
1103  unsigned save_num_rects = sub->num_rects;
1104 
1105  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1106  if (!check_recording_time(ost))
1107  return;
1108 
1109  sub->pts = pts;
1110  // start_display_time is required to be 0
1111  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1112  sub->end_display_time -= sub->start_display_time;
1113  sub->start_display_time = 0;
/* second DVB pass encodes an empty subtitle to clear the screen */
1114  if (i == 1)
1115  sub->num_rects = 0;
1116 
1117  ost->frames_encoded++;
1118 
1119  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1120  subtitle_out_max_size, sub);
1121  if (i == 1)
1122  sub->num_rects = save_num_rects;
1123  if (subtitle_out_size < 0) {
1124  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1125  exit_program(1);
1126  }
1127 
1129  pkt->data = subtitle_out;
1130  pkt->size = subtitle_out_size;
1131  pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1132  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1133  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1134  /* XXX: the pts correction is handled here. Maybe handling
1135  it in the codec would be better */
1136  if (i == 0)
1137  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1138  else
1139  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1140  }
1141  pkt->dts = pkt->pts;
1142  output_packet(of, pkt, ost, 0);
1143  }
1144 }
1145 
/**
 * Sync, duplicate/drop, encode and mux one video frame.
 *
 * Applies the selected video sync method (CFR, VFR, VSCFR, passthrough,
 * drop) to decide how many copies of next_picture — or of the previously
 * kept frame — should be emitted, then sends each copy to the encoder and
 * forwards every produced packet to the muxer via output_packet().
 *
 * @param of           output file the stream belongs to
 * @param ost          output stream being encoded
 * @param next_picture filtered frame to emit, or NULL to flush at EOF
 *
 * NOTE(review): this chunk is a doxygen extraction; gaps in the embedded
 * line numbers (e.g. 1272->1274, 1371->1373, 1384->1388, 1418->1420) mark
 * original source lines that are absent here, so a few statements below
 * appear truncated.
 */
1146 static void do_video_out(OutputFile *of,
1147  OutputStream *ost,
1148  AVFrame *next_picture)
1149 {
1150  int ret, format_video_sync;
1151  AVPacket *pkt = ost->pkt;
1152  AVCodecContext *enc = ost->enc_ctx;
1153  AVRational frame_rate;
1154  int nb_frames, nb0_frames, i;
1155  double delta, delta0;
1156  double duration = 0;
1157  double sync_ipts = AV_NOPTS_VALUE;
1158  int frame_size = 0;
1159  InputStream *ist = NULL;
1160  AVFilterContext *filter = ost->filter->filter;
1161
1162  init_output_stream_wrapper(ost, next_picture, 1);
1163  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1164
1165  if (ost->source_index >= 0)
1166  ist = input_streams[ost->source_index];
1167
/* Derive the per-frame duration (in encoder time-base units) from the sink
 * frame rate; it is refined below from the forced output rate and from the
 * incoming frame's own pkt_duration when available. */
1168  frame_rate = av_buffersink_get_frame_rate(filter);
1169  if (frame_rate.num > 0 && frame_rate.den > 0)
1170  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1171
1172  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1173  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1174
1175  if (!ost->filters_script &&
1176  !ost->filters &&
1177  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1178  next_picture &&
1179  ist &&
1180  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1181  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1182  }
1183
1184  if (!next_picture) {
1185  //end, flushing
/* On flush, predict how many duplicates of the last frame are still owed
 * using the median of the last three nb0_frames values. */
1186  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1187  ost->last_nb0_frames[1],
1188  ost->last_nb0_frames[2]);
1189  } else {
1190  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1191  delta = delta0 + duration;
1192
1193  /* by default, we output a single frame */
1194  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1195  nb_frames = 1;
1196
1197  format_video_sync = video_sync_method;
1198  if (format_video_sync == VSYNC_AUTO) {
1199  if(!strcmp(of->ctx->oformat->name, "avi")) {
1200  format_video_sync = VSYNC_VFR;
1201  } else
1202  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1203  if ( ist
1204  && format_video_sync == VSYNC_CFR
1205  && input_files[ist->file_index]->ctx->nb_streams == 1
1206  && input_files[ist->file_index]->input_ts_offset == 0) {
1207  format_video_sync = VSYNC_VSCFR;
1208  }
1209  if (format_video_sync == VSYNC_CFR && copy_ts) {
1210  format_video_sync = VSYNC_VSCFR;
1211  }
1212  }
1213  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1214
/* A small negative drift that the frame's duration still covers is absorbed
 * by clipping the frame rather than dropping it. */
1215  if (delta0 < 0 &&
1216  delta > 0 &&
1217  format_video_sync != VSYNC_PASSTHROUGH &&
1218  format_video_sync != VSYNC_DROP) {
1219  if (delta0 < -0.6) {
1220  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1221  } else
1222  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1223  sync_ipts = ost->sync_opts;
1224  duration += delta0;
1225  delta0 = 0;
1226  }
1227
/* NOTE: the VSYNC_VSCFR case deliberately falls through into VSYNC_CFR. */
1228  switch (format_video_sync) {
1229  case VSYNC_VSCFR:
1230  if (ost->frame_number == 0 && delta0 >= 0.5) {
1231  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1232  delta = duration;
1233  delta0 = 0;
1234  ost->sync_opts = llrint(sync_ipts);
1235  }
1236  case VSYNC_CFR:
1237  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1238  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1239  nb_frames = 0;
1240  } else if (delta < -1.1)
1241  nb_frames = 0;
1242  else if (delta > 1.1) {
1243  nb_frames = lrintf(delta);
1244  if (delta0 > 1.1)
1245  nb0_frames = llrintf(delta0 - 0.6);
1246  }
1247  break;
1248  case VSYNC_VFR:
1249  if (delta <= -0.6)
1250  nb_frames = 0;
1251  else if (delta > 0.6)
1252  ost->sync_opts = llrint(sync_ipts);
1253  break;
1254  case VSYNC_DROP:
1255  case VSYNC_PASSTHROUGH:
1256  ost->sync_opts = llrint(sync_ipts);
1257  break;
1258  default:
1259  av_assert0(0);
1260  }
1261  }
1262
1263  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1264  nb0_frames = FFMIN(nb0_frames, nb_frames);
1265
/* Rotate the nb0_frames history consumed by the mid_pred() flush heuristic
 * above. */
1266  memmove(ost->last_nb0_frames + 1,
1267  ost->last_nb0_frames,
1268  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1269  ost->last_nb0_frames[0] = nb0_frames;
1270
1271  if (nb0_frames == 0 && ost->last_dropped) {
1272  nb_frames_drop++;
/* NOTE(review): the av_log() call of orig. line 1273 is missing from this
 * extraction; the two lines below are its arguments. */
1274  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1275  ost->frame_number, ost->st->index, ost->last_frame->pts);
1276  }
1277  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1278  if (nb_frames > dts_error_threshold * 30) {
1279  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1280  nb_frames_drop++;
1281  return;
1282  }
1283  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1284  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1285  if (nb_frames_dup > dup_warning) {
1286  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1287  dup_warning *= 10;
1288  }
1289  }
1290  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1291
1292  /* duplicates frame if needed */
1293  for (i = 0; i < nb_frames; i++) {
1294  AVFrame *in_picture;
1295  int forced_keyframe = 0;
1296  double pts_time;
1297
/* The first nb0_frames iterations re-send the previously kept frame. */
1298  if (i < nb0_frames && ost->last_frame) {
1299  in_picture = ost->last_frame;
1300  } else
1301  in_picture = next_picture;
1302
1303  if (!in_picture)
1304  return;
1305
1306  in_picture->pts = ost->sync_opts;
1307
1308  if (!check_recording_time(ost))
1309  return;
1310
1311  in_picture->quality = enc->global_quality;
1312  in_picture->pict_type = 0;
1313
1314  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1315  in_picture->pts != AV_NOPTS_VALUE)
1316  ost->forced_kf_ref_pts = in_picture->pts;
1317
/* Forced keyframes: explicit pts list, -force_key_frames expression, or
 * "source" (mirror the input stream's keyframes). */
1318  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1319  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1320  if (ost->forced_kf_index < ost->forced_kf_count &&
1321  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1322  ost->forced_kf_index++;
1323  forced_keyframe = 1;
1324  } else if (ost->forced_keyframes_pexpr) {
1325  double res;
1326  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1327  res = av_expr_eval(ost->forced_keyframes_pexpr,
1328  ost->forced_keyframes_expr_const_values, NULL);
1329  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1330  ost->forced_keyframes_expr_const_values[FKF_N],
1331  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1332  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1333  ost->forced_keyframes_expr_const_values[FKF_T],
1334  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1335  res);
1336  if (res) {
1337  forced_keyframe = 1;
1338  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1339  ost->forced_keyframes_expr_const_values[FKF_N];
1340  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1341  ost->forced_keyframes_expr_const_values[FKF_T];
1342  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1343  }
1344
1345  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1346  } else if ( ost->forced_keyframes
1347  && !strncmp(ost->forced_keyframes, "source", 6)
1348  && in_picture->key_frame==1
1349  && !i) {
1350  forced_keyframe = 1;
1351  }
1352
1353  if (forced_keyframe) {
1354  in_picture->pict_type = AV_PICTURE_TYPE_I;
1355  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1356  }
1357
1359  if (debug_ts) {
1360  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1361  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1362  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1363  enc->time_base.num, enc->time_base.den);
1364  }
1365
1366  ost->frames_encoded++;
1367
1368  ret = avcodec_send_frame(enc, in_picture);
1369  if (ret < 0)
1370  goto error;
1371  // Make sure Closed Captions will not be duplicated
/* NOTE(review): the statement of orig. line 1372 (the CC side-data removal
 * the comment above refers to) is absent from this extraction. */
1373
/* Drain every packet the encoder has ready; EAGAIN means it needs more
 * input frames before producing output. */
1374  while (1) {
1376  ret = avcodec_receive_packet(enc, pkt);
1377  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1378  if (ret == AVERROR(EAGAIN))
1379  break;
1380  if (ret < 0)
1381  goto error;
1382
1383  if (debug_ts) {
1384  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1385  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1388  }
1389
1391  pkt->pts = ost->sync_opts;
1392
1393  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1394
1395  if (debug_ts) {
1396  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1397  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1398  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
1399  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
1400  }
1401
1402  frame_size = pkt->size;
1403  output_packet(of, pkt, ost, 0);
1404
1405  /* if two pass, output log */
1406  if (ost->logfile && enc->stats_out) {
1407  fprintf(ost->logfile, "%s", enc->stats_out);
1408  }
1409  }
1410  ost->sync_opts++;
1411  /*
1412  * For video, number of frames in == number of packets out.
1413  * But there may be reordering, so we can't throw away frames on encoder
1414  * flush, we need to limit them here, before they go into encoder.
1415  */
1416  ost->frame_number++;
1417
1418  if (vstats_filename && frame_size)
/* NOTE(review): the do_video_stats() call of orig. line 1419 is missing
 * from this extraction. */
1420  }
1421
/* Remember the current frame so it can be re-sent as a duplicate later. */
1422  if (!ost->last_frame)
1423  ost->last_frame = av_frame_alloc();
1424  av_frame_unref(ost->last_frame);
1425  if (next_picture && ost->last_frame)
1426  av_frame_ref(ost->last_frame, next_picture);
1427  else
1428  av_frame_free(&ost->last_frame);
1429
1430  return;
1431 error:
1432  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1433  exit_program(1);
1434 }
1435 
1436 static double psnr(double d)
1437 {
1438  return -10.0 * log10(d);
1439 }
1440 
/*
 * Body of do_video_stats(OutputStream *ost, int frame_size).
 * NOTE(review): the signature line (orig. 1441) is absent from this
 * extraction.
 *
 * Appends one record per video frame to the file named by -vstats_file
 * (frame number, quantizer, PSNR when available, frame size, elapsed time,
 * instantaneous and average bitrate, picture type). The file is opened
 * lazily on the first call.
 */
1442 {
1443  AVCodecContext *enc;
1444  int frame_number;
1445  double ti1, bitrate, avg_bitrate;
1446
1447  /* this is executed just the first time do_video_stats is called */
1448  if (!vstats_file) {
1449  vstats_file = fopen(vstats_filename, "w");
1450  if (!vstats_file) {
1451  perror("fopen");
1452  exit_program(1);
1453  }
1454  }
1455
1456  enc = ost->enc_ctx;
1457  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1458  frame_number = ost->st->nb_frames;
/* vstats_version 2 adds the output file/stream indices to each record. */
1459  if (vstats_version <= 1) {
1460  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1461  ost->quality / (float)FF_QP2LAMBDA);
1462  } else {
1463  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1464  ost->quality / (float)FF_QP2LAMBDA);
1465  }
1466
1467  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1468  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1469
1470  fprintf(vstats_file,"f_size= %6d ", frame_size);
1471  /* compute pts value */
1472  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Clamp to avoid dividing by ~0 in the average bitrate below. */
1473  if (ti1 < 0.01)
1474  ti1 = 0.01;
1475
1476  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1477  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1478  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1479  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1480  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1481  }
1482 }
1483 
/*
 * Body of finish_output_stream(OutputStream *ost).
 * NOTE(review): the signature line (orig. 1484) is absent from this
 * extraction.
 *
 * Marks the stream as done for both encoder and muxer; with -shortest, the
 * loop below iterates over all streams of the owning output file (its body,
 * orig. line 1493, is also missing from this extraction).
 */
1485 {
1486  OutputFile *of = output_files[ost->file_index];
1487  int i;
1488
1489  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1490
1491  if (of->shortest) {
1492  for (i = 0; i < of->ctx->nb_streams; i++)
1494  }
1495 }
1496 
1497 /**
1498  * Get and encode new output from any of the filtergraphs, without causing
1499  * activity.
1500  *
1501  * @return 0 for success, <0 for severe errors
1502  */
/*
 * NOTE(review): several original lines are missing from this extraction
 * (e.g. the OutputStream/AVFilterContext declarations at orig. 1510/1512,
 * the audio-encoder early-init call around orig. 1529-1530, and the flags
 * argument of av_buffersink_get_frame_flags at orig. 1542).
 */
1503 static int reap_filters(int flush)
1504 {
1505  AVFrame *filtered_frame = NULL;
1506  int i;
1507
1508  /* Reap all buffers present in the buffer sinks */
1509  for (i = 0; i < nb_output_streams; i++) {
1511  OutputFile *of = output_files[ost->file_index];
1513  AVCodecContext *enc = ost->enc_ctx;
1514  int ret = 0;
1515
1516  if (!ost->filter || !ost->filter->graph->graph)
1517  continue;
1518  filter = ost->filter->filter;
1519
1520  /*
1521  * Unlike video, with audio the audio frame size matters.
1522  * Currently we are fully reliant on the lavfi filter chain to
1523  * do the buffering deed for us, and thus the frame size parameter
1524  * needs to be set accordingly. Where does one get the required
1525  * frame size? From the initialized AVCodecContext of an audio
1526  * encoder. Thus, if we have gotten to an audio stream, initialize
1527  * the encoder earlier than receiving the first AVFrame.
1528  */
1531
/* Lazily allocate the per-stream packet and frame reused on every reap. */
1532  if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
1533  return AVERROR(ENOMEM);
1534  }
1535  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1536  return AVERROR(ENOMEM);
1537  }
1538  filtered_frame = ost->filtered_frame;
1539
/* Pull every frame the sink currently holds; on EOF during a flush,
 * do_video_out(NULL) drains the video encoder. */
1540  while (1) {
1541  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1543  if (ret < 0) {
1544  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1546  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1547  } else if (flush && ret == AVERROR_EOF) {
1549  do_video_out(of, ost, NULL);
1550  }
1551  break;
1552  }
1553  if (ost->finished) {
1554  av_frame_unref(filtered_frame);
1555  continue;
1556  }
1557
1558  switch (av_buffersink_get_type(filter)) {
1559  case AVMEDIA_TYPE_VIDEO:
1560  if (!ost->frame_aspect_ratio.num)
1561  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1562
1563  do_video_out(of, ost, filtered_frame);
1564  break;
1565  case AVMEDIA_TYPE_AUDIO:
1566  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1567  enc->channels != filtered_frame->channels) {
1569  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1570  break;
1571  }
1572  do_audio_out(of, ost, filtered_frame);
1573  break;
1574  default:
1575  // TODO support subtitle filters
1576  av_assert0(0);
1577  }
1578
1579  av_frame_unref(filtered_frame);
1580  }
1581  }
1582
1583  return 0;
1584 }
1585 
/**
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and (at verbose level) per-stream demux/mux
 * statistics for every input and output file.
 *
 * @param total_size total number of bytes written to the first output file
 *
 * NOTE(review): doxygen extraction gaps — the OutputStream declarations at
 * orig. 1596/1668 and the second half of the pass1 condition at orig. 1606
 * are missing here.
 */
1586 static void print_final_stats(int64_t total_size)
1587 {
1588  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1589  uint64_t subtitle_size = 0;
1590  uint64_t data_size = 0;
1591  float percent = -1.0;
1592  int i, j;
1593  int pass1_used = 1;
1594
/* Accumulate encoded payload sizes by media type across all output streams. */
1595  for (i = 0; i < nb_output_streams; i++) {
1597  switch (ost->enc_ctx->codec_type) {
1598  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1599  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1600  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1601  default: other_size += ost->data_size; break;
1602  }
1603  extra_size += ost->enc_ctx->extradata_size;
1604  data_size += ost->data_size;
1605  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1607  pass1_used = 0;
1608  }
1609
/* Muxing overhead = container bytes beyond the raw encoded payload. */
1610  if (data_size && total_size>0 && total_size >= data_size)
1611  percent = 100.0 * (total_size - data_size) / data_size;
1612
1613  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1614  video_size / 1024.0,
1615  audio_size / 1024.0,
1616  subtitle_size / 1024.0,
1617  other_size / 1024.0,
1618  extra_size / 1024.0);
1619  if (percent >= 0.0)
1620  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1621  else
1622  av_log(NULL, AV_LOG_INFO, "unknown");
1623  av_log(NULL, AV_LOG_INFO, "\n");
1624
1625  /* print verbose per-stream stats */
1626  for (i = 0; i < nb_input_files; i++) {
1627  InputFile *f = input_files[i];
1628  uint64_t total_packets = 0, total_size = 0;
1629
1630  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1631  i, f->ctx->url);
1632
1633  for (j = 0; j < f->nb_streams; j++) {
1634  InputStream *ist = input_streams[f->ist_index + j];
1635  enum AVMediaType type = ist->dec_ctx->codec_type;
1636
1637  total_size += ist->data_size;
1638  total_packets += ist->nb_packets;
1639
1640  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1641  i, j, media_type_string(type));
1642  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1643  ist->nb_packets, ist->data_size);
1644
1645  if (ist->decoding_needed) {
1646  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1647  ist->frames_decoded);
1648  if (type == AVMEDIA_TYPE_AUDIO)
1649  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1650  av_log(NULL, AV_LOG_VERBOSE, "; ");
1651  }
1652
1653  av_log(NULL, AV_LOG_VERBOSE, "\n");
1654  }
1655
1656  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1657  total_packets, total_size);
1658  }
1659
1660  for (i = 0; i < nb_output_files; i++) {
1661  OutputFile *of = output_files[i];
1662  uint64_t total_packets = 0, total_size = 0;
1663
1664  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1665  i, of->ctx->url);
1666
1667  for (j = 0; j < of->ctx->nb_streams; j++) {
1669  enum AVMediaType type = ost->enc_ctx->codec_type;
1670
1671  total_size += ost->data_size;
1672  total_packets += ost->packets_written;
1673
1674  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1675  i, j, media_type_string(type));
1676  if (ost->encoding_needed) {
1677  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1678  ost->frames_encoded);
1679  if (type == AVMEDIA_TYPE_AUDIO)
1680  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1681  av_log(NULL, AV_LOG_VERBOSE, "; ");
1682  }
1683
1684  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1685  ost->packets_written, ost->data_size);
1686
1687  av_log(NULL, AV_LOG_VERBOSE, "\n");
1688  }
1689
1690  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1691  total_packets, total_size);
1692  }
/* Empty output usually means a two-pass first pass or bad trim options. */
1693  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1694  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1695  if (pass1_used) {
1696  av_log(NULL, AV_LOG_WARNING, "\n");
1697  } else {
1698  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1699  }
1700  }
1701 }
1702 
/**
 * Emit the periodic status line ("frame= ... fps= ... bitrate= ...") and,
 * when -progress is active, the machine-readable key=value progress block.
 * On the final call it also triggers print_final_stats().
 *
 * @param is_last_report non-zero for the final report at the end of the run
 * @param timer_start    start time of the transcode, in microseconds
 * @param cur_time       current time, in microseconds
 *
 * NOTE(review): doxygen extraction gaps — e.g. the av_bprint_init of buf at
 * orig. 1745, the pts update around orig. 1813-1814 and 1818-1820, and the
 * avio_flush at orig. 1898 are missing here.
 */
1703 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1704 {
1705  AVBPrint buf, buf_script;
1706  OutputStream *ost;
1707  AVFormatContext *oc;
1708  int64_t total_size;
1709  AVCodecContext *enc;
1710  int frame_number, vid, i;
1711  double bitrate;
1712  double speed;
1713  int64_t pts = INT64_MIN + 1;
1714  static int64_t last_time = -1;
1715  static int first_report = 1;
1716  static int qp_histogram[52];
1717  int hours, mins, secs, us;
1718  const char *hours_sign;
1719  int ret;
1720  float t;
1721
1722  if (!print_stats && !is_last_report && !progress_avio)
1723  return;
1724
/* Rate-limit intermediate reports to one per stats_period. */
1725  if (!is_last_report) {
1726  if (last_time == -1) {
1727  last_time = cur_time;
1728  }
1729  if (((cur_time - last_time) < stats_period && !first_report) ||
1730  (first_report && nb_output_dumped < nb_output_files))
1731  return;
1732  last_time = cur_time;
1733  }
1734
1735  t = (cur_time-timer_start) / 1000000.0;
1736
1737
1738  oc = output_files[0]->ctx;
1739
1740  total_size = avio_size(oc->pb);
1741  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1742  total_size = avio_tell(oc->pb);
1743
1744  vid = 0;
1746  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1747  for (i = 0; i < nb_output_streams; i++) {
1748  float q = -1;
1749  ost = output_streams[i];
1750  enc = ost->enc_ctx;
1751  if (!ost->stream_copy)
1752  q = ost->quality / (float) FF_QP2LAMBDA;
1753
/* Only the first video stream contributes the frame/fps fields; further
 * video streams just append their quantizer. */
1754  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1755  av_bprintf(&buf, "q=%2.1f ", q);
1756  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1757  ost->file_index, ost->index, q);
1758  }
1759  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1760  float fps;
1761
1762  frame_number = ost->frame_number;
1763  fps = t > 1 ? frame_number / t : 0;
1764  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1765  frame_number, fps < 9.95, fps, q);
1766  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1767  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1768  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1769  ost->file_index, ost->index, q);
1770  if (is_last_report)
1771  av_bprintf(&buf, "L");
1772  if (qp_hist) {
1773  int j;
1774  int qp = lrintf(q);
1775  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1776  qp_histogram[qp]++;
1777  for (j = 0; j < 32; j++)
1778  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1779  }
1780
1781  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1782  int j;
1783  double error, error_sum = 0;
1784  double scale, scale_sum = 0;
1785  double p;
1786  char type[3] = { 'Y','U','V' };
1787  av_bprintf(&buf, "PSNR=");
1788  for (j = 0; j < 3; j++) {
1789  if (is_last_report) {
1790  error = enc->error[j];
1791  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1792  } else {
1793  error = ost->error[j];
1794  scale = enc->width * enc->height * 255.0 * 255.0;
1795  }
/* Chroma planes are subsampled, hence a quarter of the luma area. */
1796  if (j)
1797  scale /= 4;
1798  error_sum += error;
1799  scale_sum += scale;
1800  p = psnr(error / scale);
1801  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1802  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1803  ost->file_index, ost->index, type[j] | 32, p);
1804  }
1805  p = psnr(error_sum / scale_sum);
1806  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1807  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1808  ost->file_index, ost->index, p);
1809  }
1810  vid = 1;
1811  }
1812  /* compute min output value */
1815  ost->st->time_base, AV_TIME_BASE_Q));
1816  if (copy_ts) {
1817  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1821  }
1822  }
1823
1824  if (is_last_report)
1825  nb_frames_drop += ost->last_dropped;
1826  }
1827
/* Split the microsecond pts into sign, H:M:S and fractional components. */
1828  secs = FFABS(pts) / AV_TIME_BASE;
1829  us = FFABS(pts) % AV_TIME_BASE;
1830  mins = secs / 60;
1831  secs %= 60;
1832  hours = mins / 60;
1833  mins %= 60;
1834  hours_sign = (pts < 0) ? "-" : "";
1835
1836  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1837  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1838
1839  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1840  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1841  if (pts == AV_NOPTS_VALUE) {
1842  av_bprintf(&buf, "N/A ");
1843  } else {
1844  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1845  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1846  }
1847
1848  if (bitrate < 0) {
1849  av_bprintf(&buf, "bitrate=N/A");
1850  av_bprintf(&buf_script, "bitrate=N/A\n");
1851  }else{
1852  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1853  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1854  }
1855
1856  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1857  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1858  if (pts == AV_NOPTS_VALUE) {
1859  av_bprintf(&buf_script, "out_time_us=N/A\n");
1860  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1861  av_bprintf(&buf_script, "out_time=N/A\n");
1862  } else {
1863  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1864  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1865  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1866  hours_sign, hours, mins, secs, us);
1867  }
1868
1870  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1871  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1872  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1873
1874  if (speed < 0) {
1875  av_bprintf(&buf, " speed=N/A");
1876  av_bprintf(&buf_script, "speed=N/A\n");
1877  } else {
1878  av_bprintf(&buf, " speed=%4.3gx", speed);
1879  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1880  }
1881
/* '\r' overwrites the line in place for intermediate reports. */
1882  if (print_stats || is_last_report) {
1883  const char end = is_last_report ? '\n' : '\r';
1884  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1885  fprintf(stderr, "%s %c", buf.str, end);
1886  } else
1887  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1888
1889  fflush(stderr);
1890  }
1891  av_bprint_finalize(&buf, NULL);
1892
1893  if (progress_avio) {
1894  av_bprintf(&buf_script, "progress=%s\n",
1895  is_last_report ? "end" : "continue");
1896  avio_write(progress_avio, buf_script.str,
1897  FFMIN(buf_script.len, buf_script.size - 1));
1899  av_bprint_finalize(&buf_script, NULL);
1900  if (is_last_report) {
1901  if ((ret = avio_closep(&progress_avio)) < 0)
1903  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1904  }
1905  }
1906
1907  first_report = 0;
1908
1909  if (is_last_report)
1910  print_final_stats(total_size);
1911 }
1912 
/*
 * Body of ifilter_parameters_from_codecpar(InputFilter *ifilter,
 * AVCodecParameters *par).
 * NOTE(review): the signature line (orig. 1913) is absent from this
 * extraction.
 *
 * Copies the container-level stream parameters into the input filter; used
 * as a fallback when no decoded frame ever arrived to provide them.
 */
1914 {
1915  // We never got any input. Set a fake format, which will
1916  // come from libavformat.
1917  ifilter->format = par->format;
1918  ifilter->sample_rate = par->sample_rate;
1919  ifilter->channels = par->channels;
1920  ifilter->channel_layout = par->channel_layout;
1921  ifilter->width = par->width;
1922  ifilter->height = par->height;
1923  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1924 }
1925 
/**
 * Drain every encoder at end of stream: initialize never-started streams so
 * they can be finalized, send the NULL (flush) frame, and forward all
 * remaining packets to the muxer.
 *
 * NOTE(review): doxygen extraction gaps — e.g. the av_log call at
 * orig. 1943, the fg->graph guard around orig. 1955, the stream-init calls
 * around orig. 1964-1970, and the send-frame/receive-packet lines at
 * orig. 1992-1994 are missing here.
 */
1926 static void flush_encoders(void)
1927 {
1928  int i, ret;
1929
1930  for (i = 0; i < nb_output_streams; i++) {
1932  AVCodecContext *enc = ost->enc_ctx;
1933  OutputFile *of = output_files[ost->file_index];
1934
1935  if (!ost->encoding_needed)
1936  continue;
1937
1938  // Try to enable encoding with no input frames.
1939  // Maybe we should just let encoding fail instead.
1940  if (!ost->initialized) {
1941  FilterGraph *fg = ost->filter->graph;
1942
1944  "Finishing stream %d:%d without any data written to it.\n",
1945  ost->file_index, ost->st->index);
1946
/* Configure the filtergraph with container-derived parameters so the
 * encoder can still be opened for a stream that never saw a frame. */
1947  if (ost->filter && !fg->graph) {
1948  int x;
1949  for (x = 0; x < fg->nb_inputs; x++) {
1950  InputFilter *ifilter = fg->inputs[x];
1951  if (ifilter->format < 0)
1952  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1953  }
1954
1956  continue;
1957
1958  ret = configure_filtergraph(fg);
1959  if (ret < 0) {
1960  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1961  exit_program(1);
1962  }
1963
1965  }
1966
1968  }
1969
1971  continue;
1972
/* Drain loop: keep receiving packets until the encoder signals EOF. */
1973  for (;;) {
1974  const char *desc = NULL;
1975  AVPacket *pkt = ost->pkt;
1976  int pkt_size;
1977
1978  if (!pkt)
1979  break;
1980
1981  switch (enc->codec_type) {
1982  case AVMEDIA_TYPE_AUDIO:
1983  desc = "audio";
1984  break;
1985  case AVMEDIA_TYPE_VIDEO:
1986  desc = "video";
1987  break;
1988  default:
1989  av_assert0(0);
1990  }
1991
1993
/* EAGAIN here means the encoder still wants input; feed it the NULL
 * flush frame until packets start coming out. */
1995  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1996  ret = avcodec_send_frame(enc, NULL);
1997  if (ret < 0) {
1998  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1999  desc,
2000  av_err2str(ret));
2001  exit_program(1);
2002  }
2003  }
2004
2005  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2006  if (ret < 0 && ret != AVERROR_EOF) {
2007  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2008  desc,
2009  av_err2str(ret));
2010  exit_program(1);
2011  }
2012  if (ost->logfile && enc->stats_out) {
2013  fprintf(ost->logfile, "%s", enc->stats_out);
2014  }
/* EOF: tell the muxer this stream is finished and stop draining. */
2015  if (ret == AVERROR_EOF) {
2016  output_packet(of, pkt, ost, 1);
2017  break;
2018  }
2019  if (ost->finished & MUXER_FINISHED) {
2021  continue;
2022  }
2023  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
2024  pkt_size = pkt->size;
2025  output_packet(of, pkt, ost, 0);
2026  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2027  do_video_stats(ost, pkt_size);
2028  }
2029  }
2030  }
2031 }
2032 
2033 /*
2034  * Check whether a packet from ist should be written into ost at this time
2035  */
/*
 * Body of check_output_constraints(InputStream *ist, OutputStream *ost).
 * NOTE(review): the signature line (orig. 2036) is absent from this
 * extraction.
 *
 * Returns 1 when a packet from ist may be written into ost right now:
 * ost must be fed from this input stream, not be finished, and the
 * packet's time must have reached the output file's -ss start point.
 */
2037 {
2038  OutputFile *of = output_files[ost->file_index];
2039  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2040
2041  if (ost->source_index != ist_index)
2042  return 0;
2043
2044  if (ost->finished)
2045  return 0;
2046
2047  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2048  return 0;
2049
2050  return 1;
2051 }
2052 
/*
 * Body of do_streamcopy(InputStream *ist, OutputStream *ost,
 * const AVPacket *pkt).
 * NOTE(review): the signature line (orig. 2053) and a few interior lines
 * (e.g. the duration initialization at orig. 2111) are absent from this
 * extraction.
 *
 * Forwards one packet from input to output in codec-copy mode: honours
 * -ss / -t trimming, rescales timestamps from the input stream time base
 * into the muxer time base, and hands the result to output_packet().
 * A NULL pkt flushes the output bitstream filters.
 */
2054 {
2055  OutputFile *of = output_files[ost->file_index];
2056  InputFile *f = input_files [ist->file_index];
2057  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2058  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2059  AVPacket *opkt = ost->pkt;
2060
2061  av_packet_unref(opkt);
2062  // EOF: flush output bitstream filters.
2063  if (!pkt) {
2064  output_packet(of, opkt, ost, 1);
2065  return;
2066  }
2067
/* Before the first keyframe, packets are normally not copyable. */
2068  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2069  !ost->copy_initial_nonkeyframes)
2070  return;
2071
2072  if (!ost->frame_number && !ost->copy_prior_start) {
2073  int64_t comp_start = start_time;
2074  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2075  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2076  if (pkt->pts == AV_NOPTS_VALUE ?
2077  ist->pts < comp_start :
2078  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2079  return;
2080  }
2081
/* Stop copying once the output or input recording time (-t) is exceeded. */
2082  if (of->recording_time != INT64_MAX &&
2083  ist->pts >= of->recording_time + start_time) {
2085  return;
2086  }
2087
2088  if (f->recording_time != INT64_MAX) {
2089  start_time = f->ctx->start_time;
2090  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2091  start_time += f->start_time;
2092  if (ist->pts >= f->recording_time + start_time) {
2094  return;
2095  }
2096  }
2097
2098  /* force the input stream PTS */
2099  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2100  ost->sync_opts++;
2101
2102  if (av_packet_ref(opkt, pkt) < 0)
2103  exit_program(1);
2104
2105  if (pkt->pts != AV_NOPTS_VALUE)
2106  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2107
2108  if (pkt->dts == AV_NOPTS_VALUE) {
2109  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2110  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2112  if(!duration)
2113  duration = ist->dec_ctx->frame_size;
/* Audio dts is rescaled sample-accurately via av_rescale_delta(). */
2114  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2115  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2116  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2117  /* dts will be set immediately afterwards to what pts is now */
2118  opkt->pts = opkt->dts - ost_tb_start_time;
2119  } else
2120  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2121  opkt->dts -= ost_tb_start_time;
2122
2123  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2124
2125  output_packet(of, opkt, ost, 0);
2126 }
2127 
/*
 * Body of guess_input_channel_layout(InputStream *ist).
 * NOTE(review): the signature line (orig. 2128) is absent from this
 * extraction.
 *
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn about the
 * guess. Returns 1 when a layout is set (or was already set), 0 when no
 * guess could be made.
 */
2129 {
2130  AVCodecContext *dec = ist->dec_ctx;
2131
2132  if (!dec->channel_layout) {
2133  char layout_name[256];
2134
2135  if (dec->channels > ist->guess_layout_max)
2136  return 0;
2137  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2138  if (!dec->channel_layout)
2139  return 0;
2140  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2141  dec->channels, dec->channel_layout);
2142  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2143  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2144  }
2145  return 1;
2146 }
2147 
/**
 * Track decode-error statistics and optionally abort on failure.
 *
 * Counts successful vs. failed decode attempts in decode_error_stat[] and,
 * with -xerror, exits on a decode error.
 *
 * @param ist        input stream the result belongs to (may be NULL)
 * @param got_output non-zero if the decoder produced a frame
 * @param ret        return code of the decode call
 *
 * NOTE(review): the condition guarding the corrupt-frame av_log below
 * (orig. lines 2157-2158) is absent from this extraction.
 */
2148 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2149 {
2150  if (*got_output || ret<0)
2151  decode_error_stat[ret<0] ++;
2152
2153  if (ret < 0 && exit_on_error)
2154  exit_program(1);
2155
2156  if (*got_output && ist) {
2159  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2160  if (exit_on_error)
2161  exit_program(1);
2162  }
2163  }
2164 }
2165 
2166 // Filters can be configured only if the formats of all inputs are known.
/*
 * Body of ifilter_has_all_input_formats(FilterGraph *fg).
 * NOTE(review): the signature line (orig. 2167) is absent from this
 * extraction.
 *
 * Returns 1 when every audio/video input of the filtergraph has a known
 * format (a precondition for configuring the graph), 0 otherwise.
 */
2168 {
2169  int i;
2170  for (i = 0; i < fg->nb_inputs; i++) {
2171  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2172  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2173  return 0;
2174  }
2175  return 1;
2176 }
2177 
/*
 * Body of ifilter_send_frame(InputFilter *ifilter, AVFrame *frame).
 * NOTE(review): the signature line (orig. 2178) and several interior lines
 * (the parameter update around orig. 2206, the frame clone at orig.
 * 2215-2218, and the buffersrc add call at orig. 2245) are absent from
 * this extraction.
 *
 * Feeds one decoded frame into its filtergraph input. If the frame's
 * parameters (format, dimensions, sample rate, channel layout, hw frames
 * context) differ from the filter's, the graph is reconfigured; frames
 * arriving before all inputs have known formats are queued in
 * ifilter->frame_queue instead. Returns 0 on success or a negative
 * AVERROR code.
 */
2179 {
2180  FilterGraph *fg = ifilter->graph;
2181  int need_reinit, ret, i;
2182
2183  /* determine if the parameters for this input changed */
2184  need_reinit = ifilter->format != frame->format;
2185
2186  switch (ifilter->ist->st->codecpar->codec_type) {
2187  case AVMEDIA_TYPE_AUDIO:
2188  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2189  ifilter->channels != frame->channels ||
2190  ifilter->channel_layout != frame->channel_layout;
2191  break;
2192  case AVMEDIA_TYPE_VIDEO:
2193  need_reinit |= ifilter->width != frame->width ||
2194  ifilter->height != frame->height;
2195  break;
2196  }
2197
2198  if (!ifilter->ist->reinit_filters && fg->graph)
2199  need_reinit = 0;
2200
/* A change in (or of) the hardware frames context also forces a reinit. */
2201  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2202  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2203  need_reinit = 1;
2204
2205  if (need_reinit) {
2207  if (ret < 0)
2208  return ret;
2209  }
2210
2211  /* (re)init the graph if possible, otherwise buffer the frame and return */
2212  if (need_reinit || !fg->graph) {
2213  for (i = 0; i < fg->nb_inputs; i++) {
2214  if (!ifilter_has_all_input_formats(fg)) {
2216  if (!tmp)
2217  return AVERROR(ENOMEM);
2219
/* Grow the FIFO if it is full before queueing the cloned frame. */
2220  if (!av_fifo_space(ifilter->frame_queue)) {
2221  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2222  if (ret < 0) {
2223  av_frame_free(&tmp);
2224  return ret;
2225  }
2226  }
2227  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2228  return 0;
2229  }
2230  }
2231
/* Drain pending output with the old graph before reconfiguring it. */
2232  ret = reap_filters(1);
2233  if (ret < 0 && ret != AVERROR_EOF) {
2234  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2235  return ret;
2236  }
2237
2238  ret = configure_filtergraph(fg);
2239  if (ret < 0) {
2240  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2241  return ret;
2242  }
2243  }
2244
2246  if (ret < 0) {
2247  if (ret != AVERROR_EOF)
2248  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2249  return ret;
2250  }
2251
2252  return 0;
2253 }
2254 
2256 {
2257  int ret;
2258 
2259  ifilter->eof = 1;
2260 
2261  if (ifilter->filter) {
2263  if (ret < 0)
2264  return ret;
2265  } else {
2266  // the filtergraph was never configured
2267  if (ifilter->format < 0)
2268  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2269  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2270  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2271  return AVERROR_INVALIDDATA;
2272  }
2273  }
2274 
2275  return 0;
2276 }
2277 
2278 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2279 // There is the following difference: if you got a frame, you must call
2280 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2281 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2282 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2283 {
2284  int ret;
2285 
2286  *got_frame = 0;
2287 
2288  if (pkt) {
2289  ret = avcodec_send_packet(avctx, pkt);
2290  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2291  // decoded frames with avcodec_receive_frame() until done.
2292  if (ret < 0 && ret != AVERROR_EOF)
2293  return ret;
2294  }
2295 
2296  ret = avcodec_receive_frame(avctx, frame);
2297  if (ret < 0 && ret != AVERROR(EAGAIN))
2298  return ret;
2299  if (ret >= 0)
2300  *got_frame = 1;
2301 
2302  return 0;
2303 }
2304 
2306 {
2307  int i, ret;
2308  AVFrame *f;
2309 
2310  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2311  for (i = 0; i < ist->nb_filters; i++) {
2312  if (i < ist->nb_filters - 1) {
2313  f = ist->filter_frame;
2315  if (ret < 0)
2316  break;
2317  } else
2318  f = decoded_frame;
2319  ret = ifilter_send_frame(ist->filters[i], f);
2320  if (ret == AVERROR_EOF)
2321  ret = 0; /* ignore */
2322  if (ret < 0) {
2324  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2325  break;
2326  }
2327  }
2328  return ret;
2329 }
2330 
/* Decode one audio packet (or drain with pkt == NULL), fix up the frame's
 * pts using the best available timestamp source, then hand the frame to the
 * filter network.
 * NOTE(review): this rendered view drops several lines — the first signature
 * line, the decoded_frame local, the exit path after the invalid-sample-rate
 * log, the check_decode_result() call, the next_dts/next_pts increments, and
 * the trailing send_frame_to_filters()/unref cleanup; gaps left as-is. */
    int *decode_failed)
{
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the per-stream scratch frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);

    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        /* NOTE(review): elided line — presumably sets ret to an error; confirm. */
    }

    if (ret != AVERROR_EOF)
        /* NOTE(review): elided — presumably check_decode_result(...); confirm. */

    if (!*got_output || ret < 0)
        return ret;

    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
        /* NOTE(review): elided — the (AV_TIME_BASE * nb_samples) numerators
         * of the next_dts/next_pts updates; only the divisors remain. */
        avctx->sample_rate;
        avctx->sample_rate;

    /* choose the timebase matching whichever pts source is available:
     * frame pts, packet pts, or the stream's running dts estimate */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* rescale into 1/sample_rate ticks, tracking rounding drift across calls */
    decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                          (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                          (AVRational){1, avctx->sample_rate});

    return err < 0 ? err : ret;
}
2392 
/* Decode one video packet (or drain with eof set), reconstruct a usable
 * timestamp from best_effort_timestamp / buffered dts values, and pass the
 * frame on to the filters.
 * NOTE(review): this rendered view drops several lines — the decoded_frame
 * local, the dts initializer, the decode() call, check_decode_result(), the
 * frame-side values in the mismatch log, the top_field_first assignment, the
 * hwaccel_retrieve_data condition, sample_aspect_ratio propagation, and the
 * send_frame_to_filters()/unref tail; gaps are left as-is. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* lazily allocate the per-stream scratch frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        pkt->dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* remember the dts so flushed frames can still get timestamps */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    /* NOTE(review): elided — update_benchmark(NULL) and the decode() call
     * that sets ret/got_output; confirm against the full source. */
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            /* NOTE(review): elided av_log() opener before this message */
            "video_delay is larger in decoder than demuxer %d > %d.\n"
            "If you want to help, upload a sample "
            "of this file to https://streams.videolan.org/upload/ "
            "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
            ist->dec_ctx->has_b_frames,
            ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        /* NOTE(review): elided — presumably check_decode_result(...); confirm. */

    if (*got_output && ret >= 0) {
        /* purely informational: log when decoder context and frame disagree */
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        /* NOTE(review): elided — presumably assigns top_field_first to the
         * decoded frame; confirm. */

    ist->frames_decoded++;

    /* NOTE(review): elided condition — presumably checks
     * ist->hwaccel_retrieve_data && frame is in hwaccel pix_fmt; confirm. */
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* under -r (forced cfr input) timestamps are simply sequential */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* while draining, fall back to the dts values queued at EOF */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        /* NOTE(review): elided — presumably copies the stream SAR onto the
         * frame; confirm. */

fail:
    /* NOTE(review): elided cleanup (frame unref calls) before return */
    return err < 0 ? err : ret;
}
2523 
/* Decode one subtitle packet, optionally fix its duration against the
 * previous subtitle (-fix_sub_duration), feed it to sub2video or queue it,
 * and encode it out to every subtitle output fed by this stream.
 * NOTE(review): this view drops the first signature line, the AVSubtitle
 * local, check_decode_result(), parts of the duration log, the third FFSWAP,
 * the sub-queue realloc/write, and the avsubtitle_free() call; gaps left
 * as-is. */
    int *decode_failed)
{
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* a zero-sized packet means flush: propagate EOF to sub2video */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* clamp the previous subtitle's display time so it ends when
             * this one starts (values in ms) */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* output the previous subtitle this call; keep the current one for
         * the next call (one-subtitle delay) */
        FFSWAP(int, *got_output, ist->prev_sub.got_output);
        FFSWAP(int, ret, ist->prev_sub.ret);
        /* NOTE(review): elided — the FFSWAP of the AVSubtitle itself */
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, &subtitle);
    } else if (ist->nb_filters) {
        /* sub2video not ready yet: queue subtitles until it is */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            /* NOTE(review): elided — presumably a FIFO realloc sets ret */
            if (ret < 0)
                exit_program(1);
        }
        /* NOTE(review): elided — the av_fifo_generic_write of the subtitle */
        free_sub = 0;
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): elided — OutputStream *ost = output_streams[i]; */

        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
            exit_program(1);
        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        /* NOTE(review): elided — presumably avsubtitle_free(&subtitle); */
    return ret;
}
2602 
2604 {
2605  int i, ret;
2606  /* TODO keep pts also in stream time base to avoid converting back */
2609 
2610  for (i = 0; i < ist->nb_filters; i++) {
2611  ret = ifilter_send_eof(ist->filters[i], pts);
2612  if (ret < 0)
2613  return ret;
2614  }
2615  return 0;
2616 }
2617 
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet dispatcher: maintains the stream's dts/pts estimates,
 * drives the type-specific decoders when decoding is needed, advances
 * timestamps for stream copy otherwise, and finally stream-copies the packet
 * to every matching output. Returns 0 once EOF has been reached, 1 otherwise.
 * NOTE(review): this rendered view drops a few lines — the 'ticks'
 * computation and framerate numerator in the video duration math, parts of
 * the stream-copy duration math, and the OutputStream lookup in the final
 * loop; gaps are left as-is. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket *avpkt;

    if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
        return AVERROR(ENOMEM);
    avpkt = ist->pkt;

    /* seed dts/pts from the first packet ever seen on this stream */
    if (!ist->saw_first_ts) {
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    /* take a private reference so the loop below owns the packet data */
    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, repeating ? NULL : avpkt, &got_output,
                                   &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* estimate the frame duration in dts terms: from the packet
                 * if it says so, otherwise from the codec framerate */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    /* NOTE(review): elided — the 'ticks' declaration
                     * (repeat_pict + 1 or ticks_per_frame) and the
                     * framerate.num divisor; confirm. */
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                /* NOTE(review): elided — presumably rescales pkt->duration
                 * into next_dts; confirm. */
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                /* NOTE(review): elided — presumably rescales pkt->duration
                 * into next_dts; confirm. */
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                /* NOTE(review): elided divisor — presumably framerate.num */
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): elided — OutputStream *ost = output_streams[i]; */

        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
            exit_program(1);
        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2808 
/* Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Does nothing until every output
 * file's header has been written.
 * NOTE(review): one line after avio_closep() is elided in this view
 * (presumably av_freep(&sdp_filename); confirm). */
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc;

    /* wait until all muxers have written their headers */
    for (i = 0; i < nb_output_files; i++) {
        if (!output_files[i]->header_written)
            return;
    }

    avc = av_malloc_array(nb_output_files, sizeof(*avc));
    if (!avc)
        exit_program(1);
    /* collect only the RTP muxer contexts */
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    if (!j)
        goto fail;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_print(sdp_pb, sdp);
            avio_closep(&sdp_pb);
        }
    }

fail:
    av_freep(&avc);
}
2853 
2855 {
2856  InputStream *ist = s->opaque;
2857  const enum AVPixelFormat *p;
2858  int ret;
2859 
2860  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2862  const AVCodecHWConfig *config = NULL;
2863  int i;
2864 
2865  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2866  break;
2867 
2868  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2869  ist->hwaccel_id == HWACCEL_AUTO) {
2870  for (i = 0;; i++) {
2871  config = avcodec_get_hw_config(s->codec, i);
2872  if (!config)
2873  break;
2874  if (!(config->methods &
2876  continue;
2877  if (config->pix_fmt == *p)
2878  break;
2879  }
2880  }
2881  if (config) {
2882  if (config->device_type != ist->hwaccel_device_type) {
2883  // Different hwaccel offered, ignore.
2884  continue;
2885  }
2886 
2888  if (ret < 0) {
2889  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2891  "%s hwaccel requested for input stream #%d:%d, "
2892  "but cannot be initialized.\n",
2894  ist->file_index, ist->st->index);
2895  return AV_PIX_FMT_NONE;
2896  }
2897  continue;
2898  }
2899  } else {
2900  const HWAccel *hwaccel = NULL;
2901  int i;
2902  for (i = 0; hwaccels[i].name; i++) {
2903  if (hwaccels[i].pix_fmt == *p) {
2904  hwaccel = &hwaccels[i];
2905  break;
2906  }
2907  }
2908  if (!hwaccel) {
2909  // No hwaccel supporting this pixfmt.
2910  continue;
2911  }
2912  if (hwaccel->id != ist->hwaccel_id) {
2913  // Does not match requested hwaccel.
2914  continue;
2915  }
2916 
2917  ret = hwaccel->init(s);
2918  if (ret < 0) {
2920  "%s hwaccel requested for input stream #%d:%d, "
2921  "but cannot be initialized.\n", hwaccel->name,
2922  ist->file_index, ist->st->index);
2923  return AV_PIX_FMT_NONE;
2924  }
2925  }
2926 
2927  if (ist->hw_frames_ctx) {
2928  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2929  if (!s->hw_frames_ctx)
2930  return AV_PIX_FMT_NONE;
2931  }
2932 
2933  ist->hwaccel_pix_fmt = *p;
2934  break;
2935  }
2936 
2937  return *p;
2938 }
2939 
2941 {
2942  InputStream *ist = s->opaque;
2943 
2944  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2945  return ist->hwaccel_get_buffer(s, frame, flags);
2946 
2948 }
2949 
/* Open the decoder for one input stream (if decoding is needed): install the
 * get_format/get_buffer callbacks, set decoder options, perform hw device
 * setup, and call avcodec_open2(). On failure a human-readable message is
 * written into 'error'.
 * NOTE(review): this view drops a few lines — part of the compute_edt
 * condition, the attached-pic condition, the hw_device_setup_for_decode()
 * call and the trailing assert_avoptions(); gaps left as-is. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        const AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque = ist;
        ist->dec_ctx->get_format = get_format;
        ist->dec_ctx->get_buffer2 = get_buffer;
#if LIBAVCODEC_VERSION_MAJOR < 60
        ist->dec_ctx->thread_safe_callbacks = 1;
#endif

        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            /* NOTE(review): elided condition guarding this warning */
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        /* NOTE(review): elided condition — presumably checks
         * AV_DISPOSITION_ATTACHED_PIC */
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        /* NOTE(review): elided call — presumably
         * ret = hw_device_setup_for_decode(ist); */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* NOTE(review): elided — presumably assert_avoptions(ist->decoder_opts); */
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
3015 
3017 {
3018  if (ost->source_index >= 0)
3019  return input_streams[ost->source_index];
3020  return NULL;
3021 }
3022 
/* qsort comparator for int64_t values: negative, zero or positive according
 * to the ordering of *a and *b. Written as a difference of comparisons so it
 * cannot overflow the way a plain subtraction would. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
3027 
/* open the muxer when all the streams are initialized */
/* Once every stream of the output file is initialized, write the muxer
 * header, emit the SDP if requested, and flush packets that were queued in
 * the per-stream muxing FIFOs while waiting.
 * NOTE(review): this view drops the signature, the two OutputStream lookups
 * and the header-error av_log opener; gaps left as-is. */
{
    int ret, i;

    /* bail out until every stream is ready */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        /* NOTE(review): elided — the OutputStream lookup for stream i */
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        /* NOTE(review): elided av_log(AV_LOG_ERROR, ...) opener */
        "Could not write header for output file #%d "
        "(incorrect codec parameters ?): %s\n",
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);
    nb_output_dumped++;

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        /* NOTE(review): elided — the OutputStream lookup for stream i */

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket *pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            ost->muxing_queue_data_size -= pkt->size;
            write_packet(of, pkt, ost, 1);
            av_packet_free(&pkt);
        }
    }

    return 0;
}
3077 
3079 {
3080  AVBSFContext *ctx = ost->bsf_ctx;
3081  int ret;
3082 
3083  if (!ctx)
3084  return 0;
3085 
3086  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3087  if (ret < 0)
3088  return ret;
3089 
3090  ctx->time_base_in = ost->st->time_base;
3091 
3092  ret = av_bsf_init(ctx);
3093  if (ret < 0) {
3094  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3095  ctx->filter->name);
3096  return ret;
3097  }
3098 
3099  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3100  if (ret < 0)
3101  return ret;
3102  ost->st->time_base = ctx->time_base_out;
3103 
3104  return 0;
3105 }
3106 
3108 {
3109  OutputFile *of = output_files[ost->file_index];
3111  AVCodecParameters *par_dst = ost->st->codecpar;
3112  AVCodecParameters *par_src = ost->ref_par;
3113  AVRational sar;
3114  int i, ret;
3115  uint32_t codec_tag = par_dst->codec_tag;
3116 
3117  av_assert0(ist && !ost->filter);
3118 
3119  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3120  if (ret >= 0)
3121  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3122  if (ret < 0) {
3124  "Error setting up codec context options.\n");
3125  return ret;
3126  }
3127 
3128  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3129  if (ret < 0) {
3131  "Error getting reference codec parameters.\n");
3132  return ret;
3133  }
3134 
3135  if (!codec_tag) {
3136  unsigned int codec_tag_tmp;
3137  if (!of->ctx->oformat->codec_tag ||
3138  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3139  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3140  codec_tag = par_src->codec_tag;
3141  }
3142 
3143  ret = avcodec_parameters_copy(par_dst, par_src);
3144  if (ret < 0)
3145  return ret;
3146 
3147  par_dst->codec_tag = codec_tag;
3148 
3149  if (!ost->frame_rate.num)
3150  ost->frame_rate = ist->framerate;
3151  ost->st->avg_frame_rate = ost->frame_rate;
3152 
3154  if (ret < 0)
3155  return ret;
3156 
3157  // copy timebase while removing common factors
3158  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3160 
3161  // copy estimated duration as a hint to the muxer
3162  if (ost->st->duration <= 0 && ist->st->duration > 0)
3163  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3164 
3165  // copy disposition
3166  ost->st->disposition = ist->st->disposition;
3167 
3168  if (ist->st->nb_side_data) {
3169  for (i = 0; i < ist->st->nb_side_data; i++) {
3170  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3171  uint8_t *dst_data;
3172 
3173  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3174  if (!dst_data)
3175  return AVERROR(ENOMEM);
3176  memcpy(dst_data, sd_src->data, sd_src->size);
3177  }
3178  }
3179 
3180  if (ost->rotate_overridden) {
3182  sizeof(int32_t) * 9);
3183  if (sd)
3184  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3185  }
3186 
3187  switch (par_dst->codec_type) {
3188  case AVMEDIA_TYPE_AUDIO:
3189  if (audio_volume != 256) {
3190  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3191  exit_program(1);
3192  }
3193  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3194  par_dst->block_align= 0;
3195  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3196  par_dst->block_align= 0;
3197  break;
3198  case AVMEDIA_TYPE_VIDEO:
3199  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3200  sar =
3201  av_mul_q(ost->frame_aspect_ratio,
3202  (AVRational){ par_dst->height, par_dst->width });
3203  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3204  "with stream copy may produce invalid files\n");
3205  }
3206  else if (ist->st->sample_aspect_ratio.num)
3207  sar = ist->st->sample_aspect_ratio;
3208  else
3209  sar = par_src->sample_aspect_ratio;
3210  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3211  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3212  ost->st->r_frame_rate = ist->st->r_frame_rate;
3213  break;
3214  }
3215 
3216  ost->mux_timebase = ist->st->time_base;
3217 
3218  return 0;
3219 }
3220 
3222 {
3223  AVDictionaryEntry *e;
3224 
3225  uint8_t *encoder_string;
3226  int encoder_string_len;
3227  int format_flags = 0;
3228  int codec_flags = ost->enc_ctx->flags;
3229 
3230  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3231  return;
3232 
3233  e = av_dict_get(of->opts, "fflags", NULL, 0);
3234  if (e) {
3235  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3236  if (!o)
3237  return;
3238  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3239  }
3240  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3241  if (e) {
3242  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3243  if (!o)
3244  return;
3245  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3246  }
3247 
3248  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3249  encoder_string = av_mallocz(encoder_string_len);
3250  if (!encoder_string)
3251  exit_program(1);
3252 
3253  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3254  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3255  else
3256  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3257  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3258  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3260 }
3261 
/* Parse the -force_key_frames time list (comma separated; the special token
 * "chapters[+offset]" expands to all chapter start times) into a sorted
 * array of pts values in the encoder time base, stored on the output stream.
 * NOTE(review): the first signature line — static void
 * parse_forced_key_frames(char *kf, OutputStream *ost, — and one av_log
 * opener are elided in this view. */
    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* count entries: commas + 1 */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* one entry per chapter replaces the single "chapters" token */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                /* NOTE(review): elided av_log(AV_LOG_FATAL, ...) opener */
                "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* optional "+offset" suffix shifts every chapter time */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
3324 
/* Choose the encoder time base: a positive -enc_time_base value is used
 * directly, a negative one means "copy from the input stream" (falling back
 * to the default when there is no input stream), otherwise the supplied
 * default is used.
 * NOTE(review): the line initializing 'ist' (presumably
 * InputStream *ist = get_input_stream(ost);) is elided in this view. */
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc;

    if (ost->enc_timebase.num > 0) {
        enc_ctx->time_base = ost->enc_timebase;
        return;
    }

    if (ost->enc_timebase.num < 0) {
        if (ist) {
            enc_ctx->time_base = ist->st->time_base;
            return;
        }

        oc = output_files[ost->file_index]->ctx;
        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
    }

    enc_ctx->time_base = default_time_base;
}
3348 
/*
 * init_output_stream_encode(): fill the encoder context of an output
 * stream from its sources (decoder context and/or filtergraph sink):
 * frame rate, time base, sample/pixel format, dimensions, aspect ratio,
 * colour properties, field order and the forced-keyframe specification.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): the signature line is absent from this extraction; in
 * FFmpeg 4.4 it is
 * "static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)"
 * -- confirm against the original source. Several interior lines are
 * also missing; they are flagged below where noticed.
 */
{
    /* NOTE(review): uses of 'ist' and 'dec_ctx' below imply missing
     * declarations just above (InputStream *ist; AVCodecContext *dec_ctx)
     * -- confirm. */
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        /* NOTE(review): one line missing here in this extraction. */
    } else {
        /* No direct input stream: look for another stream of the same
         * media type in the output file to derive a default disposition. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        /* NOTE(review): the body of this conditional is truncated in this
         * extraction (missing lines). */
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame-rate selection: filter sink, then the input's -r setting,
         * then the input's real frame rate, finally a 25fps fallback. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): the av_log() call start is missing here. */
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        /* Clamp to -fpsmax when set (or when the rate is degenerate). */
        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        /* Snap to the nearest rate the encoder supports unless -force_fps. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        /* NOTE(review): truncated statement -- the first line of the
         * bits-per-raw-sample update is missing in this extraction. */
        if (dec_ctx)
                                           av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);

        /* NOTE(review): a line is missing here (audio time-base init) -- confirm. */
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        /* NOTE(review): this condition is truncated (missing continuation
         * line) in this extraction. */
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        /* NOTE(review): truncated statement -- the first line of the
         * bits-per-raw-sample update is missing in this extraction. */
        if (dec_ctx)
                                      av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* Propagate colour properties from the first incoming frame.
         * NOTE(review): two lines (color_primaries, chroma_sample_location
         * presumably) are missing here -- confirm. */
        if (frame) {
            enc_ctx->color_range = frame->color_range;
            enc_ctx->color_trc = frame->color_trc;
            enc_ctx->colorspace = frame->colorspace;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* NOTE(review): the body of this conditional is missing in this
         * extraction. */
        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
        }

        // Field order: autodetection
        if (frame) {
            /* NOTE(review): the leading condition line is missing here. */
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            /* NOTE(review): the two assignment lines inside this if/else
             * are missing in this extraction. */
            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                else
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            /* "expr:" prefix -> evaluated per-frame keyframe expression. */
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                /* NOTE(review): the remaining call arguments are missing
                 * here in this extraction. */
                if (ret < 0) {
                    /* NOTE(review): av_log() call start missing here. */
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* Inherit canvas size from the source stream when unset. */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3531 
/*
 * init_output_stream(): open and fully initialize one output stream,
 * either for encoding (encoding_needed) or as a stream copy, then set up
 * its bitstream filters and try to finalize the output file header.
 * Returns 0 on success or a negative AVERROR code; some failure paths
 * write a human-readable message into 'error'.
 *
 * NOTE(review): the first line of the signature is missing from this
 * extraction; in FFmpeg 4.4 it is
 * "static int init_output_stream(OutputStream *ost, AVFrame *frame,"
 * -- confirm against the original source.
 */
                         char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        const AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        /* NOTE(review): a line is missing here in this extraction
         * (presumably ret = init_output_stream_encode(...)) -- confirm. */
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        /* Let the encoder pick its thread count unless the user forced one. */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio bitrate of 128k when the codec declares no defaults
         * and the user gave neither -b nor -ab. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* NOTE(review): a line is missing here in this extraction
         * (presumably the hardware device setup call) -- confirm. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* Subtitle transcoding is only supported within the same class
         * (text->text or bitmap->bitmap), never across. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* Fixed-frame-size audio encoders need the buffersink to emit
         * exactly frame_size samples per frame. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): a line is missing here in this extraction
         * (presumably avcodec_parameters_from_context(...)) -- confirm. */
        if (ret < 0) {
            /* NOTE(review): av_log() call start missing here. */
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* Mirror any side data the encoder produced onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* autorotate is handled in the filterchain, so zero out
                     * the rotation in the copied display matrix. */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        /* NOTE(review): a line is missing here in this extraction
         * (presumably ret = init_output_stream_streamcopy(ost);) -- confirm. */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* Ad-hoc AVOption flag table so av_opt_eval_flags() can parse the
         * -disposition string into AV_DISPOSITION_* bits. */
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3714 
/*
 * Warn -- once per stream index -- when a packet arrives for an input
 * stream that appeared after the initial stream scan.
 * NOTE(review): one argument line of the av_log() call is missing from
 * this extraction (presumably the media-type string) -- confirm.
 */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* Already warned about this (or a later) stream index: stay quiet. */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    /* Remember the highest stream index warned about so far. */
    file->nb_streams_warn = pkt->stream_index + 1;
}
3729 
/*
 * Perform global initialization before the main transcoding loop:
 * bind complex-filtergraph outputs to a source stream index, set up
 * frame-rate emulation (-re), open all input decoders, initialize
 * streamcopy and subtitle/data output streams, discard unused programs,
 * write headers for stream-less output files, and dump the stream
 * mapping to the log. Returns 0 on success or a negative error code.
 */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* Attach each single-input complex-filtergraph output to the input
     * stream feeding its graph (used for the mapping printout below). */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        /* NOTE(review): declaration of 'ifile' missing from this extraction
         * (InputFile *ifile = input_files[i];) -- confirm. */
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* A decoder failed to open: close every encoder and bail out. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): this condition and the init_output_stream() call
         * below are truncated in this extraction (missing lines). */
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
            continue;

        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        /* NOTE(review): declaration of 'ifile' missing here -- confirm. */
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* Keep the program if any of its streams is still in use. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            /* NOTE(review): the header-writing call is missing here in
             * this extraction -- confirm. */
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* The native decoder for a codec shares the codec's name. */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): a line is missing here in this extraction
     * (presumably the atomic store of transcode_init_done) -- confirm. */

    return 0;
}
3910 
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): the declaration of 'ost' is missing from this
         * extraction (OutputStream *ost = output_streams[i];) -- confirm. */
        OutputFile *of = output_files[ost->file_index];
        AVFormatContext *os = output_files[ost->file_index]->ctx;

        /* Finished streams and files already past -fs need no more output. */
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        if (ost->frame_number >= ost->max_frames) {
            int j;
            /* NOTE(review): the loop body is missing from this extraction
             * (presumably closing the file's other streams) -- confirm. */
            for (j = 0; j < of->ctx->nb_streams; j++)
            continue;
        }

        return 1;
    }

    return 0;
}
3936 
3937 /**
3938  * Select the output stream to process.
3939  *
3940  * @return selected output stream, or NULL if none available
3941  */
/*
 * NOTE(review): the signature line is missing from this extraction; in
 * FFmpeg 4.4 it is "static OutputStream *choose_output(void)" -- confirm.
 * Picks the output stream with the smallest current DTS (rescaled to
 * AV_TIME_BASE_Q) so muxing stays roughly interleaved.
 */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): declaration of 'ost' missing here
         * (OutputStream *ost = output_streams[i];) -- confirm. */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            /* NOTE(review): the av_log() call start is missing here. */
                   "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                   ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* Not-yet-initialized streams take priority so their first frame
         * can flow; 'unavailable' yields NULL to signal "try again". */
        if (!ost->initialized && !ost->inputs_done)
            return ost->unavailable ? NULL : ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3968 
/* Turn terminal echo on stdin on (non-zero) or off (zero); a silent
 * no-op on platforms without termios or when stdin is not a tty. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term_attrs;

    if (tcgetattr(0, &term_attrs) != 0)
        return;

    if (on)
        term_attrs.c_lflag |= ECHO;
    else
        term_attrs.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term_attrs);
#endif
}
3980 
/*
 * NOTE(review): the signature line is missing from this extraction; in
 * FFmpeg 4.4 it is "static int check_keyboard_interaction(int64_t cur_time)"
 * -- confirm. Polls the controlling terminal (at most every 100ms) for
 * interactive commands; returns AVERROR_EXIT on 'q' or after a signal,
 * otherwise 0.
 */
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    /* '+'/'-' adjust log verbosity, 's' toggles the QP histogram. */
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles: off -> packet dump -> packet+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): a line is missing here in this extraction. */
    }
    /* 'c'/'C': send (or queue) a command to filters at runtime. */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    /* time < 0: execute immediately; otherwise queue for the
                     * given timestamp ('c' == first matching filter only). */
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the av_log() call start is missing here. */
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles the decoder debug flags; 'd' reads a value interactively. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->dec_ctx->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            /* NOTE(review): declaration of 'ost' missing here
             * (OutputStream *ost = output_streams[i];) -- confirm. */
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
4093 
4094 #if HAVE_THREADS
/*
 * Per-input-file demuxer thread: reads packets from the demuxer and
 * forwards them to the main thread through f->in_thread_queue. On any
 * terminal error (including EOF and OOM) the error is published to the
 * queue's receive side and the thread exits.
 */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    AVPacket *pkt = f->pkt, *queue_pkt;
    /* Start non-blocking when requested; may fall back to blocking below. */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        ret = av_read_frame(f->ctx, pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* EOF or read error: propagate to the consumer side. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        queue_pkt = av_packet_alloc();
        if (!queue_pkt) {
            /* NOTE(review): a line is missing here in this extraction
             * (presumably av_packet_unref(pkt);) -- confirm. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
            break;
        }
        av_packet_move_ref(queue_pkt, pkt);
        ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* Queue full in non-blocking mode: warn once and switch to a
             * blocking send for the rest of the run. */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_free(&queue_pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4142 
/*
 * Tear down the demuxer thread of input file i: drain and free any
 * queued packets, join the thread and free the message queue.
 * Safe to call when no thread/queue was ever created.
 */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket *pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* NOTE(review): a line is missing here in this extraction (presumably
     * signalling EOF on the queue's send side so the thread stops) -- confirm. */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_free(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4158 
4159 static void free_input_threads(void)
4160 {
4161  int i;
4162 
4163  for (i = 0; i < nb_input_files; i++)
4164  free_input_thread(i);
4165 }
4166 
4167 static int init_input_thread(int i)
4168 {
4169  int ret;
4170  InputFile *f = input_files[i];
4171 
4172  if (f->thread_queue_size < 0)
4173  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4174  if (!f->thread_queue_size)
4175  return 0;
4176 
4177  if (f->ctx->pb ? !f->ctx->pb->seekable :
4178  strcmp(f->ctx->iformat->name, "lavfi"))
4179  f->non_blocking = 1;
4180  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4181  f->thread_queue_size, sizeof(f->pkt));
4182  if (ret < 0)
4183  return ret;
4184 
4185  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4186  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4187  av_thread_message_queue_free(&f->in_thread_queue);
4188  return AVERROR(ret);
4189  }
4190 
4191  return 0;
4192 }
4193 
4194 static int init_input_threads(void)
4195 {
4196  int i, ret;
4197 
4198  for (i = 0; i < nb_input_files; i++) {
4199  ret = init_input_thread(i);
4200  if (ret < 0)
4201  return ret;
4202  }
4203  return 0;
4204 }
4205 
/*
 * Pop the next demuxed packet from the input thread's message queue.
 * NOTE(review): the second line of the call (selecting the non-blocking
 * flag from f->non_blocking) is missing from this extraction -- confirm.
 */
static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
}
4212 #endif
4213 
/*
 * NOTE(review): the signature line is missing from this extraction; in
 * FFmpeg 4.4 it is "static int get_input_packet(InputFile *f, AVPacket **pkt)"
 * -- confirm. Fetches the next packet from an input file, honouring -re
 * frame-rate emulation and the optional demuxer thread.
 */
{
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            /* -re: don't deliver a packet before its DTS (converted to
             * microseconds) has elapsed in wall-clock time. */
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (f->thread_queue_size)
        return get_input_packet_mt(f, pkt);
#endif
    *pkt = f->pkt;
    return av_read_frame(f->ctx, *pkt);
}
4234 
4235 static int got_eagain(void)
4236 {
4237  int i;
4238  for (i = 0; i < nb_output_streams; i++)
4239  if (output_streams[i]->unavailable)
4240  return 1;
4241  return 0;
4242 }
4243 
4244 static void reset_eagain(void)
4245 {
4246  int i;
4247  for (i = 0; i < nb_input_files; i++)
4248  input_files[i]->eagain = 0;
4249  for (i = 0; i < nb_output_streams; i++)
4250  output_streams[i]->unavailable = 0;
4251 }
4252 
4253 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4255  AVRational time_base)
4256 {
4257  int ret;
4258 
4259  if (!*duration) {
4260  *duration = tmp;
4261  return tmp_time_base;
4262  }
4263 
4264  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4265  if (ret < 0) {
4266  *duration = tmp;
4267  return tmp_time_base;
4268  }
4269 
4270  return time_base;
4271 }
4272 
/*
 * NOTE(review): the signature line is missing from this extraction; in
 * FFmpeg 4.4 it is "static int seek_to_start(InputFile *ifile, AVFormatContext *is)"
 * -- confirm. Rewinds an input file for -stream_loop: seeks back to
 * start_time and accumulates the file's duration so timestamps keep
 * increasing across loop iterations.
 */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* First pass: detect whether any audio stream produced samples. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* Second pass: estimate the last-frame duration per stream and fold
     * the per-stream total into the file's accumulated duration. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* NOTE(review): a line is missing here in this extraction
                 * (presumably duration = av_rescale_q(ist->nb_samples,
                 * sample_rate, ist->st->time_base);) -- confirm. */
            } else {
                continue;
            }
        } else {
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                /* NOTE(review): a line is missing here in this extraction. */
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* One loop iteration consumed (negative -stream_loop means infinite). */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4331 
4332 /*
4333  * Return
4334  * - 0 -- one packet was read and processed
4335  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4336  * this function should be called again
4337  * - AVERROR_EOF -- this function should not be called again
4338  */
4339 static int process_input(int file_index)
4340 {
4341  InputFile *ifile = input_files[file_index];
4343  InputStream *ist;
4344  AVPacket *pkt;
4345  int ret, thread_ret, i, j;
4346  int64_t duration;
4347  int64_t pkt_dts;
4348  int disable_discontinuity_correction = copy_ts;
4349 
4350  is = ifile->ctx;
4351  ret = get_input_packet(ifile, &pkt);
4352 
4353  if (ret == AVERROR(EAGAIN)) {
4354  ifile->eagain = 1;
4355  return ret;
4356  }
4357  if (ret < 0 && ifile->loop) {
4358  AVCodecContext *avctx;
4359  for (i = 0; i < ifile->nb_streams; i++) {
4360  ist = input_streams[ifile->ist_index + i];
4361  avctx = ist->dec_ctx;
4362  if (ist->decoding_needed) {
4363  ret = process_input_packet(ist, NULL, 1);
4364  if (ret>0)
4365  return 0;
4366  avcodec_flush_buffers(avctx);
4367  }
4368  }
4369 #if HAVE_THREADS
4370  free_input_thread(file_index);
4371 #endif
4372  ret = seek_to_start(ifile, is);
4373 #if HAVE_THREADS
4374  thread_ret = init_input_thread(file_index);
4375  if (thread_ret < 0)
4376  return thread_ret;
4377 #endif
4378  if (ret < 0)
4379  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4380  else
4381  ret = get_input_packet(ifile, &pkt);
4382  if (ret == AVERROR(EAGAIN)) {
4383  ifile->eagain = 1;
4384  return ret;
4385  }
4386  }
4387  if (ret < 0) {
4388  if (ret != AVERROR_EOF) {
4389  print_error(is->url, ret);
4390  if (exit_on_error)
4391  exit_program(1);
4392  }
4393 
4394  for (i = 0; i < ifile->nb_streams; i++) {
4395  ist = input_streams[ifile->ist_index + i];
4396  if (ist->decoding_needed) {
4397  ret = process_input_packet(ist, NULL, 0);
4398  if (ret>0)
4399  return 0;
4400  }
4401 
4402  /* mark all outputs that don't go through lavfi as finished */
4403  for (j = 0; j < nb_output_streams; j++) {
4405 
4406  if (ost->source_index == ifile->ist_index + i &&
4407  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4409  }
4410  }
4411 
4412  ifile->eof_reached = 1;
4413  return AVERROR(EAGAIN);
4414  }
4415 
4416  reset_eagain();
4417 
4418  if (do_pkt_dump) {
4420  is->streams[pkt->stream_index]);
4421  }
4422  /* the following test is needed in case new streams appear
4423  dynamically in stream : we ignore them */
4424  if (pkt->stream_index >= ifile->nb_streams) {
4425  report_new_stream(file_index, pkt);
4426  goto discard_packet;
4427  }
4428 
4429  ist = input_streams[ifile->ist_index + pkt->stream_index];
4430 
4431  ist->data_size += pkt->size;
4432  ist->nb_packets++;
4433 
4434  if (ist->discard)
4435  goto discard_packet;
4436 
4437  if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
4439  "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
4440  if (exit_on_error)
4441  exit_program(1);
4442  }
4443 
4444  if (debug_ts) {
4445  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4446  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4454  }
4455 
4456  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4457  int64_t stime, stime2;
4458  // Correcting starttime based on the enabled streams
4459  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4460  // so we instead do it here as part of discontinuity handling
4461  if ( ist->next_dts == AV_NOPTS_VALUE
4462  && ifile->ts_offset == -is->start_time
4463  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4464  int64_t new_start_time = INT64_MAX;
4465  for (i=0; i<is->nb_streams; i++) {
4466  AVStream *st = is->streams[i];
4467  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4468  continue;
4469  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4470  }
4471  if (new_start_time > is->start_time) {
4472  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4473  ifile->ts_offset = -new_start_time;
4474  }
4475  }
4476 
4477  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4478  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4479  ist->wrap_correction_done = 1;
4480 
4481  if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4482  pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
4483  ist->wrap_correction_done = 0;
4484  }
4485  if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4486  pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
4487  ist->wrap_correction_done = 0;
4488  }
4489  }
4490 
4491  /* add the stream-global side data to the first packet */
4492  if (ist->nb_packets == 1) {
4493  for (i = 0; i < ist->st->nb_side_data; i++) {
4494  AVPacketSideData *src_sd = &ist->st->side_data[i];
4495  uint8_t *dst_data;
4496 
4497  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4498  continue;
4499 
4500  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
4501  continue;
4502 
4503  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
4504  if (!dst_data)
4505  exit_program(1);
4506 
4507  memcpy(dst_data, src_sd->data, src_sd->size);
4508  }
4509  }
4510 
4511  if (pkt->dts != AV_NOPTS_VALUE)
4512  pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4513  if (pkt->pts != AV_NOPTS_VALUE)
4514  pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4515 
4516  if (pkt->pts != AV_NOPTS_VALUE)
4517  pkt->pts *= ist->ts_scale;
4518  if (pkt->dts != AV_NOPTS_VALUE)
4519  pkt->dts *= ist->ts_scale;
4520 
4522  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4524  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4525  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4526  int64_t delta = pkt_dts - ifile->last_ts;
4527  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4529  ifile->ts_offset -= delta;
4531  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4532  delta, ifile->ts_offset);
4534  if (pkt->pts != AV_NOPTS_VALUE)
4536  }
4537  }
4538 
4539  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4540  if (pkt->pts != AV_NOPTS_VALUE) {
4541  pkt->pts += duration;
4542  ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
4543  ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
4544  }
4545 
4546  if (pkt->dts != AV_NOPTS_VALUE)
4547  pkt->dts += duration;
4548 
4550 
4551  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4552  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4553  int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
4554  ist->st->time_base, AV_TIME_BASE_Q,
4556  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4557  disable_discontinuity_correction = 0;
4558  }
4559 
4560  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4562  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4563  !disable_discontinuity_correction) {
4564  int64_t delta = pkt_dts - ist->next_dts;
4565  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4566  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4568  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4569  ifile->ts_offset -= delta;
4571  "timestamp discontinuity for stream #%d:%d "
4572  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4573  ist->file_index, ist->st->index, ist->st->id,
4575  delta, ifile->ts_offset);
4577  if (pkt->pts != AV_NOPTS_VALUE)
4579  }
4580  } else {
4581  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4583  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
4584  pkt->dts = AV_NOPTS_VALUE;
4585  }
4586  if (pkt->pts != AV_NOPTS_VALUE){
4587  int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
4588  delta = pkt_pts - ist->next_dts;
4589  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4591  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
4592  pkt->pts = AV_NOPTS_VALUE;
4593  }
4594  }
4595  }
4596  }
4597 
4598  if (pkt->dts != AV_NOPTS_VALUE)
4599  ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
4600 
4601  if (debug_ts) {
4602  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4608  }
4609 
4610  sub2video_heartbeat(ist, pkt->pts);
4611 
4612  process_input_packet(ist, pkt, 0);
4613 
4614 discard_packet:
4615 #if HAVE_THREADS
4616  if (ifile->thread_queue_size)
4617  av_packet_free(&pkt);
4618  else
4619 #endif
4621 
4622  return 0;
4623 }
4624 
4625 /**
4626  * Perform a step of transcoding for the specified filter graph.
4627  *
4628  * @param[in] graph filter graph to consider
4629  * @param[out] best_ist input stream where a frame would allow to continue
4630  * @return 0 for success, <0 for error
4631  */
4632 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4633 {
4634  int i, ret;
4635  int nb_requests, nb_requests_max = 0;
4636  InputFilter *ifilter;
4637  InputStream *ist;
4638 
4639  *best_ist = NULL;
4640  ret = avfilter_graph_request_oldest(graph->graph);
4641  if (ret >= 0)
4642  return reap_filters(0);
4643 
4644  if (ret == AVERROR_EOF) {
4645  ret = reap_filters(1);
4646  for (i = 0; i < graph->nb_outputs; i++)
4647  close_output_stream(graph->outputs[i]->ost);
4648  return ret;
4649  }
4650  if (ret != AVERROR(EAGAIN))
4651  return ret;
4652 
4653  for (i = 0; i < graph->nb_inputs; i++) {
4654  ifilter = graph->inputs[i];
4655  ist = ifilter->ist;
4656  if (input_files[ist->file_index]->eagain ||
4658  continue;
4659  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4660  if (nb_requests > nb_requests_max) {
4661  nb_requests_max = nb_requests;
4662  *best_ist = ist;
4663  }
4664  }
4665 
4666  if (!*best_ist)
4667  for (i = 0; i < graph->nb_outputs; i++)
4668  graph->outputs[i]->ost->unavailable = 1;
4669 
4670  return 0;
4671 }
4672 
4673 /**
4674  * Run a single step of transcoding.
4675  *
4676  * @return 0 for success, <0 for error
4677  */
4678 static int transcode_step(void)
4679 {
4680  OutputStream *ost;
4681  InputStream *ist = NULL;
4682  int ret;
4683 
4684  ost = choose_output();
4685  if (!ost) {
4686  if (got_eagain()) {
4687  reset_eagain();
4688  av_usleep(10000);
4689  return 0;
4690  }
4691  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4692  return AVERROR_EOF;
4693  }
4694 
4695  if (ost->filter && !ost->filter->graph->graph) {
4696  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4697  ret = configure_filtergraph(ost->filter->graph);
4698  if (ret < 0) {
4699  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4700  return ret;
4701  }
4702  }
4703  }
4704 
4705  if (ost->filter && ost->filter->graph->graph) {
4706  /*
4707  * Similar case to the early audio initialization in reap_filters.
4708  * Audio is special in ffmpeg.c currently as we depend on lavfi's
4709  * audio frame buffering/creation to get the output audio frame size
4710  * in samples correct. The audio frame size for the filter chain is
4711  * configured during the output stream initialization.
4712  *
4713  * Apparently avfilter_graph_request_oldest (called in
4714  * transcode_from_filter just down the line) peeks. Peeking already
4715  * puts one frame "ready to be given out", which means that any
4716  * update in filter buffer sink configuration afterwards will not
4717  * help us. And yes, even if it would be utilized,
4718  * av_buffersink_get_samples is affected, as it internally utilizes
4719  * the same early exit for peeked frames.
4720  *
4721  * In other words, if avfilter_graph_request_oldest would not make
4722  * further filter chain configuration or usage of
4723  * av_buffersink_get_samples useless (by just causing the return
4724  * of the peeked AVFrame as-is), we could get rid of this additional
4725  * early encoder initialization.
4726  */
4727  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4729 
4730  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4731  return ret;
4732  if (!ist)
4733  return 0;
4734  } else if (ost->filter) {
4735  int i;
4736  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4737  InputFilter *ifilter = ost->filter->graph->inputs[i];
4738  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4739  ist = ifilter->ist;
4740  break;
4741  }
4742  }
4743  if (!ist) {
4744  ost->inputs_done = 1;
4745  return 0;
4746  }
4747  } else {
4748  av_assert0(ost->source_index >= 0);
4749  ist = input_streams[ost->source_index];
4750  }
4751 
4752  ret = process_input(ist->file_index);
4753  if (ret == AVERROR(EAGAIN)) {
4754  if (input_files[ist->file_index]->eagain)
4755  ost->unavailable = 1;
4756  return 0;
4757  }
4758 
4759  if (ret < 0)
4760  return ret == AVERROR_EOF ? 0 : ret;
4761 
4762  return reap_filters(0);
4763 }
4764 
4765 /*
4766  * The following code is the main loop of the file converter
4767  */
4768 static int transcode(void)
4769 {
4770  int ret, i;
4771  AVFormatContext *os;
4772  OutputStream *ost;
4773  InputStream *ist;
4774  int64_t timer_start;
4775  int64_t total_packets_written = 0;
4776 
4777  ret = transcode_init();
4778  if (ret < 0)
4779  goto fail;
4780 
4781  if (stdin_interaction) {
4782  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4783  }
4784 
4785  timer_start = av_gettime_relative();
4786 
4787 #if HAVE_THREADS
4788  if ((ret = init_input_threads()) < 0)
4789  goto fail;
4790 #endif
4791 
4792  while (!received_sigterm) {
4793  int64_t cur_time= av_gettime_relative();
4794 
4795  /* if 'q' pressed, exits */
4796  if (stdin_interaction)
4797  if (check_keyboard_interaction(cur_time) < 0)
4798  break;
4799 
4800  /* check if there's any stream where output is still needed */
4801  if (!need_output()) {
4802  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4803  break;
4804  }
4805 
4806  ret = transcode_step();
4807  if (ret < 0 && ret != AVERROR_EOF) {
4808  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4809  break;
4810  }
4811 
4812  /* dump report by using the output first video and audio streams */
4813  print_report(0, timer_start, cur_time);
4814  }
4815 #if HAVE_THREADS
4816  free_input_threads();
4817 #endif
4818 
4819  /* at the end of stream, we must flush the decoder buffers */
4820  for (i = 0; i < nb_input_streams; i++) {
4821  ist = input_streams[i];
4822  if (!input_files[ist->file_index]->eof_reached) {
4823  process_input_packet(ist, NULL, 0);
4824  }
4825  }
4826  flush_encoders();
4827 
4828  term_exit();
4829 
4830  /* write the trailer if needed and close file */
4831  for (i = 0; i < nb_output_files; i++) {
4832  os = output_files[i]->ctx;
4833  if (!output_files[i]->header_written) {
4835  "Nothing was written into output file %d (%s), because "
4836  "at least one of its streams received no packets.\n",
4837  i, os->url);
4838  continue;
4839  }
4840  if ((ret = av_write_trailer(os)) < 0) {
4841  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4842  if (exit_on_error)
4843  exit_program(1);
4844  }
4845  }
4846 
4847  /* dump report by using the first video and audio streams */
4848  print_report(1, timer_start, av_gettime_relative());
4849 
4850  /* close each encoder */
4851  for (i = 0; i < nb_output_streams; i++) {
4852  ost = output_streams[i];
4853  if (ost->encoding_needed) {
4854  av_freep(&ost->enc_ctx->stats_in);
4855  }
4856  total_packets_written += ost->packets_written;
4857  if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4858  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4859  exit_program(1);
4860  }
4861  }
4862 
4863  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4864  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4865  exit_program(1);
4866  }
4867 
4868  /* close each decoder */
4869  for (i = 0; i < nb_input_streams; i++) {
4870  ist = input_streams[i];
4871  if (ist->decoding_needed) {
4872  avcodec_close(ist->dec_ctx);
4873  if (ist->hwaccel_uninit)
4874  ist->hwaccel_uninit(ist->dec_ctx);
4875  }
4876  }
4877 
4879 
4880  /* finished ! */
4881  ret = 0;
4882 
4883  fail:
4884 #if HAVE_THREADS
4885  free_input_threads();
4886 #endif
4887 
4888  if (output_streams) {
4889  for (i = 0; i < nb_output_streams; i++) {
4890  ost = output_streams[i];
4891  if (ost) {
4892  if (ost->logfile) {
4893  if (fclose(ost->logfile))
4895  "Error closing logfile, loss of information possible: %s\n",
4896  av_err2str(AVERROR(errno)));
4897  ost->logfile = NULL;
4898  }
4899  av_freep(&ost->forced_kf_pts);
4900  av_freep(&ost->apad);
4902  av_dict_free(&ost->encoder_opts);
4903  av_dict_free(&ost->sws_dict);
4904  av_dict_free(&ost->swr_opts);
4905  av_dict_free(&ost->resample_opts);
4906  }
4907  }
4908  }
4909  return ret;
4910 }
4911 
4913 {
4914  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4915 #if HAVE_GETRUSAGE
4916  struct rusage rusage;
4917 
4918  getrusage(RUSAGE_SELF, &rusage);
4919  time_stamps.user_usec =
4920  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4921  time_stamps.sys_usec =
4922  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4923 #elif HAVE_GETPROCESSTIMES
4924  HANDLE proc;
4925  FILETIME c, e, k, u;
4926  proc = GetCurrentProcess();
4927  GetProcessTimes(proc, &c, &e, &k, &u);
4928  time_stamps.user_usec =
4929  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4930  time_stamps.sys_usec =
4931  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4932 #else
4933  time_stamps.user_usec = time_stamps.sys_usec = 0;
4934 #endif
4935  return time_stamps;
4936 }
4937 
/* Return the peak memory usage of this process in bytes, or 0 when no
 * platform API is available (getrusage ru_maxrss on POSIX, peak pagefile
 * usage on Windows). */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4955 
/* No-op log callback installed in daemon mode (-d) to silence all output. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4959 
4960 int main(int argc, char **argv)
4961 {
4962  int i, ret;
4964 
4965  init_dynload();
4966 
4968 
4969  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4970 
4972  parse_loglevel(argc, argv, options);
4973 
4974  if(argc>1 && !strcmp(argv[1], "-d")){
4975  run_as_daemon=1;
4977  argc--;
4978  argv++;
4979  }
4980 
4981 #if CONFIG_AVDEVICE
4983 #endif
4985 
4986  show_banner(argc, argv, options);
4987 
4988  /* parse options and open all input/output files */
4989  ret = ffmpeg_parse_options(argc, argv);
4990  if (ret < 0)
4991  exit_program(1);
4992 
4993  if (nb_output_files <= 0 && nb_input_files == 0) {
4994  show_usage();
4995  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4996  exit_program(1);
4997  }
4998 
4999  /* file converter / grab */
5000  if (nb_output_files <= 0) {
5001  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
5002  exit_program(1);
5003  }
5004 
5005  for (i = 0; i < nb_output_files; i++) {
5006  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
5007  want_sdp = 0;
5008  }
5009 
5011  if (transcode() < 0)
5012  exit_program(1);
5013  if (do_benchmark) {
5014  int64_t utime, stime, rtime;
5016  utime = current_time.user_usec - ti.user_usec;
5017  stime = current_time.sys_usec - ti.sys_usec;
5018  rtime = current_time.real_usec - ti.real_usec;
5020  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5021  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5022  }
5023  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5026  exit_program(69);
5027 
5029  return main_return_code;
5030 }
static void flush(AVCodecContext *avctx)
#define ECHO(name, type, min, max)
Definition: af_aecho.c:188
uint8_t
int32_t
simple assert() macros that are a bit more flexible than ISO C assert().
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1629
Main libavdevice API header.
Main libavfilter public API header.
Main libavformat public API header.
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:822
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:145
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:467
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1380
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:831
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:821
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:465
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:823
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:853
#define AV_DISPOSITION_DUB
Definition: avformat.h:819
#define AV_DISPOSITION_METADATA
Definition: avformat.h:855
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:458
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:856
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:472
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:833
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:830
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:832
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:841
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:462
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:854
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:137
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:820
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:818
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:342
#define avio_print(s,...)
Write strings (const char *) to the context.
Definition: avio.h:594
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:675
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:225
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:245
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1166
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1195
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t *size)
Definition: avpacket.c:368
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:343
#define AV_RL64
Definition: intreadwrite.h:173
#define AV_RL32
Definition: intreadwrite.h:146
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
#define AV_BPRINT_SIZE_AUTOMATIC
memory buffer sink API for audio and video
Memory buffer source API.
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:36
#define flags(name, subs,...)
Definition: cbs_av1.c:572
#define is(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:286
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:278
#define s(width, name)
Definition: cbs_vp9.c:257
#define f(width, name)
Definition: cbs_vp9.c:255
static av_always_inline void filter(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhddsp.c:27
audio channel layout utility functions
#define fail()
Definition: checkasm.h:133
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1084
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:502
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1183
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:162
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:128
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:87
#define media_type_string
Definition: cmdutils.h:617
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: codec_par.c:72
void avcodec_parameters_free(AVCodecParameters **ppar)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: codec_par.c:61
@ AV_FIELD_TT
Definition: codec_par.h:39
@ AV_FIELD_BB
Definition: codec_par.h:40
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
@ AV_FIELD_BT
Definition: codec_par.h:42
@ AV_FIELD_TB
Definition: codec_par.h:41
#define FFMAX3(a, b, c)
Definition: common.h:104
#define FFSWAP(type, a, b)
Definition: common.h:108
#define FFMIN(a, b)
Definition: common.h:105
#define av_clip
Definition: common.h:122
#define FFMAX(a, b)
Definition: common.h:103
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:101
#define FFSIGN(a)
Definition: common.h:73
#define FFMIN3(a, b, c)
Definition: common.h:106
#define NULL
Definition: coverity.c:32
long long int64_t
Definition: coverity.c:34
#define max(a, b)
Definition: cuda_runtime.h:33
static enum AVPixelFormat pix_fmt
static AVFrame * frame
Public dictionary API.
Display matrix.
static float sub(float src0, float src1)
#define atomic_store(object, desired)
Definition: stdatomic.h:85
intptr_t atomic_int
Definition: stdatomic.h:55
#define atomic_load(object)
Definition: stdatomic.h:93
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
static int nb_frames_drop
Definition: ffmpeg.c:137
AVIOContext * progress_avio
Definition: ffmpeg.c:144
static int nb_frames_dup
Definition: ffmpeg.c:135
static int transcode(void)
Definition: ffmpeg.c:4768
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2524
static void set_tty_echo(int on)
Definition: ffmpeg.c:3969
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2331
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3981
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:1005
int nb_output_streams
Definition: ffmpeg.c:154
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4912
static int need_output(void)
Definition: ffmpeg.c:3912
void term_exit(void)
Definition: ffmpeg.c:337
static volatile int received_sigterm
Definition: ffmpeg.c:343
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2282
int nb_filtergraphs
Definition: ffmpeg.c:159
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2619
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:1063
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2950
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:223
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2128
static void print_sdp(void)
Definition: ffmpeg.c:2809
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1503
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:924
InputFile ** input_files
Definition: ffmpeg.c:150
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
Definition: ffmpeg.c:1146
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2854
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1586
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4632
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:514
int main(int argc, char **argv)
Definition: ffmpeg.c:4960
static double psnr(double d)
Definition: ffmpeg.c:1436
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3078
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:3349
static int main_return_code
Definition: ffmpeg.c:347
static uint8_t * subtitle_out
Definition: ffmpeg.c:146
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2305
int nb_input_streams
Definition: ffmpeg.c:149
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4938
static void reset_eagain(void)
Definition: ffmpeg.c:4244
int nb_input_files
Definition: ffmpeg.c:151
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1484
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3262
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3942
FilterGraph ** filtergraphs
Definition: ffmpeg.c:158
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:3023
static unsigned dup_warning
Definition: ffmpeg.c:136
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
Definition: ffmpeg.c:984
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2393
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2178
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2148
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:281
static int process_input(int file_index)
Definition: ffmpeg.c:4339
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4956
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4273
static volatile int ffmpeg_exited
Definition: ffmpeg.c:346
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:2036
int nb_output_files
Definition: ffmpeg.c:156
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:730
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3221
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
static int read_key(void)
Definition: ffmpeg.c:457
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:3029
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:876
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2940
#define SIGNAL(sig, func)
Definition: ffmpeg.c:404
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:241
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
Definition: ffmpeg.c:3532
static int got_eagain(void)
Definition: ffmpeg.c:4235
static int want_sdp
Definition: ffmpeg.c:141
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2603
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:509
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:329
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:313
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:192
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:677
static int transcode_init(void)
Definition: ffmpeg.c:3730
static volatile int received_nb_signals
Definition: ffmpeg.c:344
InputStream ** input_streams
Definition: ffmpeg.c:148
static int64_t copy_ts_first_pts
Definition: ffmpeg.c:348
static FILE * vstats_file
Definition: ffmpeg.c:112
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3715
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:143
static void abort_codec_experimental(const AVCodec *c, int encoder)
Definition: ffmpeg.c:695
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:700
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4254
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:516
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:937
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4678
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2167
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:721
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1703
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3107
void term_init(void)
Definition: ffmpeg.c:408
static unsigned nb_output_dumped
Definition: ffmpeg.c:139
OutputStream ** output_streams
Definition: ffmpeg.c:153
static int get_input_packet(InputFile *f, AVPacket **pkt)
Definition: ffmpeg.c:4214
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1441
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:2053
OutputFile ** output_files
Definition: ffmpeg.c:155
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:177
static void flush_encoders(void)
Definition: ffmpeg.c:1926
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:898
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:3016
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:686
static int run_as_daemon
Definition: ffmpeg.c:134
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3325
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1913
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2255
static void sigterm_handler(int sig)
Definition: ffmpeg.c:351
static atomic_int transcode_init_done
Definition: ffmpeg.c:345
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:419
int debug_ts
Definition: ffmpeg_opt.c:166
@ HWACCEL_GENERIC
Definition: ffmpeg.h:61
@ HWACCEL_AUTO
Definition: ffmpeg.h:60
float max_error_rate
Definition: ffmpeg_opt.c:173
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
int audio_volume
Definition: ffmpeg_opt.c:154
#define VSYNC_DROP
Definition: ffmpeg.h:54
char * sdp_filename
Definition: ffmpeg_opt.c:148
int print_stats
Definition: ffmpeg_opt.c:169
int stdin_interaction
Definition: ffmpeg_opt.c:171
int do_benchmark
Definition: ffmpeg_opt.c:159
int do_hex_dump
Definition: ffmpeg_opt.c:161
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:172
float dts_error_threshold
Definition: ffmpeg_opt.c:152
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:516
OSTFinished
Definition: ffmpeg.h:447
@ ENCODER_FINISHED
Definition: ffmpeg.h:448
@ MUXER_FINISHED
Definition: ffmpeg.h:449
#define VSYNC_CFR
Definition: ffmpeg.h:51
int abort_on_flags
Definition: ffmpeg_opt.c:168
float frame_drop_threshold
Definition: ffmpeg_opt.c:157
void show_usage(void)
Definition: ffmpeg_opt.c:3300
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:307
int qp_hist
Definition: ffmpeg_opt.c:170
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:303
#define VSYNC_AUTO
Definition: ffmpeg.h:49
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:274
int vstats_version
Definition: ffmpeg_opt.c:176
char * vstats_filename
Definition: ffmpeg_opt.c:147
int audio_sync_method
Definition: ffmpeg_opt.c:155
int copy_tb
Definition: ffmpeg_opt.c:165
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:436
@ FKF_T
Definition: ffmpeg.h:438
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:437
@ FKF_N_FORCED
Definition: ffmpeg.h:435
@ FKF_N
Definition: ffmpeg.h:434
int ffmpeg_parse_options(int argc, char **argv)
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:442
#define DECODING_FOR_OST
Definition: ffmpeg.h:306
int video_sync_method
Definition: ffmpeg_opt.c:156
float dts_delta_threshold
Definition: ffmpeg_opt.c:151
int64_t stats_period
Definition: ffmpeg_opt.c:178
int copy_ts
Definition: ffmpeg_opt.c:163
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
int filtergraph_is_simple(FilterGraph *fg)
int do_benchmark_all
Definition: ffmpeg_opt.c:160
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
int configure_filtergraph(FilterGraph *fg)
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:136
#define VSYNC_VFR
Definition: ffmpeg.h:52
int exit_on_error
Definition: ffmpeg_opt.c:167
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:443
int do_pkt_dump
Definition: ffmpeg_opt.c:162
const OptionDef options[]
sample_rate
static int loop
Definition: ffplay.c:341
static int64_t start_time
Definition: ffplay.c:332
a very simple circular buffer FIFO implementation
static AVCodecContext * dec_ctx
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1661
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1656
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:224
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:90
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:477
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:148
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:129
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:77
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:910
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:227
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:551
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:201
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3501
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: codec_par.c:147
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:116
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
av_cold int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: avcodec.c:570
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:188
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:411
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2670
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:524
@ AV_CODEC_ID_H264
Definition: codec_id.h:76
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:491
@ AV_CODEC_ID_AC3
Definition: codec_id.h:427
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:425
@ AV_CODEC_ID_VP9
Definition: codec_id.h:217
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1695
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:652
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1034
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:589
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:395
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:364
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:146
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:861
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:491
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:411
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:410
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:696
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:690
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:641
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:737
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:132
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:66
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, size_t size)
Allocate new information from stream.
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5068
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5056
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4436
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2512
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1741
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4481
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:506
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1259
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1274
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5850
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:849
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:115
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:640
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5778
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int av_buffersink_get_format(const AVFilterContext *ctx)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int av_buffersink_get_h(const AVFilterContext *ctx)
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
int av_buffersink_get_w(const AVFilterContext *ctx)
int av_buffersink_get_channels(const AVFilterContext *ctx)
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:198
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:140
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:96
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:166
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:233
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:265
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:701
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:74
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
#define AVERROR(e)
Definition: error.h:43
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:543
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:812
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
#define AV_LOG_QUIET
Print no output.
Definition: log.h:176
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
void av_log_set_level(int level)
Set the log level.
Definition: log.c:440
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:384
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
void av_log_set_flags(int arg)
Definition: log.c:445
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:198
AVMediaType
Definition: avutil.h:199
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:71
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:83
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:93
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
int index
Definition: gxfenc.c:89
for(j=16;j >0;--j)
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
cl_device_type type
const char * key
misc image utilities
int i
Definition: input.c:407
#define av_log2
Definition: intmath.h:83
#define extra_bits(eb)
Definition: intrax8.c:125
const char * arg
Definition: jacosubdec.c:66
#define LIBAVCODEC_IDENT
Definition: version.h:42
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
common internal API header
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
Replacements for frequently missing libm functions.
#define llrint(x)
Definition: libm.h:394
#define llrintf(x)
Definition: libm.h:399
#define lrintf(x)
Definition: libm_mips.h:70
const char * desc
Definition: libsvtav1.c:79
uint8_t w
Definition: llviddspenc.c:39
#define NAN
Definition: mathematics.h:64
#define mid_pred
Definition: mathops.h:97
int frame_size
Definition: mxfenc.c:2206
AVOptions.
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
miscellaneous OS support macros and functions.
misc parsing utilities
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
#define tb
Definition: regdef.h:68
#define FF_ARRAY_ELEMS(a)
#define vsnprintf
Definition: snprintf.h:36
#define snprintf
Definition: snprintf.h:34
The bitstream filter state.
Definition: bsf.h:49
uint8_t * data
The data buffer.
Definition: buffer.h:92
Describe the class of an AVClass context structure.
Definition: log.h:67
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
main external API structure.
Definition: avcodec.h:536
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
int width
picture width / height.
Definition: avcodec.h:709
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1557
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1204
int debug
debug
Definition: avcodec.h:1623
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:602
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1171
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2085
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:788
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1150
enum AVMediaType codec_type
Definition: avcodec.h:544
AVRational framerate
Definition: avcodec.h:2071
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:915
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:668
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1193
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:826
const struct AVCodec * codec
Definition: avcodec.h:545
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1747
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1164
int sample_rate
samples per second
Definition: avcodec.h:1196
attribute_deprecated int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:1812
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:578
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1157
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:659
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
int channels
number of audio channels
Definition: avcodec.h:1197
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1178
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1699
enum AVCodecID codec_id
Definition: avcodec.h:546
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1247
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1216
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1351
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:464
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:457
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:452
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
uint64_t channel_layout
Audio only.
Definition: codec_par.h:162
int channels
Audio only.
Definition: codec_par.h:166
int width
Video only.
Definition: codec_par.h:126
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
int block_align
Audio only.
Definition: codec_par.h:177
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
int video_delay
Video only.
Definition: codec_par.h:155
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:64
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
int sample_rate
Audio only.
Definition: codec_par.h:170
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:3395
AVCodec.
Definition: codec.h:197
enum AVCodecID id
Definition: codec.h:211
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: codec.h:266
enum AVMediaType type
Definition: codec.h:210
const char * name
Name of the codec implementation.
Definition: codec.h:204
int capabilities
Codec capabilities.
Definition: codec.h:216
int depth
Number of bits in the component.
Definition: pixdesc.h:58
char * key
Definition: dict.h:82
char * value
Definition: dict.h:83
An instance of a filter.
Definition: avfilter.h:341
Format I/O context.
Definition: avformat.h:1232
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1288
AVIOContext * pb
I/O context.
Definition: avformat.h:1274
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1512
char * url
input or output URL.
Definition: avformat.h:1328
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1463
ff_const59 struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1251
AVChapter ** chapters
Definition: avformat.h:1464
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1300
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:384
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:582
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
enum AVChromaLocation chroma_location
Definition: frame.h:575
int width
Definition: frame.h:376
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
Definition: frame.h:657
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:396
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:597
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:613
int height
Definition: frame.h:376
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:555
int channels
number of audio channels, only used for audio.
Definition: frame.h:624
enum AVColorPrimaries color_primaries
Definition: frame.h:564
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:495
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:406
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:470
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:441
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:465
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:562
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:573
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:566
int sample_rate
Sample rate of the audio data.
Definition: frame.h:490
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:391
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:401
Bytestream IO Context.
Definition: avio.h:161
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
AVOption.
Definition: opt.h:248
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:510
const char * name
Definition: avformat.h:491
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:516
uint8_t * data
Definition: packet.h:307
enum AVPacketSideDataType type
Definition: packet.h:313
size_t size
Definition: packet.h:311
This structure stores compressed data.
Definition: packet.h:346
int stream_index
Definition: packet.h:371
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:375
int size
Definition: packet.h:370
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:387
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:362
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:368
uint8_t * data
Definition: packet.h:369
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:389
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1150
unsigned int nb_stream_indexes
Definition: avformat.h:1155
unsigned int * stream_index
Definition: avformat.h:1154
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1153
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int num
Numerator.
Definition: rational.h:59
int den
Denominator.
Definition: rational.h:60
Stream structure.
Definition: avformat.h:873
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:975
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1038
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1065
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:935
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:924
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:928
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:922
AVDictionary * metadata
Definition: avformat.h:937
int id
Format-specific stream ID.
Definition: avformat.h:880
int index
stream index in AVFormatContext
Definition: avformat.h:874
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1055
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:912
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:946
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:902
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:979
int64_t cur_dts
Definition: avformat.h:1066
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1015
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:926
uint32_t end_display_time
Definition: avcodec.h:2725
unsigned num_rects
Definition: avcodec.h:2726
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2728
int64_t sys_usec
Definition: ffmpeg.c:126
int64_t real_usec
Definition: ffmpeg.c:124
int64_t user_usec
Definition: ffmpeg.c:125
int index
Definition: ffmpeg.h:288
int nb_outputs
Definition: ffmpeg.h:297
OutputFilter ** outputs
Definition: ffmpeg.h:296
int nb_inputs
Definition: ffmpeg.h:295
const char * graph_desc
Definition: ffmpeg.h:289
AVFilterGraph * graph
Definition: ffmpeg.h:291
InputFilter ** inputs
Definition: ffmpeg.h:294
Definition: ffmpeg.h:66
enum HWAccelID id
Definition: ffmpeg.h:69
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:68
const char * name
Definition: ffmpeg.h:67
int eagain
Definition: ffmpeg.h:403
int64_t ts_offset
Definition: ffmpeg.h:411
AVFormatContext * ctx
Definition: ffmpeg.h:401
int64_t input_ts_offset
Definition: ffmpeg.h:409
int eof_reached
Definition: ffmpeg.h:402
int nb_streams_warn
Definition: ffmpeg.h:418
int ist_index
Definition: ffmpeg.h:404
int nb_streams
Definition: ffmpeg.h:416
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:259
uint8_t * name
Definition: ffmpeg.h:244
int sample_rate
Definition: ffmpeg.h:255
int height
Definition: ffmpeg.h:252
struct InputStream * ist
Definition: ffmpeg.h:242
AVFilterContext * filter
Definition: ffmpeg.h:241
enum AVMediaType type
Definition: ffmpeg.h:245
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:247
uint64_t channel_layout
Definition: ffmpeg.h:257
struct FilterGraph * graph
Definition: ffmpeg.h:243
AVRational sample_aspect_ratio
Definition: ffmpeg.h:253
int eof
Definition: ffmpeg.h:261
int channels
Definition: ffmpeg.h:256
int format
Definition: ffmpeg.h:250
int width
Definition: ffmpeg.h:252
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:358
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:355
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:381
int saw_first_ts
Definition: ffmpeg.h:337
double ts_scale
Definition: ffmpeg.h:336
AVFrame * decoded_frame
Definition: ffmpeg.h:311
int64_t * dts_buffer
Definition: ffmpeg.h:394
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:319
int fix_sub_duration
Definition: ffmpeg.h:345
int64_t cfr_next_pts
Definition: ffmpeg.h:332
int got_output
Definition: ffmpeg.h:347
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:378
int nb_dts_buffer
Definition: ffmpeg.h:395
AVCodecContext * dec_ctx
Definition: ffmpeg.h:309
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:322
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:383
struct InputStream::@2 prev_sub
int64_t start
Definition: ffmpeg.h:315
int ret
Definition: ffmpeg.h:348
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:371
uint64_t data_size
Definition: ffmpeg.h:387
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:379
int nb_filters
Definition: ffmpeg.h:366
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:325
int64_t next_dts
Definition: ffmpeg.h:318
int reinit_filters
Definition: ffmpeg.h:368
int wrap_correction_done
Definition: ffmpeg.h:323
int64_t max_pts
Definition: ffmpeg.h:328
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:382
int guess_layout_max
Definition: ffmpeg.h:341
AVPacket * pkt
Definition: ffmpeg.h:313
AVFrame * filter_frame
Definition: ffmpeg.h:312
uint64_t samples_decoded
Definition: ffmpeg.h:392
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:321
int top_field_first
Definition: ffmpeg.h:340
int autorotate
Definition: ffmpeg.h:343
int discard
Definition: ffmpeg.h:303
uint64_t frames_decoded
Definition: ffmpeg.h:391
int decoding_needed
Definition: ffmpeg.h:305
struct InputStream::sub2video sub2video
AVStream * st
Definition: ffmpeg.h:302
int file_index
Definition: ffmpeg.h:301
InputFilter ** filters
Definition: ffmpeg.h:365
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:380
uint64_t nb_packets
Definition: ffmpeg.h:389
AVSubtitle subtitle
Definition: ffmpeg.h:349
char * hwaccel_device
Definition: ffmpeg.h:373
AVDictionary * decoder_opts
Definition: ffmpeg.h:338
int64_t min_pts
Definition: ffmpeg.h:327
const AVCodec * dec
Definition: ffmpeg.h:310
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:372
int64_t nb_samples
Definition: ffmpeg.h:334
AVRational framerate
Definition: ffmpeg.h:339
uint64_t limit_filesize
Definition: ffmpeg.h:581
AVFormatContext * ctx
Definition: ffmpeg.h:576
int header_written
Definition: ffmpeg.h:585
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:580
AVDictionary * opts
Definition: ffmpeg.h:577
int ost_index
Definition: ffmpeg.h:578
int shortest
Definition: ffmpeg.h:583
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:579
AVFilterInOut * out_tmp
Definition: ffmpeg.h:271
struct OutputStream * ost
Definition: ffmpeg.h:266
uint64_t * channel_layouts
Definition: ffmpeg.h:283
uint8_t * name
Definition: ffmpeg.h:268
int * sample_rates
Definition: ffmpeg.h:284
int * formats
Definition: ffmpeg.h:282
int source_index
Definition: ffmpeg.h:455
OSTFinished finished
Definition: ffmpeg.h:524
int unavailable
Definition: ffmpeg.h:525
AVCodecContext * enc_ctx
Definition: ffmpeg.h:474
uint8_t level
Definition: svq3.c:206
libswresample public header
#define av_malloc_array(a, b)
#define ff_dlog(a,...)
#define av_realloc_f(p, o, n)
#define av_freep(p)
#define av_malloc(s)
#define av_log(a,...)
static void error(const char *err)
static uint8_t tmp[11]
Definition: aes_ctr.c:27
#define src
Definition: vp8dsp.c:255
int64_t bitrate
Definition: h264_levels.c:131
FILE * out
Definition: movenc.c:54
int64_t duration
Definition: movenc.c:64
AVPacket * pkt
Definition: movenc.c:59
AVFormatContext * ctx
Definition: movenc.c:48
static void finish(void)
Definition: movenc.c:342
AVDictionary * opts
Definition: movenc.c:50
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
timestamp utils, mostly useful for debugging/logging purposes
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
static int64_t pts
int size
static AVStream * ost
const char * b
Definition: vf_curves.c:118
const char * r
Definition: vf_curves.c:116
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:873
float delta
static double c[64]