FFmpeg  4.4.5
vf_displace.c
/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

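/*
 * The displace filter remaps each pixel of its first input using two extra
 * inputs, xmap and ymap.  Map samples are treated as offsets around a
 * neutral value of 128, so the output pixel at (x, y) is fetched from the
 * source at roughly (x + xmap(x,y) - 128, y + ymap(x,y) - 128).
 * EdgeMode selects what happens when that position falls outside the frame:
 * substitute a blank value, clamp to the border (smear), wrap around, or
 * mirror back into the frame.
 */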
enum EdgeMode {
    EDGE_BLANK,
    EDGE_SMEAR,
    EDGE_WRAP,
    EDGE_MIRROR,
    EDGE_NB
};

typedef struct DisplaceContext {
    const AVClass *class;
    int width[4], height[4];
    enum EdgeMode edge;
    int nb_planes;
    int nb_components;
    int step;
    uint8_t blank[4];
    FFFrameSync fs;

    void (*displace)(struct DisplaceContext *s, const AVFrame *in,
                     const AVFrame *xpic, const AVFrame *ypic, AVFrame *out);
} DisplaceContext;

#define OFFSET(x) offsetof(DisplaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption displace_options[] = {
    { "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=EDGE_SMEAR}, 0, EDGE_NB-1, FLAGS, "edge" },
    {   "blank",  "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_BLANK},  0, 0, FLAGS, "edge" },
    {   "smear",  "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_SMEAR},  0, 0, FLAGS, "edge" },
    {   "wrap",   "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_WRAP},   0, 0, FLAGS, "edge" },
    {   "mirror", "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_MIRROR}, 0, 0, FLAGS, "edge" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(displace);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA,
        AV_PIX_FMT_BGRA, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
        AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

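/*
 * Planar path: each plane is displaced independently, using the matching
 * plane of the x/y maps.  ysrc, xsrc and dst advance by one line per row;
 * the source sample is addressed directly as src[Y * slinesize + X].
 */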
static void displace_planar(DisplaceContext *s, const AVFrame *in,
                            const AVFrame *xpic, const AVFrame *ypic,
                            AVFrame *out)
{
    int plane, x, y;

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int h = s->height[plane];
        const int w = s->width[plane];
        const int dlinesize = out->linesize[plane];
        const int slinesize = in->linesize[plane];
        const int xlinesize = xpic->linesize[plane];
        const int ylinesize = ypic->linesize[plane];
        const uint8_t *src = in->data[plane];
        const uint8_t *ysrc = ypic->data[plane];
        const uint8_t *xsrc = xpic->data[plane];
        uint8_t *dst = out->data[plane];
        const uint8_t blank = s->blank[plane];

        for (y = 0; y < h; y++) {
            switch (s->edge) {
            case EDGE_BLANK:
                for (x = 0; x < w; x++) {
                    int Y = y + ysrc[x] - 128;
                    int X = x + xsrc[x] - 128;

                    if (Y < 0 || Y >= h || X < 0 || X >= w)
                        dst[x] = blank;
                    else
                        dst[x] = src[Y * slinesize + X];
                }
                break;
            case EDGE_SMEAR:
                for (x = 0; x < w; x++) {
                    int Y = av_clip(y + ysrc[x] - 128, 0, h - 1);
                    int X = av_clip(x + xsrc[x] - 128, 0, w - 1);
                    dst[x] = src[Y * slinesize + X];
                }
                break;
            case EDGE_WRAP:
                for (x = 0; x < w; x++) {
                    int Y = (y + ysrc[x] - 128) % h;
                    int X = (x + xsrc[x] - 128) % w;

                    if (Y < 0)
                        Y += h;
                    if (X < 0)
                        X += w;
                    dst[x] = src[Y * slinesize + X];
                }
                break;
            case EDGE_MIRROR:
                for (x = 0; x < w; x++) {
                    int Y = y + ysrc[x] - 128;
                    int X = x + xsrc[x] - 128;

                    if (Y < 0)
                        Y = (-Y) % h;
                    if (X < 0)
                        X = (-X) % w;
                    if (Y >= h)
                        Y = h - (Y % h) - 1;
                    if (X >= w)
                        X = w - (X % w) - 1;
                    dst[x] = src[Y * slinesize + X];
                }
                break;
            }

            ysrc += ylinesize;
            xsrc += xlinesize;
            dst += dlinesize;
        }
    }
}

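/*
 * Packed path: all components of a pixel live in plane 0, 'step' bytes
 * apart.  Each component is displaced by the corresponding component of
 * the packed x/y maps.
 */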
static void displace_packed(DisplaceContext *s, const AVFrame *in,
                            const AVFrame *xpic, const AVFrame *ypic,
                            AVFrame *out)
{
    const int step = s->step;
    const int h = s->height[0];
    const int w = s->width[0];
    const int dlinesize = out->linesize[0];
    const int slinesize = in->linesize[0];
    const int xlinesize = xpic->linesize[0];
    const int ylinesize = ypic->linesize[0];
    const uint8_t *src = in->data[0];
    const uint8_t *ysrc = ypic->data[0];
    const uint8_t *xsrc = xpic->data[0];
    const uint8_t *blank = s->blank;
    uint8_t *dst = out->data[0];
    int c, x, y;

    for (y = 0; y < h; y++) {
        switch (s->edge) {
        case EDGE_BLANK:
            for (x = 0; x < w; x++) {
                for (c = 0; c < s->nb_components; c++) {
                    int Y = y + (ysrc[x * step + c] - 128);
                    int X = x + (xsrc[x * step + c] - 128);

                    if (Y < 0 || Y >= h || X < 0 || X >= w)
                        dst[x * step + c] = blank[c];
                    else
                        dst[x * step + c] = src[Y * slinesize + X * step + c];
                }
            }
            break;
        case EDGE_SMEAR:
            for (x = 0; x < w; x++) {
                for (c = 0; c < s->nb_components; c++) {
                    int Y = av_clip(y + (ysrc[x * step + c] - 128), 0, h - 1);
                    int X = av_clip(x + (xsrc[x * step + c] - 128), 0, w - 1);

                    dst[x * step + c] = src[Y * slinesize + X * step + c];
                }
            }
            break;
        case EDGE_WRAP:
            for (x = 0; x < w; x++) {
                for (c = 0; c < s->nb_components; c++) {
                    int Y = (y + (ysrc[x * step + c] - 128)) % h;
                    int X = (x + (xsrc[x * step + c] - 128)) % w;

                    if (Y < 0)
                        Y += h;
                    if (X < 0)
                        X += w;
                    dst[x * step + c] = src[Y * slinesize + X * step + c];
                }
            }
            break;
        case EDGE_MIRROR:
            for (x = 0; x < w; x++) {
                for (c = 0; c < s->nb_components; c++) {
                    int Y = y + ysrc[x * step + c] - 128;
                    int X = x + xsrc[x * step + c] - 128;

                    if (Y < 0)
                        Y = (-Y) % h;
                    if (X < 0)
                        X = (-X) % w;
                    if (Y >= h)
                        Y = h - (Y % h) - 1;
                    if (X >= w)
                        X = w - (X % w) - 1;
                    dst[x * step + c] = src[Y * slinesize + X * step + c];
                }
            }
            break;
        }

        ysrc += ylinesize;
        xsrc += xlinesize;
        dst += dlinesize;
    }
}

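/*
 * Framesync callback: fetch the current source, xmap and ymap frames,
 * produce one displaced output frame (or pass the input through when the
 * filter is disabled) and forward it downstream with a rescaled timestamp.
 */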
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    DisplaceContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *xpic, *ypic;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,   0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &ypic, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        s->displace(s, in, xpic, ypic, out);
    }
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}

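/*
 * Per-link setup for the "source" input: pick the planar or packed code
 * path, derive per-plane dimensions from the chroma subsampling, and set
 * the blank value used by the "blank" edge mode (16/128 for YUV; RGB stays
 * at the zero-initialized default).
 */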
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DisplaceContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_components = desc->nb_components;

    if (s->nb_planes > 1 || s->nb_components == 1)
        s->displace = displace_planar;
    else
        s->displace = displace_packed;

    if (!(desc->flags & AV_PIX_FMT_FLAG_RGB)) {
        s->blank[1] = s->blank[2] = 128;
        s->blank[0] = 16;
    }

    s->step = av_get_padded_bits_per_pixel(desc) >> 3;
    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    return 0;
}

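/*
 * Output setup: all three inputs must share pixel format and dimensions.
 * A 3-input framesync is configured with the source stream driving the
 * output (sync = 2, EXT_STOP) and the map streams extended to infinity
 * after their last frame, so even a single-frame map keeps displacing the
 * rest of the video.
 */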
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DisplaceContext *s = ctx->priv;
    AVFilterLink *srclink = ctx->inputs[0];
    AVFilterLink *xlink = ctx->inputs[1];
    AVFilterLink *ylink = ctx->inputs[2];
    FFFrameSyncIn *in;
    int ret;

    if (srclink->format != xlink->format ||
        srclink->format != ylink->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srclink->w != xlink->w ||
        srclink->h != xlink->h ||
        srclink->w != ylink->w ||
        srclink->h != ylink->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (%dx%d) "
               "and/or third input link %s parameters (%dx%d)\n",
               ctx->input_pads[0].name, srclink->w, srclink->h,
               ctx->input_pads[1].name, xlink->w, xlink->h,
               ctx->input_pads[2].name, ylink->w, ylink->h);
        return AVERROR(EINVAL);
    }

    outlink->w = srclink->w;
    outlink->h = srclink->h;
    outlink->sample_aspect_ratio = srclink->sample_aspect_ratio;
    outlink->frame_rate = srclink->frame_rate;

    ret = ff_framesync_init(&s->fs, ctx, 3);
    if (ret < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srclink->time_base;
    in[1].time_base = xlink->time_base;
    in[2].time_base = ylink->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_STOP;
    in[1].sync   = 1;
    in[1].before = EXT_NULL;
    in[1].after  = EXT_INFINITY;
    in[2].sync   = 1;
    in[2].before = EXT_NULL;
    in[2].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}

static int activate(AVFilterContext *ctx)
{
    DisplaceContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DisplaceContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad displace_inputs[] = {
    {
        .name         = "source",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    {
        .name         = "xmap",
        .type         = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name         = "ymap",
        .type         = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad displace_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

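/*
 * Illustrative command-line use (xmap and ymap supplied as extra inputs):
 *   ffmpeg -i INPUT -i XMAP -i YMAP -lavfi '[0][1][2]displace=edge=wrap' OUTPUT
 */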
AVFilter ff_vf_displace = {
    .name          = "displace",
    .description   = NULL_IF_CONFIG_SMALL("Displace pixels."),
    .priv_size     = sizeof(DisplaceContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = displace_inputs,
    .outputs       = displace_outputs,
    .priv_class    = &displace_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};