FFmpeg 4.4.5
vf_photosensitivity.c

/*
 * Copyright (c) 2019 Vladimir Panteleev
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"

#include "formats.h"
#include "internal.h"
#include "video.h"

#define MAX_FRAMES 240
#define GRID_SIZE 8
#define NUM_CHANNELS 3
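
/*
 * Overview: each frame is reduced to a GRID_SIZE x GRID_SIZE grid of
 * averaged RGB cells (convert_frame), the change between consecutive
 * grids is measured as a "badness" score (get_badness) and accumulated
 * over a sliding window of nb_frames frames, and frames that would push
 * the accumulated badness over the threshold are replaced by a blend of
 * the previous output frame and the new input (blend_frame).
 */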

typedef struct PhotosensitivityFrame {
    uint8_t grid[GRID_SIZE][GRID_SIZE][4];
} PhotosensitivityFrame;

typedef struct PhotosensitivityContext {
    const AVClass *class;

    int nb_frames;
    int skip;
    float threshold_multiplier;
    int bypass;

    int badness_threshold;

    /* Circular buffer */
    int history[MAX_FRAMES];   /* badness deltas of the last nb_frames frames */
    int history_pos;           /* next slot to overwrite */

    PhotosensitivityFrame last_frame_e;
    AVFrame *last_frame_av;
} PhotosensitivityContext;

#define OFFSET(x) offsetof(PhotosensitivityContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption photosensitivity_options[] = {
    { "frames",    "set how many frames to use",                         OFFSET(nb_frames),            AV_OPT_TYPE_INT,   {.i64=30}, 2,   MAX_FRAMES, FLAGS },
    { "f",         "set how many frames to use",                         OFFSET(nb_frames),            AV_OPT_TYPE_INT,   {.i64=30}, 2,   MAX_FRAMES, FLAGS },
    { "threshold", "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1},  0.1, FLT_MAX,    FLAGS },
    { "t",         "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1},  0.1, FLT_MAX,    FLAGS },
    { "skip",      "set pixels to skip when sampling frames",            OFFSET(skip),                 AV_OPT_TYPE_INT,   {.i64=1},  1,   1024,       FLAGS },
    { "bypass",    "leave frames unchanged",                             OFFSET(bypass),               AV_OPT_TYPE_BOOL,  {.i64=0},  0,   1,          FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(photosensitivity);
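
/*
 * Illustrative invocation (not part of this file): the options above are
 * reachable through any libavfilter graph, e.g.
 *
 *     ffmpeg -i in.mp4 -vf "photosensitivity=frames=30:threshold=1:skip=2" out.mp4
 *
 * Option names, defaults and ranges are defined by photosensitivity_options[].
 */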

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_RGB24,
        AV_PIX_FMT_BGR24,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, formats);
}

typedef struct ThreadData_convert_frame
{
    AVFrame *in;
    PhotosensitivityFrame *out;
    int skip;
} ThreadData_convert_frame;

#define NUM_CELLS (GRID_SIZE * GRID_SIZE)

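/*
 * Slice worker: averages the pixels of each grid cell assigned to this
 * job. The NUM_CELLS cells are split contiguously across jobs, and the
 * "skip" option subsamples pixels within each cell, trading accuracy
 * for speed.
 */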
static int convert_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    int cell, gx, gy, x0, x1, y0, y1, x, y, c, area;
    int sum[NUM_CHANNELS];
    const uint8_t *p;

    ThreadData_convert_frame *td = arg;

    const int slice_start = (NUM_CELLS * jobnr) / nb_jobs;
    const int slice_end   = (NUM_CELLS * (jobnr+1)) / nb_jobs;

    int width = td->in->width, height = td->in->height, linesize = td->in->linesize[0], skip = td->skip;
    const uint8_t *data = td->in->data[0];

    for (cell = slice_start; cell < slice_end; cell++) {
        gx = cell % GRID_SIZE;
        gy = cell / GRID_SIZE;

        x0 = width  *  gx    / GRID_SIZE;
        x1 = width  * (gx+1) / GRID_SIZE;
        y0 = height *  gy    / GRID_SIZE;
        y1 = height * (gy+1) / GRID_SIZE;

        for (c = 0; c < NUM_CHANNELS; c++) {
            sum[c] = 0;
        }
        for (y = y0; y < y1; y += skip) {
            p = data + y * linesize + x0 * NUM_CHANNELS;
            for (x = x0; x < x1; x += skip) {
                sum[0] += p[0];
                sum[1] += p[1];
                sum[2] += p[2];
                p += NUM_CHANNELS * skip;
                // TODO: variable size
            }
        }

        /* average the sampled pixels over the cell, rounding the sample count up */
        area = ((x1 - x0 + skip - 1) / skip) * ((y1 - y0 + skip - 1) / skip);
        for (c = 0; c < NUM_CHANNELS; c++) {
            if (area)
                sum[c] /= area;
            td->out->grid[gy][gx][c] = sum[c];
        }
    }
    return 0;
}

static void convert_frame(AVFilterContext *ctx, AVFrame *in, PhotosensitivityFrame *out, int skip)
{
    ThreadData_convert_frame td;
    td.in   = in;
    td.out  = out;
    td.skip = skip;
    ctx->internal->execute(ctx, convert_frame_partial, &td, NULL, FFMIN(NUM_CELLS, ff_filter_get_nb_threads(ctx)));
}

typedef struct ThreadData_blend_frame
{
    AVFrame *target;
    AVFrame *source;
    uint16_t s_mul;
} ThreadData_blend_frame;

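/*
 * Slice worker: 8.8 fixed-point crossfade of source into target. s_mul is
 * the source weight scaled by 0x100, so for a blend factor f the result is
 * approximately t = t*(1-f) + s*f. Note that the inner loop runs over the
 * full linesize, which may include padding bytes beyond the visible width.
 */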
static int blend_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    int x, y;
    uint8_t *t, *s;

    ThreadData_blend_frame *td = arg;
    const uint16_t s_mul = td->s_mul;
    const uint16_t t_mul = 0x100 - s_mul;
    const int slice_start = (td->target->height * jobnr) / nb_jobs;
    const int slice_end   = (td->target->height * (jobnr+1)) / nb_jobs;
    const int linesize = td->target->linesize[0];

    for (y = slice_start; y < slice_end; y++) {
        t = td->target->data[0] + y * td->target->linesize[0];
        s = td->source->data[0] + y * td->source->linesize[0];
        for (x = 0; x < linesize; x++) {
            *t = (*t * t_mul + *s * s_mul) >> 8;
            t++; s++;
        }
    }
    return 0;
}

static void blend_frame(AVFilterContext *ctx, AVFrame *target, AVFrame *source, float factor)
{
    ThreadData_blend_frame td;
    td.target = target;
    td.source = source;
    td.s_mul  = (uint16_t)(factor * 0x100);
    ctx->internal->execute(ctx, blend_frame_partial, &td, NULL, FFMIN(ctx->outputs[0]->h, ff_filter_get_nb_threads(ctx)));
}

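/*
 * Badness metric: sum of absolute differences between two grid
 * representations over all cells and channels. The maximum possible value
 * for one frame pair is NUM_CELLS * NUM_CHANNELS * 255.
 */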
static int get_badness(PhotosensitivityFrame *a, PhotosensitivityFrame *b)
{
    int badness, x, y, c;
    badness = 0;
    for (c = 0; c < NUM_CHANNELS; c++) {
        for (y = 0; y < GRID_SIZE; y++) {
            for (x = 0; x < GRID_SIZE; x++) {
                badness += abs((int)a->grid[y][x][c] - (int)b->grid[y][x][c]);
            }
        }
    }
    return badness;
}

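/*
 * With the default threshold_multiplier of 1 and frames=30, this evaluates
 * to 8*8*4*256*30/128 = 15360, i.e. an average budget of 512 badness units
 * per history slot.
 */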
static int config_input(AVFilterLink *inlink)
{
    /* const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); */
    AVFilterContext *ctx = inlink->dst;
    PhotosensitivityContext *s = ctx->priv;

    s->badness_threshold = (int)(GRID_SIZE * GRID_SIZE * 4 * 256 * s->nb_frames * s->threshold_multiplier / 128);

    return 0;
}

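/*
 * Per-frame control flow:
 *  1. compute a weighted moving average of the badness history (more
 *     recent entries get higher weights);
 *  2. measure this frame's badness against the previous frame's grid;
 *  3. if the accumulated badness stays below the threshold (or bypass is
 *     set), pass the frame through; otherwise duplicate the last output
 *     frame (factor <= 0) or blend the new frame into it with a factor
 *     chosen to land exactly on the threshold;
 *  4. export the badness figures as lavfi.photosensitivity.* metadata.
 */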
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int this_badness, current_badness, fixed_badness, new_badness, i, res;
    PhotosensitivityFrame ef; /* grid representation of the current frame */
    AVFrame *src, *out;
    int free_in = 0;
    float factor;
    AVDictionary **metadata;

    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PhotosensitivityContext *s = ctx->priv;

    /* weighted moving average */
    current_badness = 0;
    for (i = 1; i < s->nb_frames; i++)
        current_badness += i * s->history[(s->history_pos + i) % s->nb_frames];
    current_badness /= s->nb_frames;

    convert_frame(ctx, in, &ef, s->skip);
    this_badness = get_badness(&ef, &s->last_frame_e);
    new_badness = current_badness + this_badness;
    av_log(s, AV_LOG_VERBOSE, "badness: %6d -> %6d / %6d (%3d%% - %s)\n",
        current_badness, new_badness, s->badness_threshold,
        100 * new_badness / s->badness_threshold, new_badness < s->badness_threshold ? "OK" : "EXCEEDED");

    fixed_badness = new_badness;
    if (new_badness < s->badness_threshold || !s->last_frame_av || s->bypass) {
        factor = 1; /* for metadata */
        av_frame_free(&s->last_frame_av);
        s->last_frame_av = src = in;
        s->last_frame_e = ef;
        s->history[s->history_pos] = this_badness;
    } else {
        factor = (float)(s->badness_threshold - current_badness) / (new_badness - current_badness);
        if (factor <= 0) {
            /* just duplicate the frame */
            s->history[s->history_pos] = 0; /* frame was duplicated, thus, delta is zero */
        } else {
            res = av_frame_make_writable(s->last_frame_av);
            if (res) {
                av_frame_free(&in);
                return res;
            }
            blend_frame(ctx, s->last_frame_av, in, factor);

            convert_frame(ctx, s->last_frame_av, &ef, s->skip);
            this_badness = get_badness(&ef, &s->last_frame_e);
            fixed_badness = current_badness + this_badness;
            av_log(s, AV_LOG_VERBOSE, " fixed: %6d -> %6d / %6d (%3d%%) factor=%5.3f\n",
                current_badness, fixed_badness, s->badness_threshold,
                100 * fixed_badness / s->badness_threshold, factor); /* percentage now matches the printed fixed_badness */
            s->last_frame_e = ef;
            s->history[s->history_pos] = this_badness;
        }
        src = s->last_frame_av;
        free_in = 1;
    }
    s->history_pos = (s->history_pos + 1) % s->nb_frames;

    out = ff_get_video_buffer(outlink, in->width, in->height);
    if (!out) {
        if (free_in == 1)
            av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    metadata = &out->metadata;
    if (metadata) {
        char value[128];

        snprintf(value, sizeof(value), "%f", (float)new_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.badness", value, 0);

        snprintf(value, sizeof(value), "%f", (float)fixed_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.fixed-badness", value, 0);

        snprintf(value, sizeof(value), "%f", (float)this_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.frame-badness", value, 0);

        snprintf(value, sizeof(value), "%f", factor);
        av_dict_set(metadata, "lavfi.photosensitivity.factor", value, 0);
    }
    av_frame_copy(out, src);
    if (free_in == 1)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PhotosensitivityContext *s = ctx->priv;

    av_frame_free(&s->last_frame_av);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_photosensitivity = {
    .name          = "photosensitivity",
    .description   = NULL_IF_CONFIG_SMALL("Filter out photosensitive epilepsy seizure-inducing flashes."),
    .priv_size     = sizeof(PhotosensitivityContext),
    .priv_class    = &photosensitivity_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
};
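
/*
 * Illustrative note (not part of this file): the per-frame metadata set in
 * filter_frame() can be inspected downstream, e.g. with
 *
 *     ffmpeg -i in.mp4 -vf "photosensitivity,metadata=mode=print" -f null -
 */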