FFmpeg 4.4.5 - libavfilter/vf_estdif.c
/*
 * Copyright (c) 2021 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct ESTDIFContext {
    const AVClass *class;

    int mode;             ///< 0 is frame, 1 is field
    int parity;           ///< frame field parity
    int deint;            ///< which frames to deinterlace
    int rslope;           ///< best edge slope search radius
    int redge;            ///< best edge match search radius
    int interp;           ///< type of interpolation
    int linesize[4];      ///< bytes of pixel data per line for each plane
    int planewidth[4];    ///< width of each plane
    int planeheight[4];   ///< height of each plane
    int field;            ///< which field are we on, 0 or 1
    int eof;
    int depth;
    int half;
    int nb_planes;
    int nb_threads;
    int64_t pts;
    AVFrame *prev;

    void (*interpolate)(struct ESTDIFContext *s, uint8_t *dst,
                        const uint8_t *prev_line,  const uint8_t *next_line,
                        const uint8_t *prev2_line, const uint8_t *next2_line,
                        const uint8_t *prev3_line, const uint8_t *next3_line,
                        int x, int width, int rslope, int redge, unsigned half,
                        int depth, int *K);

    unsigned (*mid_8[3])(const uint8_t *const prev,
                         const uint8_t *const next,
                         const uint8_t *const prev2,
                         const uint8_t *const next2,
                         const uint8_t *const prev3,
                         const uint8_t *const next3,
                         int end, int x, int k, int depth);

    unsigned (*mid_16[3])(const uint16_t *const prev,
                          const uint16_t *const next,
                          const uint16_t *const prev2,
                          const uint16_t *const next2,
                          const uint16_t *const prev3,
                          const uint16_t *const next3,
                          int end, int x, int k, int depth);
} ESTDIFContext;

#define MAX_R 15
#define S (MAX_R * 2 + 1)

#define OFFSET(x) offsetof(ESTDIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }

static const AVOption estdif_options[] = {
    { "mode", "specify the mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "mode" },
    CONST("frame", "send one frame for each frame", 0, "mode"),
    CONST("field", "send one frame for each field", 1, "mode"),
    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=-1}, -1, 1, FLAGS, "parity" },
    CONST("tff", "assume top field first", 0, "parity"),
    CONST("bff", "assume bottom field first", 1, "parity"),
    CONST("auto", "auto detect parity", -1, "parity"),
    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
    CONST("all", "deinterlace all frames", 0, "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
    { "rslope", "specify the search radius for edge slope tracing", OFFSET(rslope), AV_OPT_TYPE_INT, {.i64=1}, 1, MAX_R, FLAGS, },
    { "redge", "specify the search radius for best edge matching", OFFSET(redge), AV_OPT_TYPE_INT, {.i64=2}, 0, MAX_R, FLAGS, },
    { "interp", "specify the type of interpolation", OFFSET(interp), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS, "interp" },
    CONST("2p", "two-point interpolation", 0, "interp"),
    CONST("4p", "four-point interpolation", 1, "interp"),
    CONST("6p", "six-point interpolation", 2, "interp"),
    { NULL }
};
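
/* Illustrative usage (hypothetical file names): output at field rate with
 * six-point interpolation, touching only frames flagged as interlaced:
 *   ffmpeg -i in.mkv -vf estdif=mode=field:interp=6p:deint=interlaced out.mkv
 * All options carry AV_OPT_FLAG_RUNTIME_PARAM, so they may also be adjusted
 * while the graph is running (e.g. through the sendcmd filter). */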

AVFILTER_DEFINE_CLASS(estdif);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        /* full list of supported 8-16 bit planar GRAY/YUV/YUVA/GBR formats
         * elided in this listing */
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

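/* The output link runs at "field rate": twice the input frame rate with a
 * correspondingly finer time base, so per-field timestamps stay integral.
 * In frame mode only one output per input is actually produced (its pts is
 * scaled by 2 in filter_frame() below). */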
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];

    outlink->time_base.num = inlink->time_base.num;
    outlink->time_base.den = inlink->time_base.den * 2;
    outlink->frame_rate.num = inlink->frame_rate.num * 2;
    outlink->frame_rate.den = inlink->frame_rate.den;

    return 0;
}

typedef struct ThreadData {
    AVFrame *out, *in;
} ThreadData;

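/* midl_*: plain average of the lines above and below the missing one,
 * sampled along candidate slope k.  Serves as the reference value for the
 * slope cost below. */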
#define MIDL(type, ss)                                          \
static unsigned midl_##ss(const type *const prev,               \
                          const type *const next,               \
                          int end, int x, int k)                \
{                                                               \
    return (prev[av_clip(x + k, 0, end)] +                      \
            next[av_clip(x - k, 0, end)] + 1) >> 1;             \
}

MIDL(uint8_t, 8)
MIDL(uint16_t, 16)

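/* mid2_*: "2p" interpolation, a straight two-point average along the chosen
 * slope.  The prev2/next2/prev3/next3 parameters are unused here; they only
 * keep the signature identical to the 4p/6p kernels so all three fit the
 * mid_8/mid_16 function-pointer tables. */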
#define MID2(type, ss)                                          \
static unsigned mid2_##ss(const type *const prev,               \
                          const type *const next,               \
                          const type *const prev2,              \
                          const type *const next2,              \
                          const type *const prev3,              \
                          const type *const next3,              \
                          int end, int x, int k, int depth)     \
{                                                               \
    return (prev[av_clip(x + k, 0, end)] +                      \
            next[av_clip(x - k, 0, end)] + 1) >> 1;             \
}

MID2(uint8_t, 8)
MID2(uint16_t, 16)

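/* mid4_*: "4p" interpolation, a 4-tap (-1, 9, 9, -1)/16 kernel applied along
 * the slope, clipped to the sample bit depth. */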
#define MID4(type, ss)                                          \
static unsigned mid4_##ss(const type *const prev,               \
                          const type *const next,               \
                          const type *const prev2,              \
                          const type *const next2,              \
                          const type *const prev3,              \
                          const type *const next3,              \
                          int end, int x, int k, int depth)     \
{                                                               \
    return av_clip_uintp2_c((                                   \
               9 * (prev[av_clip(x + k, 0, end)] +              \
                    next[av_clip(x - k, 0, end)]) -             \
               1 * (prev2[av_clip(x + k*3, 0, end)] +           \
                    next2[av_clip(x - k*3, 0, end)]) + 8) >> 4, \
               depth);                                          \
}

MID4(uint8_t, 8)
MID4(uint16_t, 16)

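/* mid6_*: "6p" interpolation, a 6-tap (1, -5, 20, 20, -5, 1)/32 kernel along
 * the slope, clipped to the sample bit depth. */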
#define MID6(type, ss)                                            \
static unsigned mid6_##ss(const type *const prev,                 \
                          const type *const next,                 \
                          const type *const prev2,                \
                          const type *const next2,                \
                          const type *const prev3,                \
                          const type *const next3,                \
                          int end, int x, int k, int depth)       \
{                                                                 \
    return av_clip_uintp2_c((                                     \
               20 * (prev[av_clip(x + k, 0, end)] +               \
                     next[av_clip(x - k, 0, end)]) -              \
                5 * (prev2[av_clip(x + k*3, 0, end)] +            \
                     next2[av_clip(x - k*3, 0, end)]) +           \
                1 * (prev3[av_clip(x + k*5, 0, end)] +            \
                     next3[av_clip(x - k*5, 0, end)]) + 16) >> 5, \
               depth);                                            \
}

MID6(uint8_t, 8)
MID6(uint16_t, 16)

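/* diff_*: absolute difference between the line above and the line below,
 * sheared by slope k and shifted by j inside the matching window. */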
#define DIFF(type, ss)                                          \
static unsigned diff_##ss(const type *const prev,               \
                          const type *const next,               \
                          int end, int x, int k, int j)         \
{                                                               \
    return FFABS(prev[av_clip(x + k + j, 0, end)] -             \
                 next[av_clip(x - k + j, 0, end)]);             \
}

DIFF(uint8_t, 8)
DIFF(uint16_t, 16)

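/* cost_*: how far the slope-k average strays from the pixels directly above
 * and below; favors slopes that stay consistent with the local vertical
 * neighbourhood. */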
#define COST(type, ss)                                          \
static unsigned cost_##ss(const type *const prev,               \
                          const type *const next,               \
                          int end, int x, int k)                \
{                                                               \
    const int m = midl_##ss(prev, next, end, x, k);             \
    const int p = prev[x];                                      \
    const int n = next[x];                                      \
                                                                \
    return FFABS(p - m) + FFABS(n - m);                         \
}

COST(uint8_t, 8)
COST(uint16_t, 16)

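/* INTERPOLATE: estimate the edge slope k for the missing pixel at column x
 * and interpolate along it.  Two candidate sets are scored: slopes around
 * zero (only when the slope carried over from the previous column has
 * drifted beyond rslope) and slopes around the current k.  Each score sums
 * the sheared line differences over a 2*redge+1 window, plus f times the
 * vertical-consistency cost and a bias h*|slope| against steep slopes.  The
 * winning slope updates *K and is fed to the selected 2p/4p/6p kernel. */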
#define INTERPOLATE(type, atype, max, ss)                                      \
static void interpolate_##ss(ESTDIFContext *s, uint8_t *ddst,                  \
                             const uint8_t *const pprev_line,                  \
                             const uint8_t *const nnext_line,                  \
                             const uint8_t *const pprev2_line,                 \
                             const uint8_t *const nnext2_line,                 \
                             const uint8_t *const pprev3_line,                 \
                             const uint8_t *const nnext3_line,                 \
                             int x, int width, int rslope,                     \
                             int redge, unsigned h, int depth,                 \
                             int *K)                                           \
{                                                                              \
    type *dst = (type *)ddst;                                                  \
    const type *const prev_line  = (const type *const)pprev_line;              \
    const type *const prev2_line = (const type *const)pprev2_line;             \
    const type *const prev3_line = (const type *const)pprev3_line;             \
    const type *const next_line  = (const type *const)nnext_line;              \
    const type *const next2_line = (const type *const)nnext2_line;             \
    const type *const next3_line = (const type *const)nnext3_line;             \
    const int interp = s->interp;                                              \
    const int end = width - 1;                                                 \
    const atype f = redge + 2;                                                 \
    atype sd[S], sD[S], di = 0;                                                \
    atype dmin = max;                                                          \
    int k = *K;                                                                \
                                                                               \
    for (int i = -rslope; i <= rslope && abs(k) > rslope; i++) {               \
        atype sum = 0;                                                         \
                                                                               \
        for (int j = -redge; j <= redge; j++) {                                \
            sum += diff_##ss(prev_line,  next_line,  end, x, i, j);            \
            sum += diff_##ss(prev2_line, prev_line,  end, x, i, j);            \
            sum += diff_##ss(next_line,  next2_line, end, x, i, j);            \
        }                                                                      \
                                                                               \
        sD[i + rslope]  = sum;                                                 \
        sD[i + rslope] += f * cost_##ss(prev_line, next_line, end, x, i);      \
        sD[i + rslope] += h * abs(i);                                          \
                                                                               \
        dmin = FFMIN(sD[i + rslope], dmin);                                    \
    }                                                                          \
                                                                               \
    for (int i = -rslope; i <= rslope; i++) {                                  \
        atype sum = 0;                                                         \
                                                                               \
        for (int j = -redge; j <= redge; j++) {                                \
            sum += diff_##ss(prev_line,  next_line,  end, x, k + i, j);        \
            sum += diff_##ss(prev2_line, prev_line,  end, x, k + i, j);        \
            sum += diff_##ss(next_line,  next2_line, end, x, k + i, j);        \
        }                                                                      \
                                                                               \
        sd[i + rslope]  = sum;                                                 \
        sd[i + rslope] += f * cost_##ss(prev_line, next_line, end, x, k + i);  \
        sd[i + rslope] += h * abs(k + i);                                      \
                                                                               \
        dmin = FFMIN(sd[i + rslope], dmin);                                    \
    }                                                                          \
                                                                               \
    for (int i = -rslope; i <= rslope && abs(k) > rslope; i++) {               \
        if (dmin == sD[i + rslope]) {                                          \
            di = 1;                                                            \
            k = i;                                                             \
            break;                                                             \
        }                                                                      \
    }                                                                          \
                                                                               \
    for (int i = -rslope; i <= rslope && !di; i++) {                           \
        if (dmin == sd[i + rslope]) {                                          \
            k += i;                                                            \
            break;                                                             \
        }                                                                      \
    }                                                                          \
                                                                               \
    dst[x] = s->mid_##ss[interp](prev_line,  next_line,                        \
                                 prev2_line, next2_line,                       \
                                 prev3_line, next3_line,                       \
                                 end, x, k, depth);                            \
                                                                               \
    *K = k;                                                                    \
}

INTERPOLATE(uint8_t, unsigned, UINT_MAX, 8)
INTERPOLATE(uint16_t, uint64_t, UINT64_MAX, 16)

static int deinterlace_slice(AVFilterContext *ctx, void *arg,
                             int jobnr, int nb_jobs)
{
    ESTDIFContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    const int rslope = s->rslope;
    const int redge = s->redge;
    const int half = s->half;
    const int depth = s->depth;
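    /* Decide which source field is kept for this pass: an explicit parity
     * option wins, otherwise the frame's top_field_first flag is used
     * (progressive input is treated as top-field-first).  In field mode
     * s->field alternates between passes, so the second output keeps the
     * other field. */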
    const int interlaced = in->interlaced_frame;
    const int tff = (s->field == (s->parity == -1 ? interlaced ? in->top_field_first : 1 :
                                  s->parity ^ 1));

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const uint8_t *src_data = in->data[plane];
        uint8_t *dst_data = out->data[plane];
        const int linesize = s->linesize[plane];
        const int width = s->planewidth[plane];
        const int height = s->planeheight[plane];
        const int src_linesize = in->linesize[plane];
        const int dst_linesize = out->linesize[plane];
        const int start = (height * jobnr) / nb_jobs;
        const int end = (height * (jobnr+1)) / nb_jobs;
        const uint8_t *prev_line, *prev2_line, *next_line, *next2_line, *in_line;
        const uint8_t *prev3_line, *next3_line;
        uint8_t *out_line;
        int y_out;

        y_out = start + (tff ^ (start & 1));

        in_line  = src_data + (y_out * src_linesize);
        out_line = dst_data + (y_out * dst_linesize);

        while (y_out < end) {
            memcpy(out_line, in_line, linesize);
            y_out += 2;
            in_line  += src_linesize * 2;
            out_line += dst_linesize * 2;
        }

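        /* Second pass: synthesize the lines of the missing field.  The slope
         * estimate k is carried along each row from one column to the next. */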
        y_out = start + ((!tff) ^ (start & 1));
        out_line = dst_data + (y_out * dst_linesize);

        for (int y = y_out; y < end; y += 2) {
            int y_prev3_in = y - 5;
            int y_next3_in = y + 5;
            int y_prev2_in = y - 3;
            int y_next2_in = y + 3;
            int y_prev_in = y - 1;
            int y_next_in = y + 1;
            int k;

            while (y_prev3_in < 0)
                y_prev3_in += 2;

            while (y_next3_in >= height)
                y_next3_in -= 2;

            while (y_prev2_in < 0)
                y_prev2_in += 2;

            while (y_next2_in >= height)
                y_next2_in -= 2;

            while (y_prev_in < 0)
                y_prev_in += 2;

            while (y_next_in >= height)
                y_next_in -= 2;

            prev3_line = src_data + (y_prev3_in * src_linesize);
            next3_line = src_data + (y_next3_in * src_linesize);

            prev2_line = src_data + (y_prev2_in * src_linesize);
            next2_line = src_data + (y_next2_in * src_linesize);

            prev_line = src_data + (y_prev_in * src_linesize);
            next_line = src_data + (y_next_in * src_linesize);

            k = 0;

            for (int x = 0; x < width; x++) {
                s->interpolate(s, out_line,
                               prev_line, next_line,
                               prev2_line, next2_line,
                               prev3_line, next3_line,
                               x, width, rslope, redge, half, depth, &k);
            }

            out_line += 2 * dst_linesize;
        }
    }

    return 0;
}

static int filter(AVFilterContext *ctx, int is_second, AVFrame *in)
{
    ESTDIFContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    ThreadData td;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    av_frame_copy_props(out, in);
    out->interlaced_frame = 0;
    out->pts = s->pts;

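    /* Interpolate in slices across the frame height; cap the job count so
     * each slice covers at least two chroma rows. */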
    td.out = out; td.in = in;
    ctx->internal->execute(ctx, deinterlace_slice, &td, NULL,
                           FFMIN(s->planeheight[1] / 2, s->nb_threads));

    if (s->mode)
        s->field = !s->field;

    return ff_filter_frame(outlink, out);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ESTDIFContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    if (inlink->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 lines is not supported\n");
        return AVERROR(EINVAL);
    }

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->depth = desc->comp[0].depth;
    s->interpolate = s->depth <= 8 ? interpolate_8 : interpolate_16;
    s->mid_8[0]  = mid2_8;
    s->mid_8[1]  = mid4_8;
    s->mid_8[2]  = mid6_8;
    s->mid_16[0] = mid2_16;
    s->mid_16[1] = mid4_16;
    s->mid_16[2] = mid6_16;
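    /* half is passed to INTERPOLATE() as the slope-magnitude penalty h */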
    s->half = 1 << (s->depth - 1);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ESTDIFContext *s = ctx->priv;
    int ret;

    if (!s->prev) {
        s->prev = in;
        return 0;
    }

    if ((s->deint && !in->interlaced_frame) || ctx->is_disabled) {
        s->prev->pts *= 2;
        ret = ff_filter_frame(ctx->outputs[0], s->prev);
        s->prev = in;
        return ret;
    }

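    /* Timestamps are in the doubled time base from config_output(): the first
     * field keeps the source pts (scaled by 2) and, in field mode, the second
     * field lands halfway towards the next source frame. */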
    s->pts = s->prev->pts * 2;
    ret = filter(ctx, 0, s->prev);
    if (ret < 0 || s->mode == 0) {
        av_frame_free(&s->prev);
        s->prev = in;
        return ret;
    }

    s->pts = s->prev->pts + in->pts;
    ret = filter(ctx, 1, s->prev);
    av_frame_free(&s->prev);
    s->prev = in;
    return ret;
}

static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    ESTDIFContext *s = ctx->priv;
    int ret;

    if (s->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);

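    /* On EOF, re-inject a clone of the last cached frame, advanced by one
     * frame duration, so the final frame still gets deinterlaced and output. */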
    if (ret == AVERROR_EOF && s->prev) {
        AVFrame *next = av_frame_clone(s->prev);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = s->prev->pts + av_rescale_q(1, av_inv_q(ctx->outputs[0]->frame_rate),
                                                ctx->outputs[0]->time_base);
        s->eof = 1;
        ret = filter_frame(ctx->inputs[0], next);
    } else if (ret < 0) {
        return ret;
    }

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ESTDIFContext *s = ctx->priv;

    av_frame_free(&s->prev);
}

static const AVFilterPad estdif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
        .config_props  = config_input,
    },
    { NULL }
};

static const AVFilterPad estdif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_estdif = {
    .name          = "estdif",
    .description   = NULL_IF_CONFIG_SMALL("Apply Edge Slope Tracing deinterlace."),
    .priv_size     = sizeof(ESTDIFContext),
    .priv_class    = &estdif_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = estdif_inputs,
    .outputs       = estdif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};