FFmpeg 4.4.5
af_afade.c
/*
 * Copyright (c) 2013-2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * fade audio filter
 */

#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

typedef struct AudioFadeContext {
    const AVClass *class;
    int type;
    int curve, curve2;
    int64_t nb_samples;
    int64_t start_sample;
    int64_t duration;
    int64_t start_time;
    int overlap;
    int cf0_eof;
    int crossfade_is_over;
    int64_t pts;

    void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
                         int nb_samples, int channels, int direction,
                         int64_t start, int64_t range, int curve);
    void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
                              uint8_t * const *cf1,
                              int nb_samples, int channels,
                              int curve0, int curve1);
} AudioFadeContext;

enum CurveType { NONE = -1, TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, SINC, ISINC, NB_CURVES };

#define OFFSET(x) offsetof(AudioFadeContext, x)
#define FLAGS  AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static double fade_gain(int curve, int64_t index, int64_t range)
{
#define CUBE(a) ((a)*(a)*(a))
    double gain;

    gain = av_clipd(1.0 * index / range, 0, 1.0);

    switch (curve) {
    case QSIN:
        gain = sin(gain * M_PI / 2.0);
        break;
    case IQSIN:
        /* 0.6... = 2 / M_PI */
        gain = 0.6366197723675814 * asin(gain);
        break;
    case ESIN:
        gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
        break;
    case HSIN:
        gain = (1.0 - cos(gain * M_PI)) / 2.0;
        break;
    case IHSIN:
        /* 0.3... = 1 / M_PI */
        gain = 0.3183098861837907 * acos(1 - 2 * gain);
        break;
    case EXP:
        /* -11.5... = 5*ln(0.1) */
        gain = exp(-11.512925464970227 * (1 - gain));
        break;
    case LOG:
        gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
        break;
    case PAR:
        gain = 1 - sqrt(1 - gain);
        break;
    case IPAR:
        gain = (1 - (1 - gain) * (1 - gain));
        break;
    case QUA:
        gain *= gain;
        break;
    case CUB:
        gain = CUBE(gain);
        break;
    case SQU:
        gain = sqrt(gain);
        break;
    case CBR:
        gain = cbrt(gain);
        break;
    case DESE:
        gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
        break;
    case DESI:
        gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
        break;
    case LOSI: {
                   const double a = 1. / (1. - 0.787) - 1;
                   double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
                   double B = 1. / (1.0 + exp(a));
                   double C = 1. / (1.0 + exp(0-a));
                   gain = (A - B) / (C - B);
               }
        break;
    case SINC:
        gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
        break;
    case ISINC:
        gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
        break;
    case NONE:
        gain = 1.0;
        break;
    }

    return gain;
}
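
/* Illustrative note (not part of the original source): for the default
 * linear curve (TRI) no case above matches, so the clamped ratio
 * index / range is returned unchanged -- e.g. index = 11025 over a range
 * of 44100 samples gives a gain of 0.25.  A curve such as QSIN reshapes
 * that same ratio, here to sin(0.25 * M_PI / 2) ~= 0.38. */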

#define FADE_PLANAR(name, type)                                              \
static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src,    \
                                     int nb_samples, int channels, int dir,  \
                                     int64_t start, int64_t range, int curve) \
{                                                                            \
    int i, c;                                                                \
                                                                             \
    for (i = 0; i < nb_samples; i++) {                                       \
        double gain = fade_gain(curve, start + i * dir, range);              \
        for (c = 0; c < channels; c++) {                                     \
            type *d = (type *)dst[c];                                        \
            const type *s = (type *)src[c];                                  \
                                                                             \
            d[i] = s[i] * gain;                                              \
        }                                                                    \
    }                                                                        \
}

#define FADE(name, type)                                                     \
static void fade_samples_## name (uint8_t **dst, uint8_t * const *src,       \
                                  int nb_samples, int channels, int dir,     \
                                  int64_t start, int64_t range, int curve)   \
{                                                                            \
    type *d = (type *)dst[0];                                                \
    const type *s = (type *)src[0];                                          \
    int i, c, k = 0;                                                         \
                                                                             \
    for (i = 0; i < nb_samples; i++) {                                       \
        double gain = fade_gain(curve, start + i * dir, range);              \
        for (c = 0; c < channels; c++, k++)                                  \
            d[k] = s[k] * gain;                                              \
    }                                                                        \
}

FADE_PLANAR(dbl, double)
FADE_PLANAR(flt, float)
FADE_PLANAR(s16, int16_t)
FADE_PLANAR(s32, int32_t)

FADE(dbl, double)
FADE(flt, float)
FADE(s16, int16_t)
FADE(s32, int32_t)
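
/* Illustrative note (not part of the original source): FADE_PLANAR expands
 * to kernels for planar layouts, where dst[c]/src[c] is a separate plane per
 * channel and sample i is scaled in each plane, while FADE handles packed
 * (interleaved) layouts through a single plane dst[0]/src[0] indexed by the
 * running counter k.  config_output() below picks one of the eight expanded
 * functions based on the negotiated sample format. */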

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFadeContext *s  = ctx->priv;

    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBL:  s->fade_samples = fade_samples_dbl;  break;
    case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
    case AV_SAMPLE_FMT_FLT:  s->fade_samples = fade_samples_flt;  break;
    case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
    case AV_SAMPLE_FMT_S16:  s->fade_samples = fade_samples_s16;  break;
    case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
    case AV_SAMPLE_FMT_S32:  s->fade_samples = fade_samples_s32;  break;
    case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
    }

    if (s->duration)
        s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
    s->duration = 0;
    if (s->start_time)
        s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
    s->start_time = 0;

    return 0;
}
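
/* Illustrative note (not part of the original source): duration and
 * start_time are AV_OPT_TYPE_DURATION values in AV_TIME_BASE units
 * (microseconds), so a 5 second fade at 44100 Hz becomes
 * av_rescale(5000000, 44100, 1000000) = 220500 samples. */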

#if CONFIG_AFADE_FILTER

static const AVOption afade_options[] = {
    { "type",         "set the fade direction",                     OFFSET(type),         AV_OPT_TYPE_INT,      {.i64 = 0    }, 0, 1, TFLAGS, "type" },
    { "t",            "set the fade direction",                     OFFSET(type),         AV_OPT_TYPE_INT,      {.i64 = 0    }, 0, 1, TFLAGS, "type" },
    { "in",           "fade-in",                                    0,                    AV_OPT_TYPE_CONST,    {.i64 = 0    }, 0, 0, TFLAGS, "type" },
    { "out",          "fade-out",                                   0,                    AV_OPT_TYPE_CONST,    {.i64 = 1    }, 0, 0, TFLAGS, "type" },
    { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64,    {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "ss",           "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64,    {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "nb_samples",   "set number of samples for fade duration",    OFFSET(nb_samples),   AV_OPT_TYPE_INT64,    {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
    { "ns",           "set number of samples for fade duration",    OFFSET(nb_samples),   AV_OPT_TYPE_INT64,    {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
    { "start_time",   "set time to start fading",                   OFFSET(start_time),   AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "st",           "set time to start fading",                   OFFSET(start_time),   AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "duration",     "set fade duration",                          OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "d",            "set fade duration",                          OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "curve",        "set fade curve type",                        OFFSET(curve),        AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
    { "c",            "set fade curve type",                        OFFSET(curve),        AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
    { "nofade",       "no fade; keep audio as-is",                  0,                    AV_OPT_TYPE_CONST,    {.i64 = NONE }, 0, 0, TFLAGS, "curve" },
    { "tri",          "linear slope",                               0,                    AV_OPT_TYPE_CONST,    {.i64 = TRI  }, 0, 0, TFLAGS, "curve" },
    { "qsin",         "quarter of sine wave",                       0,                    AV_OPT_TYPE_CONST,    {.i64 = QSIN }, 0, 0, TFLAGS, "curve" },
    { "esin",         "exponential sine wave",                      0,                    AV_OPT_TYPE_CONST,    {.i64 = ESIN }, 0, 0, TFLAGS, "curve" },
    { "hsin",         "half of sine wave",                          0,                    AV_OPT_TYPE_CONST,    {.i64 = HSIN }, 0, 0, TFLAGS, "curve" },
    { "log",          "logarithmic",                                0,                    AV_OPT_TYPE_CONST,    {.i64 = LOG  }, 0, 0, TFLAGS, "curve" },
    { "ipar",         "inverted parabola",                          0,                    AV_OPT_TYPE_CONST,    {.i64 = IPAR }, 0, 0, TFLAGS, "curve" },
    { "qua",          "quadratic",                                  0,                    AV_OPT_TYPE_CONST,    {.i64 = QUA  }, 0, 0, TFLAGS, "curve" },
    { "cub",          "cubic",                                      0,                    AV_OPT_TYPE_CONST,    {.i64 = CUB  }, 0, 0, TFLAGS, "curve" },
    { "squ",          "square root",                                0,                    AV_OPT_TYPE_CONST,    {.i64 = SQU  }, 0, 0, TFLAGS, "curve" },
    { "cbr",          "cubic root",                                 0,                    AV_OPT_TYPE_CONST,    {.i64 = CBR  }, 0, 0, TFLAGS, "curve" },
    { "par",          "parabola",                                   0,                    AV_OPT_TYPE_CONST,    {.i64 = PAR  }, 0, 0, TFLAGS, "curve" },
    { "exp",          "exponential",                                0,                    AV_OPT_TYPE_CONST,    {.i64 = EXP  }, 0, 0, TFLAGS, "curve" },
    { "iqsin",        "inverted quarter of sine wave",              0,                    AV_OPT_TYPE_CONST,    {.i64 = IQSIN}, 0, 0, TFLAGS, "curve" },
    { "ihsin",        "inverted half of sine wave",                 0,                    AV_OPT_TYPE_CONST,    {.i64 = IHSIN}, 0, 0, TFLAGS, "curve" },
    { "dese",         "double-exponential seat",                    0,                    AV_OPT_TYPE_CONST,    {.i64 = DESE }, 0, 0, TFLAGS, "curve" },
    { "desi",         "double-exponential sigmoid",                 0,                    AV_OPT_TYPE_CONST,    {.i64 = DESI }, 0, 0, TFLAGS, "curve" },
    { "losi",         "logistic sigmoid",                           0,                    AV_OPT_TYPE_CONST,    {.i64 = LOSI }, 0, 0, TFLAGS, "curve" },
    { "sinc",         "sine cardinal function",                     0,                    AV_OPT_TYPE_CONST,    {.i64 = SINC }, 0, 0, TFLAGS, "curve" },
    { "isinc",        "inverted sine cardinal function",            0,                    AV_OPT_TYPE_CONST,    {.i64 = ISINC}, 0, 0, TFLAGS, "curve" },
    { NULL }
};
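
/* Illustrative usage (not part of the original source): with the options
 * above, a 3 second fade-in from the start of a file and a 5 second
 * fade-out starting at 25 seconds could be requested as, e.g.:
 *
 *     ffmpeg -i in.wav -af "afade=t=in:st=0:d=3" out.wav
 *     ffmpeg -i in.wav -af "afade=t=out:st=25:d=5" out.wav
 */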

AVFILTER_DEFINE_CLASS(afade);

static av_cold int init(AVFilterContext *ctx)
{
    AudioFadeContext *s = ctx->priv;

    if (INT64_MAX - s->nb_samples < s->start_sample)
        return AVERROR(EINVAL);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AudioFadeContext *s   = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples        = buf->nb_samples;
    AVFrame *out_buf;
    int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});

    if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
        ( s->type && (cur_sample + nb_samples < s->start_sample)))
        return ff_filter_frame(outlink, buf);

    if (av_frame_is_writable(buf)) {
        out_buf = buf;
    } else {
        out_buf = ff_get_audio_buffer(outlink, nb_samples);
        if (!out_buf)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_buf, buf);
    }

    if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
        ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
        av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
                               out_buf->channels, out_buf->format);
    } else {
        int64_t start;

        if (!s->type)
            start = cur_sample - s->start_sample;
        else
            start = s->start_sample + s->nb_samples - cur_sample;

        s->fade_samples(out_buf->extended_data, buf->extended_data,
                        nb_samples, buf->channels,
                        s->type ? -1 : 1, start,
                        s->nb_samples, s->curve);
    }

    if (buf != out_buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out_buf);
}
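
/* Illustrative note (not part of the original source): for a fade-in
 * (type == 0) with start_sample = 44100 and nb_samples = 44100, a frame
 * that starts after sample 88200 is passed through untouched, a frame
 * that ends before sample 44100 is replaced with silence, and a frame
 * overlapping the fade window is scaled sample by sample with
 * fade_samples().  A fade-out (type == 1) mirrors this logic with
 * direction -1. */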

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    return config_output(ctx->outputs[0]);
}

static const AVFilterPad avfilter_af_afade_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_afade_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_afade = {
    .name            = "afade",
    .description     = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(AudioFadeContext),
    .init            = init,
    .inputs          = avfilter_af_afade_inputs,
    .outputs         = avfilter_af_afade_outputs,
    .priv_class      = &afade_class,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};

#endif /* CONFIG_AFADE_FILTER */

#if CONFIG_ACROSSFADE_FILTER

static const AVOption acrossfade_options[] = {
    { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT,      {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
    { "ns",         "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT,      {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
    { "duration",   "set cross fade duration",                       OFFSET(duration),   AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, 60000000, FLAGS },
    { "d",          "set cross fade duration",                       OFFSET(duration),   AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, 60000000, FLAGS },
    { "overlap",    "overlap 1st stream end with 2nd stream start",  OFFSET(overlap),    AV_OPT_TYPE_BOOL,     {.i64 = 1    }, 0, 1, FLAGS },
    { "o",          "overlap 1st stream end with 2nd stream start",  OFFSET(overlap),    AV_OPT_TYPE_BOOL,     {.i64 = 1    }, 0, 1, FLAGS },
    { "curve1",     "set fade curve type for 1st stream",            OFFSET(curve),      AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { "c1",         "set fade curve type for 1st stream",            OFFSET(curve),      AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { "nofade",     "no fade; keep audio as-is",                     0,                  AV_OPT_TYPE_CONST,    {.i64 = NONE }, 0, 0, FLAGS, "curve" },
    { "tri",        "linear slope",                                  0,                  AV_OPT_TYPE_CONST,    {.i64 = TRI  }, 0, 0, FLAGS, "curve" },
    { "qsin",       "quarter of sine wave",                          0,                  AV_OPT_TYPE_CONST,    {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
    { "esin",       "exponential sine wave",                         0,                  AV_OPT_TYPE_CONST,    {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
    { "hsin",       "half of sine wave",                             0,                  AV_OPT_TYPE_CONST,    {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
    { "log",        "logarithmic",                                   0,                  AV_OPT_TYPE_CONST,    {.i64 = LOG  }, 0, 0, FLAGS, "curve" },
    { "ipar",       "inverted parabola",                             0,                  AV_OPT_TYPE_CONST,    {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
    { "qua",        "quadratic",                                     0,                  AV_OPT_TYPE_CONST,    {.i64 = QUA  }, 0, 0, FLAGS, "curve" },
    { "cub",        "cubic",                                         0,                  AV_OPT_TYPE_CONST,    {.i64 = CUB  }, 0, 0, FLAGS, "curve" },
    { "squ",        "square root",                                   0,                  AV_OPT_TYPE_CONST,    {.i64 = SQU  }, 0, 0, FLAGS, "curve" },
    { "cbr",        "cubic root",                                    0,                  AV_OPT_TYPE_CONST,    {.i64 = CBR  }, 0, 0, FLAGS, "curve" },
    { "par",        "parabola",                                      0,                  AV_OPT_TYPE_CONST,    {.i64 = PAR  }, 0, 0, FLAGS, "curve" },
    { "exp",        "exponential",                                   0,                  AV_OPT_TYPE_CONST,    {.i64 = EXP  }, 0, 0, FLAGS, "curve" },
    { "iqsin",      "inverted quarter of sine wave",                 0,                  AV_OPT_TYPE_CONST,    {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
    { "ihsin",      "inverted half of sine wave",                    0,                  AV_OPT_TYPE_CONST,    {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
    { "dese",       "double-exponential seat",                       0,                  AV_OPT_TYPE_CONST,    {.i64 = DESE }, 0, 0, FLAGS, "curve" },
    { "desi",       "double-exponential sigmoid",                    0,                  AV_OPT_TYPE_CONST,    {.i64 = DESI }, 0, 0, FLAGS, "curve" },
    { "losi",       "logistic sigmoid",                              0,                  AV_OPT_TYPE_CONST,    {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
    { "sinc",       "sine cardinal function",                        0,                  AV_OPT_TYPE_CONST,    {.i64 = SINC }, 0, 0, FLAGS, "curve" },
    { "isinc",      "inverted sine cardinal function",               0,                  AV_OPT_TYPE_CONST,    {.i64 = ISINC}, 0, 0, FLAGS, "curve" },
    { "curve2",     "set fade curve type for 2nd stream",            OFFSET(curve2),     AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { "c2",         "set fade curve type for 2nd stream",            OFFSET(curve2),     AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { NULL }
};
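
/* Illustrative usage (not part of the original source): cross fading the
 * end of one file into the start of another with a 3 second triangular
 * overlap could be requested as, e.g.:
 *
 *     ffmpeg -i first.wav -i second.wav \
 *            -filter_complex "acrossfade=d=3:c1=tri:c2=tri" out.wav
 */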

AVFILTER_DEFINE_CLASS(acrossfade);

#define CROSSFADE_PLANAR(name, type)                                          \
static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
                                          uint8_t * const *cf1,               \
                                          int nb_samples, int channels,       \
                                          int curve0, int curve1)             \
{                                                                             \
    int i, c;                                                                 \
                                                                              \
    for (i = 0; i < nb_samples; i++) {                                        \
        double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples);     \
        double gain1 = fade_gain(curve1, i, nb_samples);                      \
        for (c = 0; c < channels; c++) {                                      \
            type *d = (type *)dst[c];                                         \
            const type *s0 = (type *)cf0[c];                                  \
            const type *s1 = (type *)cf1[c];                                  \
                                                                              \
            d[i] = s0[i] * gain0 + s1[i] * gain1;                             \
        }                                                                     \
    }                                                                         \
}

#define CROSSFADE(name, type)                                                 \
static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0,   \
                                       uint8_t * const *cf1,                  \
                                       int nb_samples, int channels,          \
                                       int curve0, int curve1)                \
{                                                                             \
    type *d = (type *)dst[0];                                                 \
    const type *s0 = (type *)cf0[0];                                          \
    const type *s1 = (type *)cf1[0];                                          \
    int i, c, k = 0;                                                          \
                                                                              \
    for (i = 0; i < nb_samples; i++) {                                        \
        double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples);     \
        double gain1 = fade_gain(curve1, i, nb_samples);                      \
        for (c = 0; c < channels; c++, k++)                                   \
            d[k] = s0[k] * gain0 + s1[k] * gain1;                             \
    }                                                                         \
}

CROSSFADE_PLANAR(dbl, double)
CROSSFADE_PLANAR(flt, float)
CROSSFADE_PLANAR(s16, int16_t)
CROSSFADE_PLANAR(s32, int32_t)

CROSSFADE(dbl, double)
CROSSFADE(flt, float)
CROSSFADE(s16, int16_t)
CROSSFADE(s32, int32_t)
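
/* Illustrative note (not part of the original source): for each output
 * sample i, gain0 runs the first stream's curve backwards (index
 * nb_samples - 1 - i, so roughly 1 -> 0) while gain1 runs the second
 * stream's curve forwards (index i, so 0 -> roughly 1); with the default
 * linear curves and nb_samples = 4 the gain pairs are
 * (0.75, 0.00), (0.50, 0.25), (0.25, 0.50), (0.00, 0.75). */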

static int activate(AVFilterContext *ctx)
{
    AudioFadeContext *s   = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in = NULL, *out, *cf[2] = { NULL };
    int ret = 0, nb_samples, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    if (s->crossfade_is_over) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
        if (ret > 0) {
            in->pts = s->pts;
            s->pts += av_rescale_q(in->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            return ff_filter_frame(outlink, in);
        } else if (ret < 0) {
            return ret;
        } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else if (!ret) {
            if (ff_outlink_frame_wanted(ctx->outputs[0])) {
                ff_inlink_request_frame(ctx->inputs[1]);
                return 0;
            }
        }
    }

    if (ff_inlink_queued_samples(ctx->inputs[0]) > s->nb_samples) {
        nb_samples = ff_inlink_queued_samples(ctx->inputs[0]) - s->nb_samples;
        if (nb_samples > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
            if (ret < 0) {
                return ret;
            }
        }
        in->pts = s->pts;
        s->pts += av_rescale_q(in->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
        return ff_filter_frame(outlink, in);
    } else if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->nb_samples &&
               ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples && s->cf0_eof) {
        if (s->overlap) {
            out = ff_get_audio_buffer(outlink, s->nb_samples);
            if (!out)
                return AVERROR(ENOMEM);

            ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            s->crossfade_samples(out->extended_data, cf[0]->extended_data,
                                 cf[1]->extended_data,
                                 s->nb_samples, out->channels,
                                 s->curve, s->curve2);
            out->pts = s->pts;
            s->pts += av_rescale_q(s->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            s->crossfade_is_over = 1;
            av_frame_free(&cf[0]);
            av_frame_free(&cf[1]);
            return ff_filter_frame(outlink, out);
        } else {
            out = ff_get_audio_buffer(outlink, s->nb_samples);
            if (!out)
                return AVERROR(ENOMEM);

            ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
                            outlink->channels, -1, s->nb_samples - 1, s->nb_samples, s->curve);
            out->pts = s->pts;
            s->pts += av_rescale_q(s->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            av_frame_free(&cf[0]);
            ret = ff_filter_frame(outlink, out);
            if (ret < 0)
                return ret;

            out = ff_get_audio_buffer(outlink, s->nb_samples);
            if (!out)
                return AVERROR(ENOMEM);

            ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
                            outlink->channels, 1, 0, s->nb_samples, s->curve2);
            out->pts = s->pts;
            s->pts += av_rescale_q(s->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            s->crossfade_is_over = 1;
            av_frame_free(&cf[1]);
            return ff_filter_frame(outlink, out);
        }
    } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) {
            s->cf0_eof = 1;
        }
        if (ff_outlink_get_status(ctx->inputs[1])) {
            ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE);
            return 0;
        }
        if (!s->cf0_eof)
            ff_inlink_request_frame(ctx->inputs[0]);
        else
            ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return ret;
}
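
/* Illustrative note (not part of the original source): activate() works in
 * three phases.  Before the crossfade it forwards frames from the first
 * input while holding back the last nb_samples; once the first input hits
 * EOF (cf0_eof) and both inputs have nb_samples queued it emits the
 * crossfaded (or back-to-back faded, when overlap=0) region; after that
 * (crossfade_is_over) it simply passes the second input through with
 * retimed pts. */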

static int acrossfade_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFadeContext *s  = ctx->priv;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate    = ctx->inputs[0]->sample_rate;
    outlink->time_base      = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels       = ctx->inputs[0]->channels;

    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBL:  s->crossfade_samples = crossfade_samples_dbl;  break;
    case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
    case AV_SAMPLE_FMT_FLT:  s->crossfade_samples = crossfade_samples_flt;  break;
    case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
    case AV_SAMPLE_FMT_S16:  s->crossfade_samples = crossfade_samples_s16;  break;
    case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
    case AV_SAMPLE_FMT_S32:  s->crossfade_samples = crossfade_samples_s32;  break;
    case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
    }

    config_output(outlink);

    return 0;
}
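
/* Illustrative note (not part of the original source): acrossfade does not
 * resample, so streams with different rates have to be brought to a common
 * rate first, e.g. with an aresample filter in front of each input. */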

static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
    {
        .name = "crossfade0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "crossfade1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = acrossfade_config_output,
    },
    { NULL }
};

AVFilter ff_af_acrossfade = {
    .name          = "acrossfade",
    .description   = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioFadeContext),
    .activate      = activate,
    .priv_class    = &acrossfade_class,
    .inputs        = avfilter_af_acrossfade_inputs,
    .outputs       = avfilter_af_acrossfade_outputs,
};

#endif /* CONFIG_ACROSSFADE_FILTER */