af_amix.c

/*
 * Audio Mix Filter
 * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio Mix Filter
 *
 * Mixes audio from multiple sources into a single output. The channel layout,
 * sample rate, and sample format will be the same for all inputs and the
 * output.
 */
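
/*
 * Illustrative note (not part of the original source): a typical invocation
 * of this filter from the ffmpeg command line, with hypothetical input file
 * names, might look like
 *
 *   ffmpeg -i first.wav -i second.wav \
 *          -filter_complex "amix=inputs=2:duration=longest" mixed.wav
 *
 * All inputs are expected to share the sample rate, sample format, and
 * channel layout negotiated for the output; ffmpeg's filter-graph format
 * negotiation normally arranges this automatically.
 */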
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"

#define INPUT_ON       1    /**< input is active */
#define INPUT_EOF      2    /**< input has reached EOF (may still be active) */

#define DURATION_LONGEST  0
#define DURATION_SHORTEST 1
#define DURATION_FIRST    2
typedef struct FrameInfo {
    int nb_samples;
    int64_t pts;
    struct FrameInfo *next;
} FrameInfo;

/**
 * Linked list used to store timestamps and frame sizes of all frames in the
 * FIFO for the first input.
 *
 * This is needed to keep timestamps synchronized for the case where multiple
 * input frames are pushed to the filter for processing before a frame is
 * requested by the output link.
 */
typedef struct FrameList {
    int nb_frames;
    int nb_samples;
    FrameInfo *list;
    FrameInfo *end;
} FrameList;
static void frame_list_clear(FrameList *frame_list)
{
    if (frame_list) {
        while (frame_list->list) {
            FrameInfo *info = frame_list->list;
            frame_list->list = info->next;
            av_free(info);
        }
        frame_list->nb_frames  = 0;
        frame_list->nb_samples = 0;
        frame_list->end        = NULL;
    }
}

static int frame_list_next_frame_size(FrameList *frame_list)
{
    if (!frame_list->list)
        return 0;
    return frame_list->list->nb_samples;
}

static int64_t frame_list_next_pts(FrameList *frame_list)
{
    if (!frame_list->list)
        return AV_NOPTS_VALUE;
    return frame_list->list->pts;
}

static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
    if (nb_samples >= frame_list->nb_samples) {
        frame_list_clear(frame_list);
    } else {
        int samples = nb_samples;
        while (samples > 0) {
            FrameInfo *info = frame_list->list;
            av_assert0(info);
            if (info->nb_samples <= samples) {
                samples -= info->nb_samples;
                frame_list->list = info->next;
                if (!frame_list->list)
                    frame_list->end = NULL;
                frame_list->nb_frames--;
                frame_list->nb_samples -= info->nb_samples;
                av_free(info);
            } else {
                info->nb_samples       -= samples;
                info->pts              += samples;
                frame_list->nb_samples -= samples;
                samples = 0;
            }
        }
    }
}
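
/*
 * Illustrative note (not from the original source): timestamps in this list
 * are expressed in the output time base of 1/sample_rate (see config_output),
 * so a partial removal can advance the head frame's pts by the number of
 * samples removed. For example, removing 300 samples from a queued frame of
 * 1024 samples with pts 2048 leaves a frame of 724 samples with pts 2348.
 */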
static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
    FrameInfo *info = av_malloc(sizeof(*info));
    if (!info)
        return AVERROR(ENOMEM);
    info->nb_samples = nb_samples;
    info->pts        = pts;
    info->next       = NULL;

    if (!frame_list->list) {
        frame_list->list = info;
        frame_list->end  = info;
    } else {
        av_assert0(frame_list->end);
        frame_list->end->next = info;
        frame_list->end       = info;
    }
    frame_list->nb_frames++;
    frame_list->nb_samples += nb_samples;

    return 0;
}
/* FIXME: use the links' FIFOs directly */

typedef struct MixContext {
    const AVClass *class;       /**< class for AVOptions */
    AVFloatDSPContext *fdsp;

    int nb_inputs;              /**< number of inputs */
    int active_inputs;          /**< number of inputs currently active */
    int duration_mode;          /**< mode for determining duration */
    float dropout_transition;   /**< transition time when an input drops out */
    char *weights_str;          /**< string of custom weights for every input */

    int nb_channels;            /**< number of channels */
    int sample_rate;            /**< sample rate */
    int planar;
    AVAudioFifo **fifos;        /**< audio fifo for each input */
    uint8_t *input_state;       /**< current state of each input */
    float *input_scale;         /**< mixing scale factor for each input */
    float *weights;             /**< custom weights for every input */
    float weight_sum;           /**< sum of custom weights for every input */
    float *scale_norm;          /**< normalization factor for every input */
    int64_t next_pts;           /**< calculated pts for next output frame */
    FrameList *frame_list;      /**< list of frame info for the first input */
} MixContext;
#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption amix_options[] = {
    { "inputs", "Number of inputs.",
            OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 1024, A|F },
    { "duration", "How to determine the end-of-stream.",
            OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" },
        { "longest",  "Duration of longest input.",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, 0, 0, A|F, "duration" },
        { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, 0, 0, A|F, "duration" },
        { "first",    "Duration of first input.",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, 0, 0, A|F, "duration" },
    { "dropout_transition", "Transition time, in seconds, for volume "
                            "renormalization when an input stream ends.",
            OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
    { "weights", "Set weight for each input.",
            OFFSET(weights_str), AV_OPT_TYPE_STRING, { .str = "1 1" }, 0, 0, A|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amix);
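
/*
 * Illustrative note (not from the original source): the options above combine
 * in a filtergraph string, for example
 *
 *   amix=inputs=3:duration=first:dropout_transition=3:weights='1 1 0.5'
 *
 * which mixes three inputs, ends when the first input ends, spreads volume
 * renormalization over 3 seconds when an input drops out, and mixes the third
 * input at half the weight of the first two.
 */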
/**
 * Update the scaling factors to apply to each input during mixing.
 *
 * This balances the full volume range between active inputs and handles
 * volume transitions when EOF is encountered on an input but mixing continues
 * with the remaining inputs.
 */
static void calculate_scales(MixContext *s, int nb_samples)
{
    float weight_sum = 0.f;
    int i;

    for (i = 0; i < s->nb_inputs; i++)
        if (s->input_state[i] & INPUT_ON)
            weight_sum += s->weights[i];

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON) {
            if (s->scale_norm[i] > weight_sum / s->weights[i]) {
                s->scale_norm[i] -= ((s->weight_sum / s->weights[i]) / s->nb_inputs) *
                                    nb_samples / (s->dropout_transition * s->sample_rate);
                s->scale_norm[i] = FFMAX(s->scale_norm[i], weight_sum / s->weights[i]);
            }
        }
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON)
            s->input_scale[i] = 1.0f / s->scale_norm[i];
        else
            s->input_scale[i] = 0.0f;
    }
}
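
/*
 * Worked example (illustrative, not from the original source): with two
 * inputs of equal weight 1, each scale_norm starts at weight_sum/weight = 2,
 * so each input is mixed at gain 1/2. If the second input reaches EOF, the
 * active weight_sum drops to 1 and the first input's scale_norm is ramped
 * from 2 down toward 1 over dropout_transition seconds, so its gain rises
 * smoothly from 1/2 to 1 instead of jumping.
 */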
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    int i;
    char buf[64];

    s->planar          = av_sample_fmt_is_planar(outlink->format);
    s->sample_rate     = outlink->sample_rate;
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts        = AV_NOPTS_VALUE;

    s->frame_list = av_mallocz(sizeof(*s->frame_list));
    if (!s->frame_list)
        return AVERROR(ENOMEM);

    s->fifos = av_mallocz_array(s->nb_inputs, sizeof(*s->fifos));
    if (!s->fifos)
        return AVERROR(ENOMEM);

    s->nb_channels = outlink->channels;
    for (i = 0; i < s->nb_inputs; i++) {
        s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
        if (!s->fifos[i])
            return AVERROR(ENOMEM);
    }

    s->input_state = av_malloc(s->nb_inputs);
    if (!s->input_state)
        return AVERROR(ENOMEM);
    memset(s->input_state, INPUT_ON, s->nb_inputs);
    s->active_inputs = s->nb_inputs;

    s->input_scale = av_mallocz_array(s->nb_inputs, sizeof(*s->input_scale));
    s->scale_norm  = av_mallocz_array(s->nb_inputs, sizeof(*s->scale_norm));
    if (!s->input_scale || !s->scale_norm)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_inputs; i++)
        s->scale_norm[i] = s->weight_sum / s->weights[i];
    calculate_scales(s, 0);

    av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);

    av_log(ctx, AV_LOG_VERBOSE,
           "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);

    return 0;
}
/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
static int output_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int nb_samples, ns, i;

    if (s->input_state[0] & INPUT_ON) {
        /* first input live: use the corresponding frame size */
        nb_samples = frame_list_next_frame_size(s->frame_list);
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->input_state[i] & INPUT_ON) {
                ns = av_audio_fifo_size(s->fifos[i]);
                if (ns < nb_samples) {
                    if (!(s->input_state[i] & INPUT_EOF))
                        /* unclosed input with not enough samples */
                        return 0;
                    /* closed input to drain */
                    nb_samples = ns;
                }
            }
        }
    } else {
        /* first input closed: use the available samples */
        nb_samples = INT_MAX;
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->input_state[i] & INPUT_ON) {
                ns = av_audio_fifo_size(s->fifos[i]);
                nb_samples = FFMIN(nb_samples, ns);
            }
        }
        if (nb_samples == INT_MAX) {
            ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
            return 0;
        }
    }

    s->next_pts = frame_list_next_pts(s->frame_list);
    frame_list_remove_samples(s->frame_list, nb_samples);

    calculate_scales(s, nb_samples);

    if (nb_samples == 0)
        return 0;

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            plane_size = FFALIGN(plane_size, 16);

            if (out_buf->format == AV_SAMPLE_FMT_FLT ||
                out_buf->format == AV_SAMPLE_FMT_FLTP) {
                for (p = 0; p < planes; p++) {
                    s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
                                                (float *) in_buf->extended_data[p],
                                                s->input_scale[i], plane_size);
                }
            } else {
                for (p = 0; p < planes; p++) {
                    s->fdsp->vector_dmac_scalar((double *)out_buf->extended_data[p],
                                                (double *) in_buf->extended_data[p],
                                                s->input_scale[i], plane_size);
                }
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}
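
/*
 * Illustrative note (not from the original source): the mixing loop above is
 * a scaled multiply-accumulate per plane. Conceptually, for each active
 * input i, plane p, and sample n,
 *
 *   out[p][n] += input_scale[i] * in_i[p][n]
 *
 * with vector_fmac_scalar / vector_dmac_scalar supplying the optimized
 * implementation for float and double samples respectively.
 */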
/**
 * Requests a frame, if needed, from each input link other than the first.
 */
static int request_samples(AVFilterContext *ctx, int min_samples)
{
    MixContext *s = ctx->priv;
    int i;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        if (!(s->input_state[i] & INPUT_ON) ||
             (s->input_state[i] & INPUT_EOF))
            continue;
        if (av_audio_fifo_size(s->fifos[i]) >= min_samples)
            continue;
        ff_inlink_request_frame(ctx->inputs[i]);
    }
    return output_frame(ctx->outputs[0]);
}
/**
 * Calculates the number of active inputs and determines EOF based on the
 * duration option.
 *
 * @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
 */
static int calc_active_inputs(MixContext *s)
{
    int i;
    int active_inputs = 0;
    for (i = 0; i < s->nb_inputs; i++)
        active_inputs += !!(s->input_state[i] & INPUT_ON);
    s->active_inputs = active_inputs;

    if (!active_inputs ||
        (s->duration_mode == DURATION_FIRST && !(s->input_state[0] & INPUT_ON)) ||
        (s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
        return AVERROR_EOF;

    return 0;
}
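
/*
 * Illustrative overview (not from the original source): activate() below runs
 * in three stages each time the filter is scheduled:
 *   1. consume any pending frame on each input, queue its samples in that
 *      input's FIFO (and, for the first input, record its size and pts in
 *      frame_list), then attempt to produce output;
 *   2. acknowledge EOF status on each input and update input_state;
 *   3. if the output still wants a frame, either request more data from the
 *      inputs or, once enough samples are buffered, emit the next mixed frame.
 */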
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = ctx->priv;
    AVFrame *buf = NULL;
    int i, ret;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        if ((ret = ff_inlink_consume_frame(ctx->inputs[i], &buf)) > 0) {
            if (i == 0) {
                int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
                ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
                if (ret < 0) {
                    av_frame_free(&buf);
                    return ret;
                }
            }

            ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                                      buf->nb_samples);
            if (ret < 0) {
                av_frame_free(&buf);
                return ret;
            }

            av_frame_free(&buf);

            ret = output_frame(outlink);
            if (ret < 0)
                return ret;
        }
    }

    for (i = 0; i < s->nb_inputs; i++) {
        int64_t pts;
        int status;

        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            if (status == AVERROR_EOF) {
                if (i == 0) {
                    s->input_state[i] = 0;
                    if (s->nb_inputs == 1) {
                        ff_outlink_set_status(outlink, status, pts);
                        return 0;
                    }
                } else {
                    s->input_state[i] |= INPUT_EOF;
                    if (av_audio_fifo_size(s->fifos[i]) == 0) {
                        s->input_state[i] = 0;
                    }
                }
            }
        }
    }

    if (calc_active_inputs(s)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    if (ff_outlink_frame_wanted(outlink)) {
        int wanted_samples;

        if (!(s->input_state[0] & INPUT_ON))
            return request_samples(ctx, 1);

        if (s->frame_list->nb_frames == 0) {
            ff_inlink_request_frame(ctx->inputs[0]);
            return 0;
        }
        av_assert0(s->frame_list->nb_frames > 0);

        wanted_samples = frame_list_next_frame_size(s->frame_list);

        return request_samples(ctx, wanted_samples);
    }

    return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    float last_weight = 1.f;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    s->weights = av_mallocz_array(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);

    p = s->weights_str;
    for (i = 0; i < s->nb_inputs; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;

        p = NULL;
        sscanf(arg, "%f", &last_weight);
        s->weights[i] = last_weight;
        s->weight_sum += last_weight;
    }

    for (; i < s->nb_inputs; i++) {
        s->weights[i] = last_weight;
        s->weight_sum += last_weight;
    }

    return 0;
}
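
/*
 * Illustrative note (not from the original source): the weights string is a
 * space-separated list, and the last parsed value is repeated for any
 * remaining inputs. With inputs=4 and weights='1 0.5', the effective
 * per-input weights become 1, 0.5, 0.5, 0.5 and weight_sum is 2.5.
 */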
static av_cold void uninit(AVFilterContext *ctx)
{
    int i;
    MixContext *s = ctx->priv;

    if (s->fifos) {
        for (i = 0; i < s->nb_inputs; i++)
            av_audio_fifo_free(s->fifos[i]);
        av_freep(&s->fifos);
    }
    frame_list_clear(s->frame_list);
    av_freep(&s->frame_list);
    av_freep(&s->input_state);
    av_freep(&s->input_scale);
    av_freep(&s->scale_norm);
    av_freep(&s->weights);
    av_freep(&s->fdsp);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
        (ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLTP)) < 0 ||
        (ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBL )) < 0 ||
        (ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBLP)) < 0 ||
        (ret = ff_set_common_formats        (ctx, formats)) < 0 ||
        (ret = ff_set_common_channel_layouts(ctx, layouts)) < 0 ||
        (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
        goto fail;
    return 0;
fail:
    if (layouts)
        av_freep(&layouts->channel_layouts);
    av_freep(&layouts);
    return ret;
}
static const AVFilterPad avfilter_af_amix_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_amix = {
    .name          = "amix",
    .description   = NULL_IF_CONFIG_SMALL("Audio mixing."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &amix_class,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = avfilter_af_amix_outputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};