af_compand.c

/*
 * Copyright (c) 1999 Chris Bagwell
 * Copyright (c) 1999 Nick Bailey
 * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
 * Copyright (c) 2013 Paul B Mahol
 * Copyright (c) 2014 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio compand filter
 */
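
/*
 * Illustrative filtergraph usage (a sketch; the option values are examples,
 * not defaults of this file):
 *
 *     compand=attacks=0.3:decays=0.8:points=-80/-80|-40/-20|0/-10:gain=3
 *
 * "points" is a '|'- or space-separated list of input/output levels in dB
 * describing the transfer function; "attacks" and "decays" give per-channel
 * envelope times in seconds, with the last value reused for any channels
 * left unspecified.
 */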

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct ChanParam {
    double attack;
    double decay;
    double volume;
} ChanParam;
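
/*
 * One piece of the piecewise transfer function: (x, y) is the segment's
 * start point in the natural-log domain, and (a, b) are the quadratic
 * coefficients evaluated by get_volume() as
 *     out_log = y + in_log * (a * in_log + b)
 */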
typedef struct CompandSegment {
    double x, y;
    double a, b;
} CompandSegment;

typedef struct CompandContext {
    const AVClass *class;
    int nb_segments;
    char *attacks, *decays, *points;
    CompandSegment *segments;
    ChanParam *channels;
    double in_min_lin;
    double out_min_lin;
    double curve_dB;
    double gain_dB;
    double initial_volume;
    double delay;
    AVFrame *delay_frame;
    int delay_samples;
    int delay_count;
    int delay_index;
    int64_t pts;
    int (*compand)(AVFilterContext *ctx, AVFrame *frame);
} CompandContext;

#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption compand_options[] = {
    { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
    { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
    { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
    { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
    { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
    { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
    { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(compand);
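
/*
 * "gain" is extra output gain in dB applied on top of the transfer function,
 * "volume" seeds the per-channel envelope (in dB) so the filter does not
 * start from silence, and "delay" (seconds) lets the level detector run
 * ahead of the audio that is actually being scaled.
 */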

static av_cold int init(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;
    s->pts = AV_NOPTS_VALUE;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;

    av_freep(&s->channels);
    av_freep(&s->segments);
    av_frame_free(&s->delay_frame);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static void count_items(char *item_str, int *nb_items)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == ' ' || *p == '|')
            (*nb_items)++;
    }
}
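
/*
 * Per-sample envelope follower: a one-pole smoother whose coefficient depends
 * on whether the level is rising (attack) or falling (decay).  The
 * coefficients are precomputed in config_output() from times in seconds.
 */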
static void update_volume(ChanParam *cp, double in)
{
    double delta = in - cp->volume;

    if (delta > 0.0)
        cp->volume += delta * cp->attack;
    else
        cp->volume += delta * cp->decay;
}
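
/*
 * Map a linear input level through the transfer function: pick the segment
 * containing log(in_lin), evaluate its quadratic in the log domain and
 * convert back with exp().
 */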
static double get_volume(CompandContext *s, double in_lin)
{
    CompandSegment *cs;
    double in_log, out_log;
    int i;

    if (in_lin < s->in_min_lin)
        return s->out_min_lin;

    in_log = log(in_lin);

    for (i = 1; i < s->nb_segments; i++)
        if (in_log <= s->segments[i].x)
            break;
    cs = &s->segments[i - 1];
    in_log -= cs->x;
    out_log = cs->y + in_log * (cs->a * in_log + cs->b);

    return exp(out_log);
}

static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;
    int err;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        err = av_frame_copy_props(out_frame, frame);
        if (err < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return err;
        }
    }

    for (chan = 0; chan < channels; chan++) {
        const double *src = (double *)frame->extended_data[chan];
        double *dst = (double *)out_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            update_volume(cp, fabs(src[i]));

            dst[i] = src[i] * get_volume(s, cp->volume);
        }
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}

#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
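
/*
 * Delayed variant: delay_frame acts as a per-channel circular buffer of
 * delay_samples entries.  Output only starts once the buffer has filled, so
 * the measured volume leads the samples it is applied to.  MOD() above is a
 * cheap wrap-around that is valid because the index advances by at most one
 * per sample.
 */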
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    int chan, i, av_uninit(dindex), oindex, av_uninit(count);
    AVFrame *out_frame = NULL;
    int err;

    if (s->pts == AV_NOPTS_VALUE) {
        s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
    }

    av_assert1(channels > 0); /* would corrupt delay_count and delay_index */

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        const double *src = (double *)frame->extended_data[chan];
        double *dbuf = (double *)delay_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];
        double *dst;

        count = s->delay_count;
        dindex = s->delay_index;
        for (i = 0, oindex = 0; i < nb_samples; i++) {
            const double in = src[i];
            update_volume(cp, fabs(in));

            if (count >= s->delay_samples) {
                if (!out_frame) {
                    out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
                    if (!out_frame) {
                        av_frame_free(&frame);
                        return AVERROR(ENOMEM);
                    }
                    err = av_frame_copy_props(out_frame, frame);
                    if (err < 0) {
                        av_frame_free(&out_frame);
                        av_frame_free(&frame);
                        return err;
                    }
                    out_frame->pts = s->pts;
                    s->pts += av_rescale_q(nb_samples - i,
                        (AVRational){ 1, inlink->sample_rate },
                        inlink->time_base);
                }

                dst = (double *)out_frame->extended_data[chan];
                dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
            } else {
                count++;
            }

            dbuf[dindex] = in;
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }

    s->delay_count = count;
    s->delay_index = dindex;

    av_frame_free(&frame);

    if (out_frame) {
        err = ff_filter_frame(ctx->outputs[0], out_frame);
        return err;
    }

    return 0;
}
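
/*
 * Flush whatever is still in the delay buffer at EOF, in chunks of at most
 * 2048 samples, reusing the last measured per-channel volume.
 */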
static int compand_drain(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int channels = outlink->channels;
    AVFrame *frame = NULL;
    int chan, i, dindex;

    /* 2048 is to limit output frame size during drain */
    frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts;
    s->pts += av_rescale_q(frame->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);

    av_assert0(channels > 0);
    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        double *dbuf = (double *)delay_frame->extended_data[chan];
        double *dst = (double *)frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        dindex = s->delay_index;
        for (i = 0; i < frame->nb_samples; i++) {
            dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count -= frame->nb_samples;
    s->delay_index = dindex;

    return ff_filter_frame(outlink, frame);
}
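
/*
 * config_output() does the heavy lifting: parse the attack/decay and
 * transfer-function strings, build the segment table (add an implicit 0/0
 * point and a leading tail-off segment, merge colinear pieces, convert dB to
 * natural log, round each corner into a quadratic soft knee sized by
 * curve_dB), turn the attack/decay times into per-sample smoothing
 * coefficients, and pick the delayed or non-delayed processing path.
 */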
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int sample_rate = outlink->sample_rate;
    double radius = s->curve_dB * M_LN10 / 20.0;
    char *p, *saveptr = NULL;
    const int channels = outlink->channels;
    int nb_attacks, nb_decays, nb_points;
    int new_nb_items, num;
    int i;
    int err;

    count_items(s->attacks, &nb_attacks);
    count_items(s->decays, &nb_decays);
    count_items(s->points, &nb_points);

    if (channels <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
        return AVERROR(EINVAL);
    }

    if (nb_attacks > channels || nb_decays > channels) {
        av_log(ctx, AV_LOG_ERROR,
                "Number of attacks/decays bigger than number of channels.\n");
        return AVERROR(EINVAL);
    }

    uninit(ctx);

    s->channels = av_mallocz_array(channels, sizeof(*s->channels));
    s->nb_segments = (nb_points + 4) * 2;
    s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));

    if (!s->channels || !s->segments) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    p = s->attacks;
    for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
        if (s->channels[i].attack < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_attacks = new_nb_items;

    p = s->decays;
    for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
        if (s->channels[i].decay < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_decays = new_nb_items;

    if (nb_attacks != nb_decays) {
        av_log(ctx, AV_LOG_ERROR,
                "Number of attacks %d differs from number of decays %d.\n",
                nb_attacks, nb_decays);
        uninit(ctx);
        return AVERROR(EINVAL);
    }

    for (i = nb_decays; i < channels; i++) {
        s->channels[i].attack = s->channels[nb_decays - 1].attack;
        s->channels[i].decay = s->channels[nb_decays - 1].decay;
    }

#define S(x) s->segments[2 * ((x) + 1)]
    p = s->points;
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        p = NULL;
        if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
            av_log(ctx, AV_LOG_ERROR,
                    "Invalid and/or missing input/output value.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                    "Transfer function input values must be increasing.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments */
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        if (fabs(g1 - g2))
            continue;

        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    for (i = 0; i < s->nb_segments; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

#define L(x) s->segments[i - (x)]
    for (i = 4; i < s->nb_segments; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        in1 = cx - L(3).x;
        out1 = cy - L(3).y;
        in2 = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    L(3).x = 0;
    L(3).y = L(2).y;

    s->in_min_lin = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);
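
    /*
     * Convert attack/decay times (seconds) into one-pole smoothing
     * coefficients; times no longer than one sample period collapse to
     * instant (coefficient 1.0).  The initial envelope value comes from the
     * "volume" option, given in dB.
     */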
    for (i = 0; i < channels; i++) {
        ChanParam *cp = &s->channels[i];

        if (cp->attack > 1.0 / sample_rate)
            cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
        else
            cp->attack = 1.0;
        if (cp->decay > 1.0 / sample_rate)
            cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
        else
            cp->decay = 1.0;
        cp->volume = ff_exp10(s->initial_volume / 20);
    }

    s->delay_samples = s->delay * sample_rate;
    if (s->delay_samples <= 0) {
        s->compand = compand_nodelay;
        return 0;
    }

    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    s->delay_frame->format = outlink->format;
    s->delay_frame->nb_samples = s->delay_samples;
    s->delay_frame->channel_layout = outlink->channel_layout;

    err = av_frame_get_buffer(s->delay_frame, 32);
    if (err)
        return err;

    s->compand = compand_delay;
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CompandContext *s = ctx->priv;

    return s->compand(ctx, frame);
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    int ret = 0;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
        ret = compand_drain(outlink);

    return ret;
}

static const AVFilterPad compand_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad compand_outputs[] = {
    {
        .name = "default",
        .request_frame = request_frame,
        .config_props = config_output,
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_compand = {
    .name = "compand",
    .description = NULL_IF_CONFIG_SMALL("Compress or expand audio dynamic range."),
    .query_formats = query_formats,
    .priv_size = sizeof(CompandContext),
    .priv_class = &compand_class,
    .init = init,
    .uninit = uninit,
    .inputs = compand_inputs,
    .outputs = compand_outputs,
};