/* vf_smartblur.c */
  1. /*
  2. * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
  3. * Copyright (c) 2012 Jeremy Tran
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along
  18. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  20. */
  21. /**
  22. * @file
  23. * Apply a smartblur filter to the input video
  24. * Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer.
  25. */
  26. #include "libavutil/opt.h"
  27. #include "libavutil/pixdesc.h"
  28. #include "libswscale/swscale.h"
  29. #include "avfilter.h"
  30. #include "formats.h"
  31. #include "internal.h"
/* Legal ranges for the filter options. The chroma options default to
 * (MIN - 1), i.e. just below their legal minimum, which init() uses as an
 * "unset, inherit from luma" sentinel. */
#define RADIUS_MIN 0.1
#define RADIUS_MAX 5.0
#define STRENGTH_MIN -1.0
#define STRENGTH_MAX 1.0
#define THRESHOLD_MIN -30
#define THRESHOLD_MAX 30
/* Per-plane-group (luma or chroma) blur settings plus the swscale context
 * that performs the actual Gaussian filtering. */
typedef struct FilterParam {
    float radius;                       ///< blur radius, passed to sws_getGaussianVec()
    float strength;                     ///< blur strength; the kernel blends original and blurred picture
    int threshold;                      ///< selects which pixels are restored after blurring; 0 = plain blur
    float quality;                      ///< Gaussian vector quality, fixed to 3.0 in init()
    struct SwsContext *filter_context;  ///< swscale context applying the Gaussian kernel (allocated in config_props())
} FilterParam;
/* Filter private context: independent parameter sets for the luma and the
 * chroma planes, plus the chroma subsampling of the negotiated input format. */
typedef struct SmartblurContext {
    const AVClass *class;   ///< AVOptions class; must be the first field
    FilterParam  luma;      ///< settings applied to the Y plane
    FilterParam  chroma;    ///< settings applied to the U and V planes
    int          hsub;      ///< log2 horizontal chroma subsampling of the input format
    int          vsub;      ///< log2 vertical chroma subsampling of the input format
    unsigned int sws_flags; ///< flags passed to sws_getCachedContext()
} SmartblurContext;
#define OFFSET(x) offsetof(SmartblurContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options. Each option has a long name and a short alias
 * mapped to the same field. The chroma options default to one below their
 * legal minimum so init() can detect that they were left unset and copy
 * the corresponding luma value. */
static const AVOption smartblur_options[] = {
    { "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
    { "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
    { "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
    { "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
    { "luma_threshold", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
    { "lt", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
    { "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
    { "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
    { "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
    { "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
    { "chroma_threshold", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
    { "ct", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(smartblur);
  71. static av_cold int init(AVFilterContext *ctx)
  72. {
  73. SmartblurContext *s = ctx->priv;
  74. /* make chroma default to luma values, if not explicitly set */
  75. if (s->chroma.radius < RADIUS_MIN)
  76. s->chroma.radius = s->luma.radius;
  77. if (s->chroma.strength < STRENGTH_MIN)
  78. s->chroma.strength = s->luma.strength;
  79. if (s->chroma.threshold < THRESHOLD_MIN)
  80. s->chroma.threshold = s->luma.threshold;
  81. s->luma.quality = s->chroma.quality = 3.0;
  82. s->sws_flags = SWS_BICUBIC;
  83. av_log(ctx, AV_LOG_VERBOSE,
  84. "luma_radius:%f luma_strength:%f luma_threshold:%d "
  85. "chroma_radius:%f chroma_strength:%f chroma_threshold:%d\n",
  86. s->luma.radius, s->luma.strength, s->luma.threshold,
  87. s->chroma.radius, s->chroma.strength, s->chroma.threshold);
  88. return 0;
  89. }
  90. static av_cold void uninit(AVFilterContext *ctx)
  91. {
  92. SmartblurContext *s = ctx->priv;
  93. sws_freeContext(s->luma.filter_context);
  94. sws_freeContext(s->chroma.filter_context);
  95. }
  96. static int query_formats(AVFilterContext *ctx)
  97. {
  98. static const enum AVPixelFormat pix_fmts[] = {
  99. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
  100. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
  101. AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
  102. AV_PIX_FMT_GRAY8,
  103. AV_PIX_FMT_NONE
  104. };
  105. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  106. if (!fmts_list)
  107. return AVERROR(ENOMEM);
  108. return ff_set_common_formats(ctx, fmts_list);
  109. }
  110. static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
  111. {
  112. SwsVector *vec;
  113. SwsFilter sws_filter;
  114. vec = sws_getGaussianVec(f->radius, f->quality);
  115. if (!vec)
  116. return AVERROR(EINVAL);
  117. sws_scaleVec(vec, f->strength);
  118. vec->coeff[vec->length / 2] += 1.0 - f->strength;
  119. sws_filter.lumH = sws_filter.lumV = vec;
  120. sws_filter.chrH = sws_filter.chrV = NULL;
  121. f->filter_context = sws_getCachedContext(NULL,
  122. width, height, AV_PIX_FMT_GRAY8,
  123. width, height, AV_PIX_FMT_GRAY8,
  124. flags, &sws_filter, NULL, NULL);
  125. sws_freeVec(vec);
  126. if (!f->filter_context)
  127. return AVERROR(EINVAL);
  128. return 0;
  129. }
  130. static int config_props(AVFilterLink *inlink)
  131. {
  132. SmartblurContext *s = inlink->dst->priv;
  133. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  134. s->hsub = desc->log2_chroma_w;
  135. s->vsub = desc->log2_chroma_h;
  136. alloc_sws_context(&s->luma, inlink->w, inlink->h, s->sws_flags);
  137. alloc_sws_context(&s->chroma,
  138. AV_CEIL_RSHIFT(inlink->w, s->hsub),
  139. AV_CEIL_RSHIFT(inlink->h, s->vsub),
  140. s->sws_flags);
  141. return 0;
  142. }
/**
 * Blur one GRAY8 plane through the given swscale context, then selectively
 * restore pixels according to the threshold:
 *
 *  - threshold == 0: plain blur, the filtered plane is kept as-is;
 *  - threshold > 0: pixels where the original differs strongly from the
 *    filtered value (edges) are restored toward the original, so only flat
 *    areas stay blurred;
 *  - threshold < 0: the opposite selection — pixels with a small difference
 *    are restored, larger differences keep the filtered value (pairs with a
 *    negative-strength, i.e. sharpening, kernel).
 *
 * src and dst must be distinct buffers of at least h lines.
 */
static void blur(uint8_t *dst, const int dst_linesize,
                 const uint8_t *src, const int src_linesize,
                 const int w, const int h, const int threshold,
                 struct SwsContext *filter_context)
{
    int x, y;
    int orig, filtered;
    int diff;
    /* Declare arrays of 4 to get aligned data */
    const uint8_t* const src_array[4] = {src};
    uint8_t *dst_array[4] = {dst};
    int src_linesize_array[4] = {src_linesize};
    int dst_linesize_array[4] = {dst_linesize};

    /* First pass: run the Gaussian kernel over the whole plane. */
    sws_scale(filter_context, src_array, src_linesize_array,
              0, h, dst_array, dst_linesize_array);

    if (threshold > 0) {
        for (y = 0; y < h; ++y) {
            for (x = 0; x < w; ++x) {
                orig = src[x + y * src_linesize];
                filtered = dst[x + y * dst_linesize];
                diff = orig - filtered;

                if (diff > 0) {
                    /* diff > 2*threshold: strong edge, keep the original;
                     * threshold < diff <= 2*threshold: partial restore,
                     * clamped to 'threshold' away from the original. */
                    if (diff > 2 * threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (diff > threshold)
                        /* add 'diff' and subtract 'threshold' from 'filtered' */
                        dst[x + y * dst_linesize] = orig - threshold;
                } else {
                    /* Mirror of the branch above for negative differences. */
                    if (-diff > 2 * threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (-diff > threshold)
                        /* add 'diff' and 'threshold' to 'filtered' */
                        dst[x + y * dst_linesize] = orig + threshold;
                }
            }
        }
    } else if (threshold < 0) {
        for (y = 0; y < h; ++y) {
            for (x = 0; x < w; ++x) {
                orig = src[x + y * src_linesize];
                filtered = dst[x + y * dst_linesize];
                diff = orig - filtered;

                if (diff > 0) {
                    /* Note: threshold is negative here, so -threshold and
                     * -2*threshold are positive bounds.
                     * Small difference: flat area, restore the original;
                     * intermediate difference: partial restore. */
                    if (diff <= -threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (diff <= -2 * threshold)
                        /* subtract 'diff' and 'threshold' from 'orig' */
                        dst[x + y * dst_linesize] = filtered - threshold;
                } else {
                    /* Mirror of the branch above for negative differences. */
                    if (diff >= threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (diff >= 2 * threshold)
                        /* add 'threshold' and subtract 'diff' from 'orig' */
                        dst[x + y * dst_linesize] = filtered + threshold;
                }
            }
        }
    }
    /* threshold == 0: nothing to restore, the plain blur stands. */
}
  202. static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
  203. {
  204. SmartblurContext *s = inlink->dst->priv;
  205. AVFilterLink *outlink = inlink->dst->outputs[0];
  206. AVFrame *outpic;
  207. int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
  208. int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);
  209. outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
  210. if (!outpic) {
  211. av_frame_free(&inpic);
  212. return AVERROR(ENOMEM);
  213. }
  214. av_frame_copy_props(outpic, inpic);
  215. blur(outpic->data[0], outpic->linesize[0],
  216. inpic->data[0], inpic->linesize[0],
  217. inlink->w, inlink->h, s->luma.threshold,
  218. s->luma.filter_context);
  219. if (inpic->data[2]) {
  220. blur(outpic->data[1], outpic->linesize[1],
  221. inpic->data[1], inpic->linesize[1],
  222. cw, ch, s->chroma.threshold,
  223. s->chroma.filter_context);
  224. blur(outpic->data[2], outpic->linesize[2],
  225. inpic->data[2], inpic->linesize[2],
  226. cw, ch, s->chroma.threshold,
  227. s->chroma.filter_context);
  228. }
  229. av_frame_free(&inpic);
  230. return ff_filter_frame(outlink, outpic);
  231. }
/* Single video input pad: frames are filtered in filter_frame() and the
 * swscale contexts are (re)built in config_props(). */
static const AVFilterPad smartblur_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};
/* Single video output pad; no special configuration needed. */
static const AVFilterPad smartblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Filter registration entry. Supports generic timeline editing (the
 * "enable" expression option). */
AVFilter ff_vf_smartblur = {
    .name = "smartblur",
    .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
    .priv_size = sizeof(SmartblurContext),
    .init = init,
    .uninit = uninit,
    .query_formats = query_formats,
    .inputs = smartblur_inputs,
    .outputs = smartblur_outputs,
    .priv_class = &smartblur_class,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};