/* vf_lumakey.c — lumakey video filter (turns a luma range into transparency) */
  1. /*
  2. * Copyright (c) 2017 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/opt.h"
  21. #include "libavutil/imgutils.h"
  22. #include "avfilter.h"
  23. #include "formats.h"
  24. #include "internal.h"
  25. #include "video.h"
/**
 * Per-instance state for the lumakey filter.
 *
 * threshold/tolerance/softness are user options; white/black/max are
 * derived per-format in config_input() and consumed by the slice workers.
 */
typedef struct LumakeyContext {
    const AVClass *class;   /* must be first: required by the AVOptions system */

    int threshold;          /* option: center of the luma range to key out */
    int tolerance;          /* option: half-width of the fully keyed range */
    int softness;           /* option: width of the alpha ramp on each side */

    int white;              /* derived: upper luma bound, clipped to bit depth */
    int black;              /* derived: lower luma bound, clipped to bit depth */
    int max;                /* derived: maximum alpha value for >8-bit formats */

    /* format-specific slice worker selected in config_input() */
    int (*do_lumakey_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} LumakeyContext;
  36. static int do_lumakey_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  37. {
  38. LumakeyContext *s = ctx->priv;
  39. AVFrame *frame = arg;
  40. const int slice_start = (frame->height * jobnr) / nb_jobs;
  41. const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
  42. uint8_t *alpha = frame->data[3] + slice_start * frame->linesize[3];
  43. const uint8_t *luma = frame->data[0] + slice_start * frame->linesize[0];
  44. const int so = s->softness;
  45. const int w = s->white;
  46. const int b = s->black;
  47. int x, y;
  48. for (y = slice_start; y < slice_end; y++) {
  49. for (x = 0; x < frame->width; x++) {
  50. if (luma[x] >= b && luma[x] <= w) {
  51. alpha[x] = 0;
  52. } else if (luma[x] > b - so && luma[x] < w + so) {
  53. if (luma[x] < b) {
  54. alpha[x] = 255 - (luma[x] - b + so) * 255 / so;
  55. } else {
  56. alpha[x] = (luma[x] - w) * 255 / so;
  57. }
  58. }
  59. }
  60. luma += frame->linesize[0];
  61. alpha += frame->linesize[3];
  62. }
  63. return 0;
  64. }
  65. static int do_lumakey_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  66. {
  67. LumakeyContext *s = ctx->priv;
  68. AVFrame *frame = arg;
  69. const int slice_start = (frame->height * jobnr) / nb_jobs;
  70. const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
  71. uint16_t *alpha = (uint16_t *)(frame->data[3] + slice_start * frame->linesize[3]);
  72. const uint16_t *luma = (const uint16_t *)(frame->data[0] + slice_start * frame->linesize[0]);
  73. const int so = s->softness;
  74. const int w = s->white;
  75. const int b = s->black;
  76. const int m = s->max;
  77. int x, y;
  78. for (y = slice_start; y < slice_end; y++) {
  79. for (x = 0; x < frame->width; x++) {
  80. if (luma[x] >= b && luma[x] <= w) {
  81. alpha[x] = 0;
  82. } else if (luma[x] > b - so && luma[x] < w + so) {
  83. if (luma[x] < b) {
  84. alpha[x] = m - (luma[x] - b + so) * m / so;
  85. } else {
  86. alpha[x] = (luma[x] - w) * m / so;
  87. }
  88. }
  89. }
  90. luma += frame->linesize[0] / 2;
  91. alpha += frame->linesize[3] / 2;
  92. }
  93. return 0;
  94. }
  95. static int config_input(AVFilterLink *inlink)
  96. {
  97. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  98. AVFilterContext *ctx = inlink->dst;
  99. LumakeyContext *s = ctx->priv;
  100. int depth;
  101. depth = desc->comp[0].depth;
  102. if (depth == 8) {
  103. s->white = av_clip_uint8(s->threshold + s->tolerance);
  104. s->black = av_clip_uint8(s->threshold - s->tolerance);
  105. s->do_lumakey_slice = do_lumakey_slice8;
  106. } else {
  107. s->max = (1 << depth) - 1;
  108. s->white = av_clip(s->threshold + s->tolerance, 0, s->max);
  109. s->black = av_clip(s->threshold - s->tolerance, 0, s->max);
  110. s->do_lumakey_slice = do_lumakey_slice16;
  111. }
  112. return 0;
  113. }
  114. static int filter_frame(AVFilterLink *link, AVFrame *frame)
  115. {
  116. AVFilterContext *ctx = link->dst;
  117. LumakeyContext *s = ctx->priv;
  118. int ret;
  119. if (ret = av_frame_make_writable(frame))
  120. return ret;
  121. if (ret = ctx->internal->execute(ctx, s->do_lumakey_slice, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(ctx))))
  122. return ret;
  123. return ff_filter_frame(ctx->outputs[0], frame);
  124. }
  125. static av_cold int query_formats(AVFilterContext *ctx)
  126. {
  127. static const enum AVPixelFormat pixel_fmts[] = {
  128. AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
  129. AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
  130. AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
  131. AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16,
  132. AV_PIX_FMT_NONE
  133. };
  134. AVFilterFormats *formats;
  135. formats = ff_make_format_list(pixel_fmts);
  136. if (!formats)
  137. return AVERROR(ENOMEM);
  138. return ff_set_common_formats(ctx, formats);
  139. }
/* Single video input; config_props derives the keying bounds per format. */
static const AVFilterPad lumakey_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single video output; frames are forwarded unchanged apart from alpha. */
static const AVFilterPad lumakey_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
#define OFFSET(x) offsetof(LumakeyContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User options; ranges cover up to 16-bit luma, clipped to the actual
 * bit depth in config_input(). */
static const AVOption lumakey_options[] = {
    { "threshold", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, FLAGS },
    { "tolerance", "set the tolerance value", OFFSET(tolerance), AV_OPT_TYPE_INT, {.i64=1}, 0, UINT16_MAX, FLAGS },
    { "softness",  "set the softness value",  OFFSET(softness),  AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(lumakey);

/* Filter registration; slice-threaded because each worker touches
 * disjoint rows of the frame. */
AVFilter ff_vf_lumakey = {
    .name          = "lumakey",
    .description   = NULL_IF_CONFIG_SMALL("Turns a certain luma into transparency."),
    .priv_size     = sizeof(LumakeyContext),
    .priv_class    = &lumakey_class,
    .query_formats = query_formats,
    .inputs        = lumakey_inputs,
    .outputs       = lumakey_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};