vf_lut2.c

/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"

static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    "bdx",      ///< input #1 video bitdepth
    "bdy",      ///< input #2 video bitdepth
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_X,
    VAR_Y,
    VAR_BITDEPTHX,
    VAR_BITDEPTHY,
    VAR_VARS_NB
};

typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;

    int odepth;
    char   *comp_expr_str[4];
    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];           ///< lookup table for each component
    int width[4], height[4];
    int widthx[4], heightx[4];
    int widthy[4], heighty[4];
    int nb_planesx;
    int nb_planesy;
    int nb_planes;
    int depth, depthx, depthy;
    int tlut2;
    AVFrame *prev_frame;        /* only used with tlut2 */

    int (*lut2)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} LUT2Context;

typedef struct ThreadData {
    AVFrame *out, *srcx, *srcy;
} ThreadData;

#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "d",  "set output depth",            OFFSET(odepth),           AV_OPT_TYPE_INT,    { .i64 = 0 }, 0, 16, .flags = FLAGS },
    { NULL }
};

static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    ff_framesync_uninit(&s->fs);
    av_frame_free(&s->prev_frame);

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
        av_freep(&s->lut[i]);
    }
}
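
/* Per-depth pixel format groups used to assemble the format lists below. */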
#define BIT8_FMTS \
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, \
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P, \
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, \
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,

#define BIT9_FMTS \
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9, \
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, \
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,

#define BIT10_FMTS \
    AV_PIX_FMT_GRAY10, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, \
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, \
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,

#define BIT12_FMTS \
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, \
    AV_PIX_FMT_GRAY12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRP12,

#define BIT14_FMTS \
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, \
    AV_PIX_FMT_GRAY14, AV_PIX_FMT_GBRP14,

#define BIT16_FMTS \
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, \
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, \
    AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16,
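
/* When the "d" option forces an output depth, the inputs may use any format
 * from the common list while the output is restricted to formats of the
 * requested depth; with tlut2, or with d=0, inputs and output share one
 * common format list. */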
static int query_formats(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    static const enum AVPixelFormat all_pix_fmts[] = {
        BIT8_FMTS
        BIT9_FMTS
        BIT10_FMTS
        BIT12_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit8_pix_fmts[] = {
        BIT8_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit9_pix_fmts[] = {
        BIT9_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit10_pix_fmts[] = {
        BIT10_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit12_pix_fmts[] = {
        BIT12_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit14_pix_fmts[] = {
        BIT14_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit16_pix_fmts[] = {
        BIT16_FMTS
        AV_PIX_FMT_NONE
    };
    const enum AVPixelFormat *pix_fmts;
    int ret;

    if (s->tlut2 || !s->odepth)
        return ff_set_common_formats(ctx, ff_make_format_list(all_pix_fmts));

    ret = ff_formats_ref(ff_make_format_list(all_pix_fmts), &ctx->inputs[0]->out_formats);
    if (ret < 0)
        return ret;

    switch (s->odepth) {
    case 8:  pix_fmts = bit8_pix_fmts;  break;
    case 9:  pix_fmts = bit9_pix_fmts;  break;
    case 10: pix_fmts = bit10_pix_fmts; break;
    case 12: pix_fmts = bit12_pix_fmts; break;
    case 14: pix_fmts = bit14_pix_fmts; break;
    case 16: pix_fmts = bit16_pix_fmts; break;
    default: av_log(ctx, AV_LOG_ERROR, "Unsupported output bit depth %d.\n", s->odepth);
             return AVERROR(EINVAL);
    }

    return ff_formats_ref(ff_make_format_list(pix_fmts), &ctx->outputs[0]->in_formats);
}

static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planesx = av_pix_fmt_count_planes(inlink->format);
    s->heightx[1] = s->heightx[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->heightx[0] = s->heightx[3] = inlink->h;
    s->widthx[1]  = s->widthx[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->widthx[0]  = s->widthx[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHX] = s->depthx;

    if (s->tlut2) {
        s->depthy = desc->comp[0].depth;
        s->var_values[VAR_BITDEPTHY] = s->depthy;
    }

    return 0;
}

static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planesy = av_pix_fmt_count_planes(inlink->format);
    s->depthy = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHY] = s->depthy;

    s->heighty[1] = s->heighty[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->heighty[0] = s->heighty[3] = inlink->h;
    s->widthy[1]  = s->widthy[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->widthy[0]  = s->widthy[3]  = inlink->w;

    return 0;
}
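
/* Generate one slice-threaded kernel per combination of output, first-input
 * and second-input sample widths.  For each pixel pair the precomputed LUT
 * is indexed with the second input's value in the high bits and the first
 * input's value in the low bits, and the result is clipped to the output
 * depth. */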
#define DEFINE_LUT2(zname, xname, yname, ztype, xtype, ytype, zdiv, xdiv, ydiv) \
static int lut2_##zname##_##xname##_##yname(AVFilterContext *ctx,               \
                                            void *arg,                          \
                                            int jobnr, int nb_jobs)             \
{                                                                               \
    LUT2Context *s = ctx->priv;                                                 \
    ThreadData *td = arg;                                                       \
    AVFrame *out = td->out;                                                     \
    AVFrame *srcx = td->srcx;                                                   \
    AVFrame *srcy = td->srcy;                                                   \
    const int odepth = s->odepth;                                               \
    int p, y, x;                                                                \
                                                                                \
    for (p = 0; p < s->nb_planes; p++) {                                        \
        const int slice_start = (s->heightx[p] * jobnr) / nb_jobs;              \
        const int slice_end = (s->heightx[p] * (jobnr+1)) / nb_jobs;            \
        const uint16_t *lut = s->lut[p];                                        \
        const xtype *srcxx;                                                     \
        const ytype *srcyy;                                                     \
        ztype *dst;                                                             \
                                                                                \
        dst   = (ztype *)(out->data[p] + slice_start * out->linesize[p]);       \
        srcxx = (const xtype *)(srcx->data[p] + slice_start * srcx->linesize[p]); \
        srcyy = (const ytype *)(srcy->data[p] + slice_start * srcy->linesize[p]); \
                                                                                \
        for (y = slice_start; y < slice_end; y++) {                             \
            for (x = 0; x < s->widthx[p]; x++) {                                \
                dst[x] = av_clip_uintp2_c(lut[(srcyy[x] << s->depthx) | srcxx[x]], odepth); \
            }                                                                   \
                                                                                \
            dst   += out->linesize[p] / zdiv;                                   \
            srcxx += srcx->linesize[p] / xdiv;                                  \
            srcyy += srcy->linesize[p] / ydiv;                                  \
        }                                                                       \
    }                                                                           \
    return 0;                                                                   \
}

DEFINE_LUT2(8,   8,  8, uint8_t,  uint8_t,  uint8_t,  1, 1, 1)
DEFINE_LUT2(8,   8, 16, uint8_t,  uint8_t,  uint16_t, 1, 1, 2)
DEFINE_LUT2(8,  16,  8, uint8_t,  uint16_t, uint8_t,  1, 2, 1)
DEFINE_LUT2(8,  16, 16, uint8_t,  uint16_t, uint16_t, 1, 2, 2)
DEFINE_LUT2(16,  8,  8, uint16_t, uint8_t,  uint8_t,  2, 1, 1)
DEFINE_LUT2(16,  8, 16, uint16_t, uint8_t,  uint16_t, 2, 1, 2)
DEFINE_LUT2(16, 16,  8, uint16_t, uint16_t, uint8_t,  2, 2, 1)
DEFINE_LUT2(16, 16, 16, uint16_t, uint16_t, uint16_t, 2, 2, 2)
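
/* Framesync callback: fetch the current frame from each input and run the
 * selected LUT kernel over all planes, sliced across the worker threads. */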
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx = NULL, *srcy = NULL;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;

    if (ctx->is_disabled || !srcy) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        ThreadData td;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        td.out  = out;
        td.srcx = srcx;
        td.srcy = srcy;
        ctx->internal->execute(ctx, s->lut2, &td, NULL, FFMIN(s->heightx[1], ff_filter_get_nb_threads(ctx)));
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
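
/* Pick the LUT kernel matching the negotiated depths and fill one table per
 * plane by evaluating the component expression for every (x, y) value pair;
 * each table holds 1 << (depthx + depthy) entries. */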
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    int p, ret;

    s->depth = s->depthx + s->depthy;
    s->nb_planes = s->nb_planesx;

    s->lut2 = s->depth > 16 ? lut2_16_16_16 : lut2_8_8_8;
    if (s->odepth) {
        if (s->depthx == 8 && s->depthy == 8 && s->odepth > 8)
            s->lut2 = lut2_16_8_8;
        if (s->depthx > 8 && s->depthy == 8 && s->odepth > 8)
            s->lut2 = lut2_16_16_8;
        if (s->depthx == 8 && s->depthy > 8 && s->odepth > 8)
            s->lut2 = lut2_16_8_16;
        if (s->depthx == 8 && s->depthy == 8 && s->odepth == 8)
            s->lut2 = lut2_8_8_8;
        if (s->depthx > 8 && s->depthy == 8 && s->odepth == 8)
            s->lut2 = lut2_8_16_8;
        if (s->depthx == 8 && s->depthy > 8 && s->odepth == 8)
            s->lut2 = lut2_8_8_16;
        if (s->depthx > 8 && s->depthy > 8 && s->odepth == 8)
            s->lut2 = lut2_8_16_16;
    } else {
        s->odepth = s->depthx;
    }

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }

    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        for (y = 0; y < (1 << s->depthy); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return 0;
}
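
/* Two-input configuration: check that both inputs and the output agree in
 * size and plane layout, then set up framesync with the first input as the
 * master stream. */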
static int lut2_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;
    int ret;

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(outlink->h, vsub);
    s->height[0] = s->height[3] = outlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(outlink->w, hsub);
    s->width[0]  = s->width[3]  = outlink->w;

    if (!s->odepth && srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of the same pixel format\n");
        return AVERROR(EINVAL);
    }

    if (srcx->w != srcy->w || srcx->h != srcy->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (size %dx%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               ctx->input_pads[1].name,
               srcy->w, srcy->h);
        return AVERROR(EINVAL);
    }

    if (s->nb_planesx != s->nb_planesy) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s number of planes "
               "(%d) does not match the corresponding "
               "second input link %s number of planes (%d)\n",
               ctx->input_pads[0].name, s->nb_planesx,
               ctx->input_pads[1].name, s->nb_planesy);
        return AVERROR(EINVAL);
    }

    if (s->nb_planesx != s->nb_planes) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s number of planes "
               "(%d) does not match the corresponding "
               "output link %s number of planes (%d)\n",
               ctx->input_pads[0].name, s->nb_planesx,
               ctx->output_pads[0].name, s->nb_planes);
        return AVERROR(EINVAL);
    }

    if (s->widthx[1] != s->widthy[1] || s->heightx[1] != s->heighty[1]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 2nd plane "
               "(size %dx%d) does not match the corresponding "
               "second input link %s 2nd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
               ctx->input_pads[1].name,
               s->widthy[1], s->heighty[1]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[2] != s->widthy[2] || s->heightx[2] != s->heighty[2]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 3rd plane "
               "(size %dx%d) does not match the corresponding "
               "second input link %s 3rd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
               ctx->input_pads[1].name,
               s->widthy[2], s->heighty[2]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[1] != s->width[1] || s->heightx[1] != s->height[1]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 2nd plane "
               "(size %dx%d) does not match the corresponding "
               "output link %s 2nd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
               ctx->output_pads[0].name, s->width[1], s->height[1]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[2] != s->width[2] || s->heightx[2] != s->height[2]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 3rd plane "
               "(size %dx%d) does not match the corresponding "
               "output link %s 3rd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
               ctx->output_pads[0].name, s->width[2], s->height[2]);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    if ((ret = config_output(outlink)) < 0)
        return ret;

    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}

static int activate(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputy,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = lut2_config_output,
    },
    { NULL }
};

#define lut2_options options

FRAMESYNC_DEFINE_CLASS(lut2, LUT2Context, fs);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .preinit       = lut2_framesync_preinit,
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS,
};

#if CONFIG_TLUT2_FILTER

static av_cold int init(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;

    s->tlut2 = !strcmp(ctx->filter->name, "tlut2");

    return 0;
}
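
/* tlut2 has a single input: each frame is paired with the previous frame of
 * the same stream, so "x" refers to the current frame and "y" to the
 * previous one. */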
static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->prev_frame) {
        AVFrame *out;

        if (ctx->is_disabled) {
            out = av_frame_clone(frame);
            if (!out) {
                av_frame_free(&s->prev_frame);
                s->prev_frame = frame;
                return AVERROR(ENOMEM);
            }
        } else {
            ThreadData td;

            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&s->prev_frame);
                s->prev_frame = frame;
                return AVERROR(ENOMEM);
            }

            av_frame_copy_props(out, frame);

            td.out  = out;
            td.srcx = frame;
            td.srcy = s->prev_frame;
            ctx->internal->execute(ctx, s->lut2, &td, NULL, FFMIN(s->heightx[1], ff_filter_get_nb_threads(ctx)));
        }
        av_frame_free(&s->prev_frame);
        s->prev_frame = frame;
        return ff_filter_frame(outlink, out);
    }
    s->prev_frame = frame;
    return 0;
}

static const AVOption tlut2_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tlut2);

static const AVFilterPad tlut2_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tlut2_filter_frame,
        .config_props  = config_inputx,
    },
    { NULL }
};

static const AVFilterPad tlut2_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_tlut2 = {
    .name          = "tlut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &tlut2_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tlut2_inputs,
    .outputs       = tlut2_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS,
};

#endif