vf_scale_qsv.c

/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter - QSV
 */

#include <mfx/mfxvideo.h>

#include <stdio.h>
#include <string.h>

#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavfilter/qsvvpp.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "PI",
    "PHI",
    "E",
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a", "dar",
    "sar",
    NULL
};

enum var_name {
    VAR_PI,
    VAR_PHI,
    VAR_E,
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A, VAR_DAR,
    VAR_SAR,
    VARS_NB
};

#define QSV_HAVE_SCALING_CONFIG QSV_VERSION_ATLEAST(1, 19)

typedef struct QSVScaleContext {
    const AVClass *class;

    /* a clone of the main session, used internally for scaling */
    mfxSession   session;

    mfxMemId *mem_ids_in;
    int     nb_mem_ids_in;

    mfxMemId *mem_ids_out;
    int     nb_mem_ids_out;

    mfxFrameSurface1 **surface_ptrs_in;
    int             nb_surface_ptrs_in;

    mfxFrameSurface1 **surface_ptrs_out;
    int             nb_surface_ptrs_out;

    mfxExtOpaqueSurfaceAlloc opaque_alloc;

#if QSV_HAVE_SCALING_CONFIG
    mfxExtVPPScaling         scale_conf;
#endif
    int                      mode;

    mfxExtBuffer             *ext_buffers[1 + QSV_HAVE_SCALING_CONFIG];
    int                      num_ext_buf;

    int shift_width, shift_height;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     */
    int w, h;

    /**
     * Output sw format. AV_PIX_FMT_NONE for no conversion.
     */
    enum AVPixelFormat format;

    char *w_expr;               ///< width  expression string
    char *h_expr;               ///< height expression string
    char *format_str;
} QSVScaleContext;

static av_cold int qsvscale_init(AVFilterContext *ctx)
{
    QSVScaleContext *s = ctx->priv;

    if (!strcmp(s->format_str, "same")) {
        s->format = AV_PIX_FMT_NONE;
    } else {
        s->format = av_get_pix_fmt(s->format_str);
        if (s->format == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}

static av_cold void qsvscale_uninit(AVFilterContext *ctx)
{
    QSVScaleContext *s = ctx->priv;

    if (s->session) {
        MFXClose(s->session);
        s->session = NULL;
    }

    av_freep(&s->mem_ids_in);
    av_freep(&s->mem_ids_out);
    s->nb_mem_ids_in  = 0;
    s->nb_mem_ids_out = 0;

    av_freep(&s->surface_ptrs_in);
    av_freep(&s->surface_ptrs_out);
    s->nb_surface_ptrs_in  = 0;
    s->nb_surface_ptrs_out = 0;
}

static int qsvscale_query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_formats[] = {
        AV_PIX_FMT_QSV, AV_PIX_FMT_NONE,
    };
    AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
    int ret;

    if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0)
        return ret;

    return 0;
}

static int init_out_pool(AVFilterContext *ctx,
                         int out_width, int out_height)
{
    QSVScaleContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    AVHWFramesContext *in_frames_ctx;
    AVHWFramesContext *out_frames_ctx;
    AVQSVFramesContext *in_frames_hwctx;
    AVQSVFramesContext *out_frames_hwctx;
    enum AVPixelFormat in_format;
    enum AVPixelFormat out_format;
    int i, ret;

    /* check that we have a hw context */
    if (!ctx->inputs[0]->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
        return AVERROR(EINVAL);
    }
    in_frames_ctx   = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
    in_frames_hwctx = in_frames_ctx->hwctx;

    in_format  = in_frames_ctx->sw_format;
    out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;

    outlink->hw_frames_ctx = av_hwframe_ctx_alloc(in_frames_ctx->device_ref);
    if (!outlink->hw_frames_ctx)
        return AVERROR(ENOMEM);
    out_frames_ctx   = (AVHWFramesContext*)outlink->hw_frames_ctx->data;
    out_frames_hwctx = out_frames_ctx->hwctx;

    out_frames_ctx->format            = AV_PIX_FMT_QSV;
    out_frames_ctx->width             = FFALIGN(out_width,  32);
    out_frames_ctx->height            = FFALIGN(out_height, 32);
    out_frames_ctx->sw_format         = out_format;
    out_frames_ctx->initial_pool_size = 4;

    out_frames_hwctx->frame_type = in_frames_hwctx->frame_type;

    ret = ff_filter_init_hw_frames(ctx, outlink, 32);
    if (ret < 0)
        return ret;

    ret = av_hwframe_ctx_init(outlink->hw_frames_ctx);
    if (ret < 0)
        return ret;

    for (i = 0; i < out_frames_hwctx->nb_surfaces; i++) {
        mfxFrameInfo *info = &out_frames_hwctx->surfaces[i].Info;
        info->CropW = out_width;
        info->CropH = out_height;
    }

    return 0;
}

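/*
 * Minimal mfxFrameAllocator callbacks for the "slave" session created in
 * init_out_session() below. Rather than allocating new video memory, they
 * hand back the fixed arrays of memory ids (mem_ids_in / mem_ids_out)
 * collected from the existing input and output frame pools; GetHDL simply
 * returns the memory id itself as the handle. Lock and Unlock are
 * unsupported because this filter never maps surfaces to system memory.
 */
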
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    AVFilterContext *ctx = pthis;
    QSVScaleContext   *s = ctx->priv;

    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
        return MFX_ERR_UNSUPPORTED;

    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
        resp->mids           = s->mem_ids_in;
        resp->NumFrameActual = s->nb_mem_ids_in;
    } else {
        resp->mids           = s->mem_ids_out;
        resp->NumFrameActual = s->nb_mem_ids_out;
    }

    return MFX_ERR_NONE;
}

static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    return MFX_ERR_NONE;
}

static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;
    return MFX_ERR_NONE;
}

static const mfxHandleType handle_types[] = {
    MFX_HANDLE_VA_DISPLAY,
    MFX_HANDLE_D3D9_DEVICE_MANAGER,
    MFX_HANDLE_D3D11_DEVICE,
};

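/*
 * handle_types above lists the device handles probed on the "master"
 * session: a VA-API display (Linux), a D3D9 device manager or a D3D11
 * device (Windows). init_out_session() tries them in array order and
 * reuses whichever handle the master session returns for the internal
 * scaling session it creates.
 */
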
static int init_out_session(AVFilterContext *ctx)
{
    QSVScaleContext                   *s = ctx->priv;
    AVHWFramesContext     *in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
    AVHWFramesContext    *out_frames_ctx = (AVHWFramesContext*)ctx->outputs[0]->hw_frames_ctx->data;
    AVQSVFramesContext  *in_frames_hwctx = in_frames_ctx->hwctx;
    AVQSVFramesContext *out_frames_hwctx = out_frames_ctx->hwctx;
    AVQSVDeviceContext     *device_hwctx = in_frames_ctx->device_ctx->hwctx;

    int opaque = !!(in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);

    mfxHDL handle = NULL;
    mfxHandleType handle_type;
    mfxVersion ver;
    mfxIMPL impl;
    mfxVideoParam par;
    mfxStatus err;
    int i;

    s->num_ext_buf = 0;

    /* extract the properties of the "master" session given to us */
    err = MFXQueryIMPL(device_hwctx->session, &impl);
    if (err == MFX_ERR_NONE)
        err = MFXQueryVersion(device_hwctx->session, &ver);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) {
        err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle);
        if (err == MFX_ERR_NONE) {
            handle_type = handle_types[i];
            break;
        }
    }

    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error getting the session handle\n");
        return AVERROR_UNKNOWN;
    }

    /* create a "slave" session with those same properties, to be used for
     * actual scaling */
    err = MFXInit(impl, &ver, &s->session);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing a session for scaling\n");
        return AVERROR_UNKNOWN;
    }

    if (handle) {
        err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;
    }

    if (QSV_RUNTIME_VERSION_ATLEAST(ver, 1, 25)) {
        err = MFXJoinSession(device_hwctx->session, s->session);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;
    }

    memset(&par, 0, sizeof(par));

    if (opaque) {
        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                              sizeof(*s->surface_ptrs_in));
        if (!s->surface_ptrs_in)
            return AVERROR(ENOMEM);
        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;

        s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                               sizeof(*s->surface_ptrs_out));
        if (!s->surface_ptrs_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;

        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
        s->opaque_alloc.In.Type       = in_frames_hwctx->frame_type;

        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
        s->opaque_alloc.Out.Type       = out_frames_hwctx->frame_type;

        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);

        s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->opaque_alloc;

        par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
    } else {
        mfxFrameAllocator frame_allocator = {
            .pthis  = ctx,
            .Alloc  = frame_alloc,
            .Lock   = frame_lock,
            .Unlock = frame_unlock,
            .GetHDL = frame_get_hdl,
            .Free   = frame_free,
        };

        s->mem_ids_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                         sizeof(*s->mem_ids_in));
        if (!s->mem_ids_in)
            return AVERROR(ENOMEM);
        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
            s->mem_ids_in[i] = in_frames_hwctx->surfaces[i].Data.MemId;
        s->nb_mem_ids_in = in_frames_hwctx->nb_surfaces;

        s->mem_ids_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                          sizeof(*s->mem_ids_out));
        if (!s->mem_ids_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
            s->mem_ids_out[i] = out_frames_hwctx->surfaces[i].Data.MemId;
        s->nb_mem_ids_out = out_frames_hwctx->nb_surfaces;

        err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;

        par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
    }

#if QSV_HAVE_SCALING_CONFIG
    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
    s->scale_conf.Header.BufferId    = MFX_EXTBUFF_VPP_SCALING;
    s->scale_conf.Header.BufferSz    = sizeof(mfxExtVPPScaling);
    s->scale_conf.ScalingMode        = s->mode;
    s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %"PRIu16"\n", s->mode);
#endif

    par.ExtParam    = s->ext_buffers;
    par.NumExtParam = s->num_ext_buf;

    par.AsyncDepth = 1;    // TODO async

    par.vpp.In  = in_frames_hwctx->surfaces[0].Info;
    par.vpp.Out = out_frames_hwctx->surfaces[0].Info;

    /* Apparently VPP requires the frame rate to be set to some value, otherwise
     * init will fail (probably for the framerate conversion filter). Since we
     * are only doing scaling here, we just invent an arbitrary
     * value */
    par.vpp.In.FrameRateExtN  = 25;
    par.vpp.In.FrameRateExtD  = 1;
    par.vpp.Out.FrameRateExtN = 25;
    par.vpp.Out.FrameRateExtD = 1;

    err = MFXVideoVPP_Init(s->session, &par);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error opening the VPP for scaling\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}

static int init_scale_session(AVFilterContext *ctx, int in_width, int in_height,
                              int out_width, int out_height)
{
    int ret;

    qsvscale_uninit(ctx);

    ret = init_out_pool(ctx, out_width, out_height);
    if (ret < 0)
        return ret;

    ret = init_out_session(ctx);
    if (ret < 0)
        return ret;

    return 0;
}

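/*
 * Output size negotiation: the w/h options are expressions evaluated against
 * var_names above (iw/ih, ow/oh, a, sar, dar, ...). As documented on
 * QSVScaleContext, a result of 0 keeps the corresponding input dimension and
 * -1 derives it from the other dimension so the input aspect ratio is kept.
 */
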
static int qsvscale_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    QSVScaleContext   *s = ctx->priv;
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    var_values[VAR_PI]    = M_PI;
    var_values[VAR_PHI]   = M_PHI;
    var_values[VAR_E]     = M_E;
    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];

    /* evaluate width and height */
    av_expr_parse_and_eval(&res, (expr = s->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;

    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;

    /* evaluate again the width, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->w = res;

    w = s->w;
    h = s->h;

    /* sanity check params */
    if (w < -1 || h < -1) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
        return AVERROR(EINVAL);
    }
    if (w == -1 && h == -1)
        s->w = s->h = 0;

    if (!(w = s->w))
        w = inlink->w;
    if (!(h = s->h))
        h = inlink->h;
    if (w == -1)
        w = av_rescale(h, inlink->w, inlink->h);
    if (h == -1)
        h = av_rescale(w, inlink->h, inlink->w);

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    ret = init_scale_session(ctx, inlink->w, inlink->h, w, h);
    if (ret < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
           inlink->w, inlink->h, outlink->w, outlink->h);

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
                                                             outlink->w*inlink->h},
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}

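/*
 * Per-frame processing. For AV_PIX_FMT_QSV frames, AVFrame.data[3] holds a
 * pointer to the underlying mfxFrameSurface1, which is passed directly to
 * MFXVideoVPP_RunFrameVPPAsync(). MFX_WRN_DEVICE_BUSY is handled by retrying
 * after a short sleep, and the single outstanding operation (AsyncDepth = 1)
 * is synced before the output frame is forwarded downstream.
 */
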
static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext  *ctx = link->dst;
    QSVScaleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    mfxSyncPoint sync = NULL;
    mfxStatus err;

    AVFrame *out = NULL;
    int ret = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
                                           (mfxFrameSurface1*)in->data[3],
                                           (mfxFrameSurface1*)out->data[3],
                                           NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0 || !sync) {
        av_log(ctx, AV_LOG_ERROR, "Error during scaling\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    ret = av_frame_copy_props(out, in);
    if (ret < 0)
        goto fail;

    out->width  = outlink->w;
    out->height = outlink->h;

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
fail:
    av_frame_free(&in);
    av_frame_free(&out);
    return ret;
}

#define OFFSET(x) offsetof(QSVScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "w",      "Output video width",  OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
    { "h",      "Output video height", OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
    { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },

#if QSV_HAVE_SCALING_CONFIG
    { "mode",      "set scaling mode",  OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = MFX_SCALING_MODE_DEFAULT},  MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode"},
    { "low_power", "low power mode",    0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "hq",        "high quality mode", 0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY},  INT_MIN, INT_MAX, FLAGS, "mode"},
#else
    { "mode",      "(not supported)",   OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = 0}, 0, INT_MAX, FLAGS, "mode"},
    { "low_power", "",                  0,            AV_OPT_TYPE_CONST, { .i64 = 1}, 0, 0,       FLAGS, "mode"},
    { "hq",        "",                  0,            AV_OPT_TYPE_CONST, { .i64 = 2}, 0, 0,       FLAGS, "mode"},
#endif

    { NULL },
};

static const AVClass qsvscale_class = {
    .class_name = "qsvscale",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad qsvscale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = qsvscale_filter_frame,
    },
    { NULL }
};

static const AVFilterPad qsvscale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = qsvscale_config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale_qsv = {
    .name          = "scale_qsv",
    .description   = NULL_IF_CONFIG_SMALL("QuickSync video scaling and format conversion"),

    .init          = qsvscale_init,
    .uninit        = qsvscale_uninit,
    .query_formats = qsvscale_query_formats,

    .priv_size     = sizeof(QSVScaleContext),
    .priv_class    = &qsvscale_class,

    .inputs        = qsvscale_inputs,
    .outputs       = qsvscale_outputs,

    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
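
/*
 * Example usage (illustrative sketch, not part of the upstream file): with a
 * QSV decoder and encoder, the filter typically sits between them in the
 * filter graph so frames stay in GPU memory, e.g.
 *
 *     ffmpeg -hwaccel qsv -hwaccel_output_format qsv -c:v h264_qsv -i input.mp4 \
 *            -vf "scale_qsv=w=1280:h=720:format=nv12" \
 *            -c:v h264_qsv output.mp4
 *
 * The exact device and driver options needed to bring up the QSV session
 * depend on the local build and hardware.
 */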