2
0

scale_argb.cc 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083
  1. /*
  2. * Copyright 2011 The LibYuv Project Authors. All rights reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "libyuv/scale.h"
  11. #include <assert.h>
  12. #include <string.h>
  13. #include "libyuv/cpu_id.h"
  14. #include "libyuv/planar_functions.h" // For CopyARGB
  15. #include "libyuv/row.h"
  16. #include "libyuv/scale_row.h"
  17. #ifdef __cplusplus
  18. namespace libyuv {
  19. extern "C" {
  20. #endif
  21. static __inline int Abs(int v) {
  22. return v >= 0 ? v : -v;
  23. }
// ScaleARGB ARGB, 1/2
// This is an optimized version for scaling down a ARGB to 1/2 of
// its original size.
//
// (x, y) is the starting source position and (dx, dy) the per-pixel step,
// all in 16.16 fixed point.  dx must be exactly 2.0 (65536 * 2) and dy an
// even whole number of source rows (both asserted below).  A C row
// function is chosen per filter mode, then promoted to a SIMD variant
// when the CPU supports one; the Any_ variants tolerate widths that are
// not a multiple of the SIMD lane count.
static void ScaleARGBDown2(int src_width,
                           int src_height,
                           int dst_width,
                           int dst_height,
                           int src_stride,
                           int dst_stride,
                           const uint8_t* src_argb,
                           uint8_t* dst_argb,
                           int x,
                           int dx,
                           int y,
                           int dy,
                           enum FilterMode filtering) {
  int j;
  // Source bytes to advance per output row (dy is integral here).
  int row_stride = src_stride * (dy >> 16);
  void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride,
                            uint8_t* dst_argb, int dst_width) =
      filtering == kFilterNone
          ? ScaleARGBRowDown2_C
          : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_C
                                        : ScaleARGBRowDown2Box_C);
  (void)src_width;
  (void)src_height;
  (void)dx;
  assert(dx == 65536 * 2);      // Test scale factor of 2.
  assert((dy & 0x1ffff) == 0);  // Test vertical scale is multiple of 2.
  // Advance to odd row, even column.
  // NOTE(review): the non-bilinear path backs up one source pixel
  // ((x >> 16) - 1); presumably callers guarantee x >= 1.0 in those
  // modes -- confirm against ScaleSlope.
  if (filtering == kFilterBilinear) {
    src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
  } else {
    src_argb += (y >> 16) * src_stride + ((x >> 16) - 1) * 4;
  }
#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_SSE2
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_SSE2
                                          : ScaleARGBRowDown2Box_Any_SSE2);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_SSE2
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_SSE2
                                            : ScaleARGBRowDown2Box_SSE2);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_NEON
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_NEON
                                          : ScaleARGBRowDown2Box_Any_NEON);
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_NEON
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_NEON
                                            : ScaleARGBRowDown2Box_NEON);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_MSA
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_MSA
                                          : ScaleARGBRowDown2Box_Any_MSA);
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_MSA
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_MSA
                                            : ScaleARGBRowDown2Box_MSA);
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBRowDown2 =
        filtering == kFilterNone
            ? ScaleARGBRowDown2_Any_MMI
            : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_MMI
                                          : ScaleARGBRowDown2Box_Any_MMI);
    if (IS_ALIGNED(dst_width, 2)) {
      ScaleARGBRowDown2 =
          filtering == kFilterNone
              ? ScaleARGBRowDown2_MMI
              : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_MMI
                                            : ScaleARGBRowDown2Box_MMI);
    }
  }
#endif
  // Linear filtering reads only one source row; zero stride disables the
  // second-row reads inside the row function.
  if (filtering == kFilterLinear) {
    src_stride = 0;
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDown2(src_argb, src_stride, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
}
// ScaleARGB ARGB, 1/4
// This is an optimized version for scaling down a ARGB to 1/4 of
// its original size.
//
// Implemented as two passes of the 1/2 box filter: two pairs of source
// rows are box-reduced into a half-width temporary buffer, and that
// buffer is box-reduced again to produce one output row.  dx must be
// exactly 4.0 (65536 * 4) and dy a multiple of 4 rows (asserted below).
static void ScaleARGBDown4Box(int src_width,
                              int src_height,
                              int dst_width,
                              int dst_height,
                              int src_stride,
                              int dst_stride,
                              const uint8_t* src_argb,
                              uint8_t* dst_argb,
                              int x,
                              int dx,
                              int y,
                              int dy) {
  int j;
  // Allocate 2 rows of ARGB.
  // kRowSize: one half-width ARGB row in bytes, rounded up to 32 bytes.
  const int kRowSize = (dst_width * 2 * 4 + 31) & ~31;
  align_buffer_64(row, kRowSize * 2);
  int row_stride = src_stride * (dy >> 16);
  void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride,
                            uint8_t* dst_argb, int dst_width) =
      ScaleARGBRowDown2Box_C;
  // Advance to odd row, even column.
  src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
  (void)src_width;
  (void)src_height;
  (void)dx;
  assert(dx == 65536 * 4);      // Test scale factor of 4.
  assert((dy & 0x3ffff) == 0);  // Test vertical scale is multiple of 4.
#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_SSE2;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDown2 = ScaleARGBRowDown2Box_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWN2_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBRowDown2 = ScaleARGBRowDown2Box_NEON;
    }
  }
#endif
  for (j = 0; j < dst_height; ++j) {
    // Pass 1: rows 0+1 -> row buffer 0, rows 2+3 -> row buffer 1.
    ScaleARGBRowDown2(src_argb, src_stride, row, dst_width * 2);
    ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride, row + kRowSize,
                      dst_width * 2);
    // Pass 2: reduce the two half-width rows into the final output row.
    ScaleARGBRowDown2(row, kRowSize, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
  free_aligned_buffer_64(row);
}
// ScaleARGB ARGB Even
// This is an optimized version for scaling down a ARGB to even
// multiple of its original size.
//
// Samples every (dx >> 16)-th source pixel and (dy >> 16)-th source row;
// with filtering enabled each sample box-averages its 2x2 neighborhood.
static void ScaleARGBDownEven(int src_width,
                              int src_height,
                              int dst_width,
                              int dst_height,
                              int src_stride,
                              int dst_stride,
                              const uint8_t* src_argb,
                              uint8_t* dst_argb,
                              int x,
                              int dx,
                              int y,
                              int dy,
                              enum FilterMode filtering) {
  int j;
  int col_step = dx >> 16;  // Whole source pixels stepped per output pixel.
  int row_stride = (dy >> 16) * src_stride;  // Source bytes per output row.
  void (*ScaleARGBRowDownEven)(const uint8_t* src_argb, ptrdiff_t src_stride,
                               int src_step, uint8_t* dst_argb, int dst_width) =
      filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C;
  (void)src_width;
  (void)src_height;
  assert(IS_ALIGNED(src_width, 2));
  assert(IS_ALIGNED(src_height, 2));
  // Advance to the first sampled source pixel.
  src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_SSE2
                                     : ScaleARGBRowDownEven_Any_SSE2;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_SSE2 : ScaleARGBRowDownEven_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_NEON
                                     : ScaleARGBRowDownEven_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_NEON : ScaleARGBRowDownEven_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MSA
                                     : ScaleARGBRowDownEven_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_MSA : ScaleARGBRowDownEven_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MMI
                                     : ScaleARGBRowDownEven_Any_MMI;
    if (IS_ALIGNED(dst_width, 2)) {
      ScaleARGBRowDownEven =
          filtering ? ScaleARGBRowDownEvenBox_MMI : ScaleARGBRowDownEven_MMI;
    }
  }
#endif
  // Linear filtering reads only one source row.
  if (filtering == kFilterLinear) {
    src_stride = 0;
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBRowDownEven(src_argb, src_stride, col_step, dst_argb, dst_width);
    src_argb += row_stride;
    dst_argb += dst_stride;
  }
}
// Scale ARGB down with bilinear interpolation.
//
// Clips the horizontal source window to just the pixels the column
// stepper will touch, blends two source rows into a temporary row
// (vertical fraction), then filters columns into the destination.
// (x, y) / (dx, dy) are 16.16 fixed-point source position and step.
static void ScaleARGBBilinearDown(int src_width,
                                  int src_height,
                                  int dst_width,
                                  int dst_height,
                                  int src_stride,
                                  int dst_stride,
                                  const uint8_t* src_argb,
                                  uint8_t* dst_argb,
                                  int x,
                                  int dx,
                                  int y,
                                  int dy,
                                  enum FilterMode filtering) {
  int j;
  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  // The 64-bit column stepper is needed once x positions can exceed the
  // 15-bit integer range of the 16.16 math in the 32-bit version.
  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      (src_width >= 32768) ? ScaleARGBFilterCols64_C : ScaleARGBFilterCols_C;
  int64_t xlast = x + (int64_t)(dst_width - 1) * dx;
  int64_t xl = (dx >= 0) ? x : xlast;  // Leftmost source x touched.
  int64_t xr = (dx >= 0) ? xlast : x;  // Rightmost source x touched.
  int clip_src_width;
  xl = (xl >> 16) & ~3;    // Left edge aligned.
  xr = (xr >> 16) + 1;     // Right most pixel used. Bilinear uses 2 pixels.
  xr = (xr + 1 + 3) & ~3;  // 1 beyond 4 pixel aligned right most pixel.
  if (xr > src_width) {
    xr = src_width;
  }
  clip_src_width = (int)(xr - xl) * 4;  // Width aligned to 4.
  // Rebase the source pointer and x onto the clipped window.
  src_argb += xl * 4;
  x -= (int)(xl << 16);
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(clip_src_width, 16)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(clip_src_width, 16)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(clip_src_width, 32)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
  // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
  // Allocate a row of ARGB.
  {
    align_buffer_64(row, clip_src_width * 4);
    const int max_y = (src_height - 1) << 16;  // Clamp: last source row.
    if (y > max_y) {
      y = max_y;
    }
    for (j = 0; j < dst_height; ++j) {
      int yi = y >> 16;
      const uint8_t* src = src_argb + yi * src_stride;
      if (filtering == kFilterLinear) {
        // Horizontal-only filtering straight from the source row.
        ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx);
      } else {
        // Blend two source rows by the vertical fraction, then filter
        // columns from the blended row.
        int yf = (y >> 8) & 255;  // Vertical fraction in 1/256 units.
        InterpolateRow(row, src, src_stride, clip_src_width, yf);
        ScaleARGBFilterCols(dst_argb, row, dst_width, x, dx);
      }
      dst_argb += dst_stride;
      y += dy;
      if (y > max_y) {
        y = max_y;
      }
    }
    free_aligned_buffer_64(row);
  }
}
// Scale ARGB up with bilinear interpolation.
//
// Column-scales two source rows into a pair of destination-width row
// buffers, then vertically interpolates between them for each output
// row.  The buffers are rotated (rowptr/rowstride sign flip) so only one
// new row is column-scaled per source-row advance.
// (x, y) / (dx, dy) are 16.16 fixed-point source position and step.
static void ScaleARGBBilinearUp(int src_width,
                                int src_height,
                                int dst_width,
                                int dst_height,
                                int src_stride,
                                int dst_stride,
                                const uint8_t* src_argb,
                                uint8_t* dst_argb,
                                int x,
                                int dx,
                                int y,
                                int dy,
                                enum FilterMode filtering) {
  int j;
  void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  // Without filtering, plain column sampling (ScaleARGBCols) is used.
  void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                              int dst_width, int x, int dx) =
      filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
  const int max_y = (src_height - 1) << 16;  // Clamp: last source row.
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    InterpolateRow = InterpolateRow_Any_MMI;
    if (IS_ALIGNED(dst_width, 2)) {
      InterpolateRow = InterpolateRow_MMI;
    }
  }
#endif
  // Wide sources need the 64-bit column steppers.
  if (src_width >= 32768) {
    ScaleARGBFilterCols =
        filtering ? ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
  }
#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  if (filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  if (filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBFilterCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (!filtering && TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (!filtering && TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MMI)
  if (!filtering && TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBFilterCols = ScaleARGBCols_Any_MMI;
    if (IS_ALIGNED(dst_width, 1)) {
      ScaleARGBFilterCols = ScaleARGBCols_MMI;
    }
  }
#endif
  // Special case: exact 2x horizontal upscale with no filtering uses the
  // dedicated pixel-doubling column function.
  if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBFilterCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
    }
#endif
#if defined(HAS_SCALEARGBCOLSUP2_MMI)
    if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) {
      ScaleARGBFilterCols = ScaleARGBColsUp2_MMI;
    }
#endif
  }
  if (y > max_y) {
    y = max_y;
  }
  {
    int yi = y >> 16;
    const uint8_t* src = src_argb + yi * src_stride;
    // Allocate 2 rows of ARGB.
    // kRowSize: one destination-width ARGB row, rounded up to 32 bytes.
    const int kRowSize = (dst_width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
    uint8_t* rowptr = row;
    int rowstride = kRowSize;
    int lasty = yi;  // Source row currently held in the buffers.
    // Prime both row buffers from the first two source rows.
    ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
    if (src_height > 1) {
      src += src_stride;
    }
    ScaleARGBFilterCols(rowptr + rowstride, src, dst_width, x, dx);
    src += src_stride;
    for (j = 0; j < dst_height; ++j) {
      yi = y >> 16;
      if (yi != lasty) {
        // Clamp y and re-aim src if we ran past the last source row.
        if (y > max_y) {
          y = max_y;
          yi = y >> 16;
          src = src_argb + yi * src_stride;
        }
        if (yi != lasty) {
          // Column-scale the next source row into the older buffer and
          // swap buffer roles by negating the stride.
          ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
          rowptr += rowstride;
          rowstride = -rowstride;
          lasty = yi;
          src += src_stride;
        }
      }
      if (filtering == kFilterLinear) {
        // Linear: no vertical blend; copy the current row buffer out.
        InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
      } else {
        int yf = (y >> 8) & 255;  // Vertical fraction in 1/256 units.
        InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
      }
      dst_argb += dst_stride;
      y += dy;
    }
    free_aligned_buffer_64(row);
  }
}
  553. #ifdef YUVSCALEUP
  554. // Scale YUV to ARGB up with bilinear interpolation.
  555. static void ScaleYUVToARGBBilinearUp(int src_width,
  556. int src_height,
  557. int dst_width,
  558. int dst_height,
  559. int src_stride_y,
  560. int src_stride_u,
  561. int src_stride_v,
  562. int dst_stride_argb,
  563. const uint8_t* src_y,
  564. const uint8_t* src_u,
  565. const uint8_t* src_v,
  566. uint8_t* dst_argb,
  567. int x,
  568. int dx,
  569. int y,
  570. int dy,
  571. enum FilterMode filtering) {
  572. int j;
  573. void (*I422ToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf,
  574. const uint8_t* v_buf, uint8_t* rgb_buf, int width) =
  575. I422ToARGBRow_C;
  576. #if defined(HAS_I422TOARGBROW_SSSE3)
  577. if (TestCpuFlag(kCpuHasSSSE3)) {
  578. I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
  579. if (IS_ALIGNED(src_width, 8)) {
  580. I422ToARGBRow = I422ToARGBRow_SSSE3;
  581. }
  582. }
  583. #endif
  584. #if defined(HAS_I422TOARGBROW_AVX2)
  585. if (TestCpuFlag(kCpuHasAVX2)) {
  586. I422ToARGBRow = I422ToARGBRow_Any_AVX2;
  587. if (IS_ALIGNED(src_width, 16)) {
  588. I422ToARGBRow = I422ToARGBRow_AVX2;
  589. }
  590. }
  591. #endif
  592. #if defined(HAS_I422TOARGBROW_NEON)
  593. if (TestCpuFlag(kCpuHasNEON)) {
  594. I422ToARGBRow = I422ToARGBRow_Any_NEON;
  595. if (IS_ALIGNED(src_width, 8)) {
  596. I422ToARGBRow = I422ToARGBRow_NEON;
  597. }
  598. }
  599. #endif
  600. #if defined(HAS_I422TOARGBROW_MSA)
  601. if (TestCpuFlag(kCpuHasMSA)) {
  602. I422ToARGBRow = I422ToARGBRow_Any_MSA;
  603. if (IS_ALIGNED(src_width, 8)) {
  604. I422ToARGBRow = I422ToARGBRow_MSA;
  605. }
  606. }
  607. #endif
  608. void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb,
  609. ptrdiff_t src_stride, int dst_width,
  610. int source_y_fraction) = InterpolateRow_C;
  611. #if defined(HAS_INTERPOLATEROW_SSSE3)
  612. if (TestCpuFlag(kCpuHasSSSE3)) {
  613. InterpolateRow = InterpolateRow_Any_SSSE3;
  614. if (IS_ALIGNED(dst_width, 4)) {
  615. InterpolateRow = InterpolateRow_SSSE3;
  616. }
  617. }
  618. #endif
  619. #if defined(HAS_INTERPOLATEROW_AVX2)
  620. if (TestCpuFlag(kCpuHasAVX2)) {
  621. InterpolateRow = InterpolateRow_Any_AVX2;
  622. if (IS_ALIGNED(dst_width, 8)) {
  623. InterpolateRow = InterpolateRow_AVX2;
  624. }
  625. }
  626. #endif
  627. #if defined(HAS_INTERPOLATEROW_NEON)
  628. if (TestCpuFlag(kCpuHasNEON)) {
  629. InterpolateRow = InterpolateRow_Any_NEON;
  630. if (IS_ALIGNED(dst_width, 4)) {
  631. InterpolateRow = InterpolateRow_NEON;
  632. }
  633. }
  634. #endif
  635. #if defined(HAS_INTERPOLATEROW_MSA)
  636. if (TestCpuFlag(kCpuHasMSA)) {
  637. InterpolateRow = InterpolateRow_Any_MSA;
  638. if (IS_ALIGNED(dst_width, 8)) {
  639. InterpolateRow = InterpolateRow_MSA;
  640. }
  641. }
  642. #endif
  643. void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb,
  644. int dst_width, int x, int dx) =
  645. filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
  646. if (src_width >= 32768) {
  647. ScaleARGBFilterCols =
  648. filtering ? ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
  649. }
  650. #if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
  651. if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
  652. ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
  653. }
  654. #endif
  655. #if defined(HAS_SCALEARGBFILTERCOLS_NEON)
  656. if (filtering && TestCpuFlag(kCpuHasNEON)) {
  657. ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
  658. if (IS_ALIGNED(dst_width, 4)) {
  659. ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
  660. }
  661. }
  662. #endif
  663. #if defined(HAS_SCALEARGBFILTERCOLS_MSA)
  664. if (filtering && TestCpuFlag(kCpuHasMSA)) {
  665. ScaleARGBFilterCols = ScaleARGBFilterCols_Any_MSA;
  666. if (IS_ALIGNED(dst_width, 8)) {
  667. ScaleARGBFilterCols = ScaleARGBFilterCols_MSA;
  668. }
  669. }
  670. #endif
  671. #if defined(HAS_SCALEARGBCOLS_SSE2)
  672. if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
  673. ScaleARGBFilterCols = ScaleARGBCols_SSE2;
  674. }
  675. #endif
  676. #if defined(HAS_SCALEARGBCOLS_NEON)
  677. if (!filtering && TestCpuFlag(kCpuHasNEON)) {
  678. ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
  679. if (IS_ALIGNED(dst_width, 8)) {
  680. ScaleARGBFilterCols = ScaleARGBCols_NEON;
  681. }
  682. }
  683. #endif
  684. #if defined(HAS_SCALEARGBCOLS_MSA)
  685. if (!filtering && TestCpuFlag(kCpuHasMSA)) {
  686. ScaleARGBFilterCols = ScaleARGBCols_Any_MSA;
  687. if (IS_ALIGNED(dst_width, 4)) {
  688. ScaleARGBFilterCols = ScaleARGBCols_MSA;
  689. }
  690. }
  691. #endif
  692. #if defined(HAS_SCALEARGBCOLS_MMI)
  693. if (!filtering && TestCpuFlag(kCpuHasMMI)) {
  694. ScaleARGBFilterCols = ScaleARGBCols_Any_MMI;
  695. if (IS_ALIGNED(dst_width, 1)) {
  696. ScaleARGBFilterCols = ScaleARGBCols_MMI;
  697. }
  698. }
  699. #endif
  700. if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
  701. ScaleARGBFilterCols = ScaleARGBColsUp2_C;
  702. #if defined(HAS_SCALEARGBCOLSUP2_SSE2)
  703. if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
  704. ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
  705. }
  706. #endif
  707. #if defined(HAS_SCALEARGBCOLSUP2_MMI)
  708. if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) {
  709. ScaleARGBFilterCols = ScaleARGBColsUp2_MMI;
  710. }
  711. #endif
  712. }
  713. const int max_y = (src_height - 1) << 16;
  714. if (y > max_y) {
  715. y = max_y;
  716. }
  717. const int kYShift = 1; // Shift Y by 1 to convert Y plane to UV coordinate.
  718. int yi = y >> 16;
  719. int uv_yi = yi >> kYShift;
  720. const uint8_t* src_row_y = src_y + yi * src_stride_y;
  721. const uint8_t* src_row_u = src_u + uv_yi * src_stride_u;
  722. const uint8_t* src_row_v = src_v + uv_yi * src_stride_v;
  723. // Allocate 2 rows of ARGB.
  724. const int kRowSize = (dst_width * 4 + 31) & ~31;
  725. align_buffer_64(row, kRowSize * 2);
  726. // Allocate 1 row of ARGB for source conversion.
  727. align_buffer_64(argb_row, src_width * 4);
  728. uint8_t* rowptr = row;
  729. int rowstride = kRowSize;
  730. int lasty = yi;
  731. // TODO(fbarchard): Convert first 2 rows of YUV to ARGB.
  732. ScaleARGBFilterCols(rowptr, src_row_y, dst_width, x, dx);
  733. if (src_height > 1) {
  734. src_row_y += src_stride_y;
  735. if (yi & 1) {
  736. src_row_u += src_stride_u;
  737. src_row_v += src_stride_v;
  738. }
  739. }
  740. ScaleARGBFilterCols(rowptr + rowstride, src_row_y, dst_width, x, dx);
  741. if (src_height > 2) {
  742. src_row_y += src_stride_y;
  743. if (!(yi & 1)) {
  744. src_row_u += src_stride_u;
  745. src_row_v += src_stride_v;
  746. }
  747. }
  748. for (j = 0; j < dst_height; ++j) {
  749. yi = y >> 16;
  750. if (yi != lasty) {
  751. if (y > max_y) {
  752. y = max_y;
  753. yi = y >> 16;
  754. uv_yi = yi >> kYShift;
  755. src_row_y = src_y + yi * src_stride_y;
  756. src_row_u = src_u + uv_yi * src_stride_u;
  757. src_row_v = src_v + uv_yi * src_stride_v;
  758. }
  759. if (yi != lasty) {
  760. // TODO(fbarchard): Convert the clipped region of row.
  761. I422ToARGBRow(src_row_y, src_row_u, src_row_v, argb_row, src_width);
  762. ScaleARGBFilterCols(rowptr, argb_row, dst_width, x, dx);
  763. rowptr += rowstride;
  764. rowstride = -rowstride;
  765. lasty = yi;
  766. src_row_y += src_stride_y;
  767. if (yi & 1) {
  768. src_row_u += src_stride_u;
  769. src_row_v += src_stride_v;
  770. }
  771. }
  772. }
  773. if (filtering == kFilterLinear) {
  774. InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
  775. } else {
  776. int yf = (y >> 8) & 255;
  777. InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
  778. }
  779. dst_argb += dst_stride_argb;
  780. y += dy;
  781. }
  782. free_aligned_buffer_64(row);
  783. free_aligned_buffer_64(row_argb);
  784. }
  785. #endif
// Scale ARGB to/from any dimensions, without interpolation.
// Fixed point math is used for performance: The upper 16 bits
// of x and dx is the integer part of the source position and
// the lower 16 bits are the fixed decimal part.
//
// Nearest-neighbor sampling: each output row is produced by the column
// sampler from a single source row selected by (y >> 16).
static void ScaleARGBSimple(int src_width,
                            int src_height,
                            int dst_width,
                            int dst_height,
                            int src_stride,
                            int dst_stride,
                            const uint8_t* src_argb,
                            uint8_t* dst_argb,
                            int x,
                            int dx,
                            int y,
                            int dy) {
  int j;
  // 64-bit stepper for sources too wide for 16.16 math.
  void (*ScaleARGBCols)(uint8_t * dst_argb, const uint8_t* src_argb,
                        int dst_width, int x, int dx) =
      (src_width >= 32768) ? ScaleARGBCols64_C : ScaleARGBCols_C;
  (void)src_height;
#if defined(HAS_SCALEARGBCOLS_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
    ScaleARGBCols = ScaleARGBCols_SSE2;
  }
#endif
#if defined(HAS_SCALEARGBCOLS_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBCols = ScaleARGBCols_Any_NEON;
    if (IS_ALIGNED(dst_width, 8)) {
      ScaleARGBCols = ScaleARGBCols_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBCols = ScaleARGBCols_Any_MSA;
    if (IS_ALIGNED(dst_width, 4)) {
      ScaleARGBCols = ScaleARGBCols_MSA;
    }
  }
#endif
#if defined(HAS_SCALEARGBCOLS_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ScaleARGBCols = ScaleARGBCols_Any_MMI;
    if (IS_ALIGNED(dst_width, 1)) {
      ScaleARGBCols = ScaleARGBCols_MMI;
    }
  }
#endif
  // Special case: exact 2x horizontal upscale uses pixel doubling.
  if (src_width * 2 == dst_width && x < 0x8000) {
    ScaleARGBCols = ScaleARGBColsUp2_C;
#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
    if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
      ScaleARGBCols = ScaleARGBColsUp2_SSE2;
    }
#endif
#if defined(HAS_SCALEARGBCOLSUP2_MMI)
    if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) {
      ScaleARGBCols = ScaleARGBColsUp2_MMI;
    }
#endif
  }
  for (j = 0; j < dst_height; ++j) {
    ScaleARGBCols(dst_argb, src_argb + (y >> 16) * src_stride, dst_width, x,
                  dx);
    dst_argb += dst_stride;
    y += dy;
  }
}
// ScaleARGB a ARGB.
// This function in turn calls a scaling function
// suitable for handling the desired resolutions.
static void ScaleARGB(const uint8_t* src,
                      int src_stride,
                      int src_width,
                      int src_height,
                      uint8_t* dst,
                      int dst_stride,
                      int dst_width,
                      int dst_height,
                      int clip_x,
                      int clip_y,
                      int clip_width,
                      int clip_height,
                      enum FilterMode filtering) {
  // Initial source x/y coordinate and step values as 16.16 fixed point.
  int x = 0;
  int y = 0;
  int dx = 0;
  int dy = 0;
  // ARGB does not support box filter yet, but allow the user to pass it.
  // Simplify filtering when possible.
  filtering = ScaleFilterReduce(src_width, src_height, dst_width, dst_height,
                                filtering);
  // Negative src_height means invert the image.
  if (src_height < 0) {
    src_height = -src_height;
    src = src + (src_height - 1) * src_stride;
    src_stride = -src_stride;
  }
  // Compute the initial fixed-point coordinates and per-pixel steps for
  // the requested filter mode.
  ScaleSlope(src_width, src_height, dst_width, dst_height, filtering, &x, &y,
             &dx, &dy);
  src_width = Abs(src_width);
  // Apply the clip rectangle: advance the source by the whole-pixel part of
  // clip * step (64-bit product to avoid overflow), keep the fractional part
  // in x/y, and advance the destination to the clip origin.
  if (clip_x) {
    int64_t clipf = (int64_t)(clip_x)*dx;
    x += (clipf & 0xffff);
    src += (clipf >> 16) * 4;
    dst += clip_x * 4;
  }
  if (clip_y) {
    int64_t clipf = (int64_t)(clip_y)*dy;
    y += (clipf & 0xffff);
    src += (clipf >> 16) * src_stride;
    dst += clip_y * dst_stride;
  }
  // Special case for integer step values (no fractional stepping needed).
  if (((dx | dy) & 0xffff) == 0) {
    if (!dx || !dy) {  // 1 pixel wide and/or tall.
      filtering = kFilterNone;
    } else {
      // Optimized even scale down. ie 2, 4, 6, 8, 10x.
      if (!(dx & 0x10000) && !(dy & 0x10000)) {
        if (dx == 0x20000) {
          // Optimized 1/2 downsample.
          ScaleARGBDown2(src_width, src_height, clip_width, clip_height,
                         src_stride, dst_stride, src, dst, x, dx, y, dy,
                         filtering);
          return;
        }
        if (dx == 0x40000 && filtering == kFilterBox) {
          // Optimized 1/4 box downsample.
          ScaleARGBDown4Box(src_width, src_height, clip_width, clip_height,
                            src_stride, dst_stride, src, dst, x, dx, y, dy);
          return;
        }
        ScaleARGBDownEven(src_width, src_height, clip_width, clip_height,
                          src_stride, dst_stride, src, dst, x, dx, y, dy,
                          filtering);
        return;
      }
      // Optimized odd scale down. ie 3, 5, 7, 9x.
      if ((dx & 0x10000) && (dy & 0x10000)) {
        filtering = kFilterNone;
        if (dx == 0x10000 && dy == 0x10000) {
          // Straight copy.
          ARGBCopy(src + (y >> 16) * src_stride + (x >> 16) * 4, src_stride,
                   dst, dst_stride, clip_width, clip_height);
          return;
        }
      }
    }
  }
  // Horizontal step is exactly 1 with no initial phase: rows can be copied
  // unscaled while scaling vertically.
  if (dx == 0x10000 && (x & 0xffff) == 0) {
    // Arbitrary scale vertically, but unscaled vertically.
    ScalePlaneVertical(src_height, clip_width, clip_height, src_stride,
                       dst_stride, src, dst, x, y, dy, 4, filtering);
    return;
  }
  // dy < 65536 means vertical upscale (or unity): use the bilinear-up path.
  if (filtering && dy < 65536) {
    ScaleARGBBilinearUp(src_width, src_height, clip_width, clip_height,
                        src_stride, dst_stride, src, dst, x, dx, y, dy,
                        filtering);
    return;
  }
  if (filtering) {
    ScaleARGBBilinearDown(src_width, src_height, clip_width, clip_height,
                          src_stride, dst_stride, src, dst, x, dx, y, dy,
                          filtering);
    return;
  }
  // Fallback: nearest-neighbor scaling for any remaining case.
  ScaleARGBSimple(src_width, src_height, clip_width, clip_height, src_stride,
                  dst_stride, src, dst, x, dx, y, dy);
}
  960. LIBYUV_API
  961. int ARGBScaleClip(const uint8_t* src_argb,
  962. int src_stride_argb,
  963. int src_width,
  964. int src_height,
  965. uint8_t* dst_argb,
  966. int dst_stride_argb,
  967. int dst_width,
  968. int dst_height,
  969. int clip_x,
  970. int clip_y,
  971. int clip_width,
  972. int clip_height,
  973. enum FilterMode filtering) {
  974. if (!src_argb || src_width == 0 || src_height == 0 || !dst_argb ||
  975. dst_width <= 0 || dst_height <= 0 || clip_x < 0 || clip_y < 0 ||
  976. clip_width > 32768 || clip_height > 32768 ||
  977. (clip_x + clip_width) > dst_width ||
  978. (clip_y + clip_height) > dst_height) {
  979. return -1;
  980. }
  981. ScaleARGB(src_argb, src_stride_argb, src_width, src_height, dst_argb,
  982. dst_stride_argb, dst_width, dst_height, clip_x, clip_y, clip_width,
  983. clip_height, filtering);
  984. return 0;
  985. }
  986. // Scale an ARGB image.
  987. LIBYUV_API
  988. int ARGBScale(const uint8_t* src_argb,
  989. int src_stride_argb,
  990. int src_width,
  991. int src_height,
  992. uint8_t* dst_argb,
  993. int dst_stride_argb,
  994. int dst_width,
  995. int dst_height,
  996. enum FilterMode filtering) {
  997. if (!src_argb || src_width == 0 || src_height == 0 || src_width > 32768 ||
  998. src_height > 32768 || !dst_argb || dst_width <= 0 || dst_height <= 0) {
  999. return -1;
  1000. }
  1001. ScaleARGB(src_argb, src_stride_argb, src_width, src_height, dst_argb,
  1002. dst_stride_argb, dst_width, dst_height, 0, 0, dst_width, dst_height,
  1003. filtering);
  1004. return 0;
  1005. }
  1006. // Scale with YUV conversion to ARGB and clipping.
  1007. LIBYUV_API
  1008. int YUVToARGBScaleClip(const uint8_t* src_y,
  1009. int src_stride_y,
  1010. const uint8_t* src_u,
  1011. int src_stride_u,
  1012. const uint8_t* src_v,
  1013. int src_stride_v,
  1014. uint32_t src_fourcc,
  1015. int src_width,
  1016. int src_height,
  1017. uint8_t* dst_argb,
  1018. int dst_stride_argb,
  1019. uint32_t dst_fourcc,
  1020. int dst_width,
  1021. int dst_height,
  1022. int clip_x,
  1023. int clip_y,
  1024. int clip_width,
  1025. int clip_height,
  1026. enum FilterMode filtering) {
  1027. uint8_t* argb_buffer = (uint8_t*)malloc(src_width * src_height * 4);
  1028. int r;
  1029. (void)src_fourcc; // TODO(fbarchard): implement and/or assert.
  1030. (void)dst_fourcc;
  1031. I420ToARGB(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
  1032. argb_buffer, src_width * 4, src_width, src_height);
  1033. r = ARGBScaleClip(argb_buffer, src_width * 4, src_width, src_height, dst_argb,
  1034. dst_stride_argb, dst_width, dst_height, clip_x, clip_y,
  1035. clip_width, clip_height, filtering);
  1036. free(argb_buffer);
  1037. return r;
  1038. }
  1039. #ifdef __cplusplus
  1040. } // extern "C"
  1041. } // namespace libyuv
  1042. #endif