2
0

convert_from_argb.cc 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163
  1. /*
  2. * Copyright 2012 The LibYuv Project Authors. All rights reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "libyuv/convert_from_argb.h"
  11. #include "libyuv/basic_types.h"
  12. #include "libyuv/cpu_id.h"
  13. #include "libyuv/planar_functions.h"
  14. #include "libyuv/row.h"
  15. #ifdef __cplusplus
  16. namespace libyuv {
  17. extern "C" {
  18. #endif
// ARGB little endian (bgra in memory) to I444
LIBYUV_API
int ARGBToI444(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  // Row workers default to the portable C implementations; they are swapped
  // for SIMD variants below when the CPU supports them.
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*ARGBToUV444Row)(const uint8_t* src_argb, uint8_t* dst_u,
                         uint8_t* dst_v, int width) = ARGBToUV444Row_C;
  // Reject null planes and non-positive width; height == 0 is also invalid
  // (negative height is meaningful — see below).
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image: start at the last source row and
  // walk backwards via a negated stride.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if every plane is fully contiguous (stride == row width),
  // process the whole image as one long row.
  if (src_stride_argb == width * 4 && dst_stride_y == width &&
      dst_stride_u == width && dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
  // CPU dispatch. The _Any_ variants handle arbitrary widths; when width is a
  // multiple of the kernel's step the exact-width variant is chosen instead.
  // Block order matters: a later #if section can override an earlier pick.
#if defined(HAS_ARGBTOUV444ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
  // I444 has full-resolution chroma: one Y row and one full-width U/V row are
  // produced per source row.
  for (y = 0; y < height; ++y) {
    ARGBToUV444Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
// ARGB little endian (bgra in memory) to I422
LIBYUV_API
int ARGBToI422(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  // Row workers default to portable C; replaced with SIMD variants below when
  // the CPU supports them.
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows. I422 chroma planes are half width, hence the
  // dst_stride * 2 == width contiguity test for U and V.
  if (src_stride_argb == width * 4 && dst_stride_y == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
  // CPU dispatch: _Any_ variants take arbitrary widths; exact variants are
  // used when width is a multiple of the kernel step. Later sections may
  // override earlier picks.
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
  // I422 subsamples chroma horizontally only, so U/V are computed for every
  // row. The 0 passed as src_stride tells the UV row function to sample this
  // single row (no second row, matching the odd-row handling in the NV12
  // paths below).
  for (y = 0; y < height; ++y) {
    ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
// ARGB little endian (bgra in memory) to NV12 (Y plane + interleaved UV).
LIBYUV_API
int ARGBToNV12(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  // NV12 chroma is subsampled 2x2; each UV pair covers 2 pixels per row.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to portable C; replaced with SIMD variants below.
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // CPU dispatch: _Any_ variants take arbitrary widths; exact variants are
  // used when width is a multiple of the kernel step. Later sections may
  // override earlier picks.
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
  // MergeUV dispatch keyed on halfwidth (the UV row length).
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
  {
    // Allocate 2 temporary rows of chroma: U then V, each rounded up to a
    // multiple of 32 bytes so row_v stays 32-byte aligned within the buffer.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    // Process 2 source rows per iteration: one UV row (averaged over the row
    // pair via src_stride_argb) and 2 Y rows.
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    // Odd height: the final row has no partner, so pass 0 stride (single-row
    // chroma sample).
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
// Same as NV12 but U and V swapped (Y plane + interleaved VU).
LIBYUV_API
int ARGBToNV21(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  // 2x2 chroma subsampling: one VU pair per 2 pixels per 2 rows.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to portable C; replaced with SIMD variants below.
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_vu, int width) = MergeUVRow_C;
  if (!src_argb || !dst_y || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // CPU dispatch: _Any_ variants take arbitrary widths; exact variants are
  // used when width is a multiple of the kernel step. Later sections may
  // override earlier picks.
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
  // MergeUV dispatch keyed on halfwidth (the VU row length).
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
  {
    // Allocate 2 temporary rows of chroma: U then V, each rounded up to a
    // multiple of 32 bytes so row_v stays 32-byte aligned within the buffer.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    // Process 2 source rows per iteration. Note the merge arguments are
    // (row_v, row_u): V is written first to produce the NV21 VU interleave.
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_vu += dst_stride_vu;
    }
    // Odd height: the final row has no partner, so pass 0 stride (single-row
    // chroma sample).
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
// ABGR little endian (rgba in memory) to NV12 (Y plane + interleaved UV).
// Same structure as ARGBToNV12 but with ABGR row readers.
LIBYUV_API
int ABGRToNV12(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  // 2x2 chroma subsampling: one UV pair per 2 pixels per 2 rows.
  int halfwidth = (width + 1) >> 1;
  // Row workers default to portable C; replaced with SIMD variants below.
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (!src_abgr || !dst_y || !dst_uv || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
  // CPU dispatch: _Any_ variants take arbitrary widths; exact variants are
  // used when width is a multiple of the kernel step. Later sections may
  // override earlier picks.
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToUVRow = ABGRToUVRow_Any_AVX2;
    ABGRToYRow = ABGRToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_AVX2;
      ABGRToYRow = ABGRToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
  // MergeUV dispatch keyed on halfwidth (the UV row length).
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
  {
    // Allocate 2 temporary rows of chroma: U then V, each rounded up to a
    // multiple of 32 bytes so row_v stays 32-byte aligned within the buffer.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    // Process 2 source rows per iteration: one UV row (averaged over the row
    // pair via src_stride_abgr) and 2 Y rows.
    for (y = 0; y < height - 1; y += 2) {
      ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
      ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
      src_abgr += src_stride_abgr * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    // Odd height: the final row has no partner, so pass 0 stride (single-row
    // chroma sample).
    if (height & 1) {
      ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
// Same as NV12 but U and V swapped: convert ABGR to NV21 (full-resolution Y
// plane plus an interleaved VU plane with 2x2-subsampled chroma).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ABGRToNV21(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  // Chroma plane is half the width, rounded up for odd widths.
  int halfwidth = (width + 1) >> 1;
  // Row function pointers start at the portable C implementations and are
  // upgraded below to the fastest SIMD variant this CPU supports.
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  void (*MergeUVRow_)(const uint8_t* src_u, const uint8_t* src_v,
                      uint8_t* dst_vu, int width) = MergeUVRow_C;
  if (!src_abgr || !dst_y || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
  // CPU dispatch: _Any_ variants handle arbitrary widths; the unsuffixed
  // variants require the width to be a multiple of the vector width.
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToUVRow = ABGRToUVRow_Any_AVX2;
    ABGRToYRow = ABGRToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_AVX2;
      ABGRToYRow = ABGRToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow_ = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow_ = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow_ = MergeUVRow_MMI;
    }
  }
#endif
  {
    // Allocate 2 temporary rows for planar U and V, each rounded up to a
    // 32-byte multiple so SIMD row functions may run past halfwidth.
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8_t* row_v = row_u + ((halfwidth + 31) & ~31);
    // Process two source rows per iteration: one shared chroma row, two luma
    // rows.
    for (y = 0; y < height - 1; y += 2) {
      ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width);
      // NV21 stores V before U in the interleaved chroma plane.
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
      ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
      src_abgr += src_stride_abgr * 2;
      dst_y += dst_stride_y * 2;
      dst_vu += dst_stride_vu;
    }
    // Odd height: the final row supplies its own chroma (stride 0 makes the
    // UV row function sample the same row twice).
    if (height & 1) {
      ABGRToUVRow(src_abgr, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
      ABGRToYRow(src_abgr, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
// Convert ARGB to YUY2 (packed 4:2:2, byte order Y0 U Y1 V).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToYUY2(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yuy2,
               int dst_stride_yuy2,
               int width,
               int height) {
  int y;
  // Row function pointers start at the portable C implementations and are
  // upgraded below to the fastest SIMD variant this CPU supports.
  void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
                        const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
      I422ToYUY2Row_C;
  if (!src_argb || !dst_yuy2 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
    dst_stride_yuy2 = -dst_stride_yuy2;
  }
  // Coalesce rows: if both buffers are contiguous, treat the whole frame as
  // one long row.
  if (src_stride_argb == width * 4 && dst_stride_yuy2 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yuy2 = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_NEON;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToYUY2Row = I422ToYUY2Row_MSA;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      I422ToYUY2Row = I422ToYUY2Row_MMI;
    }
  }
#endif
  {
    // Allocate temporary rows: a full-width Y row followed by half-width U
    // and V rows, with 64-byte rounding so SIMD row functions may overrun.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8_t* row_u = row_y + ((width + 63) & ~63);
    uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;
    // 4:2:2 output: chroma comes from a single source row (stride 0), then
    // Y/U/V are interleaved into packed YUY2.
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
      src_argb += src_stride_argb;
      dst_yuy2 += dst_stride_yuy2;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}
// Convert ARGB to UYVY (packed 4:2:2, byte order U Y0 V Y1).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToUYVY(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_uyvy,
               int dst_stride_uyvy,
               int width,
               int height) {
  int y;
  // Row function pointers start at the portable C implementations and are
  // upgraded below to the fastest SIMD variant this CPU supports.
  void (*ARGBToUVRow)(const uint8_t* src_argb, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
                        const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
      I422ToUYVYRow_C;
  if (!src_argb || !dst_uyvy || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
    dst_stride_uyvy = -dst_stride_uyvy;
  }
  // Coalesce rows: if both buffers are contiguous, treat the whole frame as
  // one long row.
  if (src_stride_argb == width * 4 && dst_stride_uyvy == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_uyvy = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_AVX2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      I422ToUYVYRow = I422ToUYVYRow_MSA;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      I422ToUYVYRow = I422ToUYVYRow_MMI;
    }
  }
#endif
  {
    // Allocate temporary rows: a full-width Y row followed by half-width U
    // and V rows, with 64-byte rounding so SIMD row functions may overrun.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8_t* row_u = row_y + ((width + 63) & ~63);
    uint8_t* row_v = row_u + ((width + 63) & ~63) / 2;
    // 4:2:2 output: chroma comes from a single source row (stride 0), then
    // Y/U/V are interleaved into packed UYVY.
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
      src_argb += src_stride_argb;
      dst_uyvy += dst_stride_uyvy;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}
// Convert ARGB to I400 (luma-only plane; chroma is discarded).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToI400(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               int width,
               int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_argb == width * 4 && dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}
// Shuffle table for converting ARGB to RGBA: output byte i is taken from
// input byte kShuffleMaskARGBToRGBA[i], rotating the last channel of each
// 4-byte pixel to the front.
static const uvec8 kShuffleMaskARGBToRGBA = {
    3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u};
  1262. // Convert ARGB to RGBA.
  1263. LIBYUV_API
  1264. int ARGBToRGBA(const uint8_t* src_argb,
  1265. int src_stride_argb,
  1266. uint8_t* dst_rgba,
  1267. int dst_stride_rgba,
  1268. int width,
  1269. int height) {
  1270. return ARGBShuffle(src_argb, src_stride_argb, dst_rgba, dst_stride_rgba,
  1271. (const uint8_t*)(&kShuffleMaskARGBToRGBA), width, height);
  1272. }
// Convert ARGB To RGB24 (drop the alpha byte; 3 bytes per pixel).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToRGB24(const uint8_t* src_argb,
                int src_stride_argb,
                uint8_t* dst_rgb24,
                int dst_stride_rgb24,
                int width,
                int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToRGB24Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToRGB24Row_C;
  if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_argb == width * 4 && dst_stride_rgb24 == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb24 = 0;
  }
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRGB24Row = ARGBToRGB24Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_AVX512VBMI)
  if (TestCpuFlag(kCpuHasAVX512VBMI)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_AVX512VBMI;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRGB24Row = ARGBToRGB24Row_AVX512VBMI;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB24Row = ARGBToRGB24Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB24Row = ARGBToRGB24Row_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}
// Convert ARGB To RAW (3 bytes per pixel, reversed byte order vs RGB24;
// alpha dropped).  Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToRAW(const uint8_t* src_argb,
              int src_stride_argb,
              uint8_t* dst_raw,
              int dst_stride_raw,
              int width,
              int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToRAWRow)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
      ARGBToRAWRow_C;
  if (!src_argb || !dst_raw || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_argb == width * 4 && dst_stride_raw == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_raw = 0;
  }
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToRAWRow = ARGBToRAWRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRAWRow = ARGBToRAWRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRAWRow = ARGBToRAWRow_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}
// Ordered 4x4 dither for 888 to 565.  Values from 0 to 7, stored as 4 rows
// of 4 bytes; ARGBToRGB565Dither reads one 4-byte row per scanline (y & 3).
// (The array is 4x4, 16 entries, despite any older references to 8x8.)
static const uint8_t kDither565_4x4[16] = {
    0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
};
// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
// dither4x4 may be NULL, in which case the built-in kDither565_4x4 is used.
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToRGB565Dither(const uint8_t* src_argb,
                       int src_stride_argb,
                       uint8_t* dst_rgb565,
                       int dst_stride_rgb565,
                       const uint8_t* dither4x4,
                       int width,
                       int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToRGB565DitherRow)(const uint8_t* src_argb, uint8_t* dst_rgb,
                                const uint32_t dither4, int width) =
      ARGBToRGB565DitherRow_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  if (!dither4x4) {
    dither4x4 = kDither565_4x4;
  }
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    // Pass the 4-byte dither row for this scanline ((y & 3) selects the row,
    // << 2 converts row index to byte offset) packed into a uint32_t.
    ARGBToRGB565DitherRow(src_argb, dst_rgb565,
                          *(const uint32_t*)(dither4x4 + ((y & 3) << 2)),
                          width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
// Convert ARGB To RGB565 (16 bits per pixel, no dithering).
// Returns 0 on success, -1 on invalid argument.
// TODO(fbarchard): Consider using dither function low level with zeros.
LIBYUV_API
int ARGBToRGB565(const uint8_t* src_argb,
                 int src_stride_argb,
                 uint8_t* dst_rgb565,
                 int dst_stride_rgb565,
                 int width,
                 int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToRGB565Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                          int width) = ARGBToRGB565Row_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_argb == width * 4 && dst_stride_rgb565 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb565 = 0;
  }
#if defined(HAS_ARGBTORGB565ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB565Row(src_argb, dst_rgb565, width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
// Convert ARGB To ARGB1555 (16 bits per pixel: 1-bit alpha, 5 bits each RGB).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToARGB1555(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb1555,
                   int dst_stride_argb1555,
                   int width,
                   int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToARGB1555Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                            int width) = ARGBToARGB1555Row_C;
  if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_argb == width * 4 && dst_stride_argb1555 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb1555 = 0;
  }
#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB1555Row(src_argb, dst_argb1555, width);
    src_argb += src_stride_argb;
    dst_argb1555 += dst_stride_argb1555;
  }
  return 0;
}
// Convert ARGB To ARGB4444 (16 bits per pixel: 4 bits per channel).
// Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ARGBToARGB4444(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb4444,
                   int dst_stride_argb4444,
                   int width,
                   int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ARGBToARGB4444Row)(const uint8_t* src_argb, uint8_t* dst_rgb,
                            int width) = ARGBToARGB4444Row_C;
  if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_argb == width * 4 && dst_stride_argb4444 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb4444 = 0;
  }
#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MMI;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_MMI;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB4444Row(src_argb, dst_argb4444, width);
    src_argb += src_stride_argb;
    dst_argb4444 += dst_stride_argb4444;
  }
  return 0;
}
// Convert ABGR To AR30 (2-bit alpha, 10 bits per color channel, 4 bytes per
// pixel).  Returns 0 on success, -1 on invalid argument.
LIBYUV_API
int ABGRToAR30(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_ar30,
               int dst_stride_ar30,
               int width,
               int height) {
  int y;
  // Default to the portable C row function; upgraded below per CPU features.
  void (*ABGRToAR30Row)(const uint8_t* src_abgr, uint8_t* dst_rgb, int width) =
      ABGRToAR30Row_C;
  if (!src_abgr || !dst_ar30 || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
  // Coalesce rows: if both buffers are contiguous, convert as one long row.
  if (src_stride_abgr == width * 4 && dst_stride_ar30 == width * 4) {
    width *= height;
    height = 1;
    src_stride_abgr = dst_stride_ar30 = 0;
  }
#if defined(HAS_ABGRTOAR30ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToAR30Row = ABGRToAR30Row_Any_SSSE3;
    if (IS_ALIGNED(width, 4)) {
      ABGRToAR30Row = ABGRToAR30Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOAR30ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ABGRToAR30Row = ABGRToAR30Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ABGRToAR30Row = ABGRToAR30Row_AVX2;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ABGRToAR30Row(src_abgr, dst_ar30, width);
    src_abgr += src_stride_abgr;
    dst_ar30 += dst_stride_ar30;
  }
  return 0;
}
  1767. // Convert ARGB To AR30.
  1768. LIBYUV_API
  1769. int ARGBToAR30(const uint8_t* src_argb,
  1770. int src_stride_argb,
  1771. uint8_t* dst_ar30,
  1772. int dst_stride_ar30,
  1773. int width,
  1774. int height) {
  1775. int y;
  1776. void (*ARGBToAR30Row)(const uint8_t* src_argb, uint8_t* dst_rgb, int width) =
  1777. ARGBToAR30Row_C;
  1778. if (!src_argb || !dst_ar30 || width <= 0 || height == 0) {
  1779. return -1;
  1780. }
  1781. if (height < 0) {
  1782. height = -height;
  1783. src_argb = src_argb + (height - 1) * src_stride_argb;
  1784. src_stride_argb = -src_stride_argb;
  1785. }
  1786. // Coalesce rows.
  1787. if (src_stride_argb == width * 4 && dst_stride_ar30 == width * 4) {
  1788. width *= height;
  1789. height = 1;
  1790. src_stride_argb = dst_stride_ar30 = 0;
  1791. }
  1792. #if defined(HAS_ARGBTOAR30ROW_SSSE3)
  1793. if (TestCpuFlag(kCpuHasSSSE3)) {
  1794. ARGBToAR30Row = ARGBToAR30Row_Any_SSSE3;
  1795. if (IS_ALIGNED(width, 4)) {
  1796. ARGBToAR30Row = ARGBToAR30Row_SSSE3;
  1797. }
  1798. }
  1799. #endif
  1800. #if defined(HAS_ARGBTOAR30ROW_AVX2)
  1801. if (TestCpuFlag(kCpuHasAVX2)) {
  1802. ARGBToAR30Row = ARGBToAR30Row_Any_AVX2;
  1803. if (IS_ALIGNED(width, 8)) {
  1804. ARGBToAR30Row = ARGBToAR30Row_AVX2;
  1805. }
  1806. }
  1807. #endif
  1808. for (y = 0; y < height; ++y) {
  1809. ARGBToAR30Row(src_argb, dst_ar30, width);
  1810. src_argb += src_stride_argb;
  1811. dst_ar30 += dst_stride_ar30;
  1812. }
  1813. return 0;
  1814. }
// Convert ARGB to J420. (JPeg full range I420).
// Writes a full-range Y plane (dst_yj) and 2x2-subsampled U/V planes.
// Returns 0 on success, -1 on invalid argument. A negative height inverts
// the image vertically.
LIBYUV_API
int ARGBToJ420(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  // Row workers default to the portable C implementations; the dispatch
  // chain below upgrades them when the CPU supports a SIMD variant.
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // SIMD dispatch. Order matters: a later block overrides an earlier
  // selection, so the last supported (fastest) variant wins. _Any_
  // versions handle arbitrary widths; the aligned versions need the
  // width multiple shown.
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_MMI;
    }
  }
#endif
  // Process two rows per pass: chroma is subsampled 2x2, so one U/V row
  // covers a pair of luma rows (the UV worker reads the second source row
  // through the stride argument).
  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
    src_argb += src_stride_argb * 2;
    dst_yj += dst_stride_yj * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  // Odd trailing row: pass a 0 row stride so the UV worker's "second row"
  // aliases the last row rather than reading past the buffer.
  if (height & 1) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
  }
  return 0;
}
// Convert ARGB to J422. (JPeg full range I422).
// Writes a full-range Y plane (dst_yj) and U/V planes subsampled 2x
// horizontally only (one chroma row per luma row).
// Returns 0 on success, -1 on invalid argument. A negative height inverts
// the image vertically.
LIBYUV_API
int ARGBToJ422(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_yj,
               int dst_stride_yj,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  // Row workers default to the portable C implementations; the dispatch
  // chain below upgrades them when the CPU supports a SIMD variant.
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  // If all four planes are contiguous (note the half-width chroma
  // strides), fold the image into one long row and a single pass.
  if (src_stride_argb == width * 4 && dst_stride_yj == width &&
      dst_stride_u * 2 == width && dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
  }
  // SIMD dispatch. Order matters: a later block overrides an earlier
  // selection, so the last supported (fastest) variant wins. _Any_
  // versions handle arbitrary widths; the aligned versions need the
  // width multiple shown.
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYJRow = ARGBToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYJRow = ARGBToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_MMI;
    }
  }
#endif
  // One row per pass; chroma is not vertically subsampled, so the UV
  // worker gets a 0 row stride (its second sampled row aliases the first).
  for (y = 0; y < height; ++y) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
  2033. // Convert ARGB to J400.
  2034. LIBYUV_API
  2035. int ARGBToJ400(const uint8_t* src_argb,
  2036. int src_stride_argb,
  2037. uint8_t* dst_yj,
  2038. int dst_stride_yj,
  2039. int width,
  2040. int height) {
  2041. int y;
  2042. void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
  2043. ARGBToYJRow_C;
  2044. if (!src_argb || !dst_yj || width <= 0 || height == 0) {
  2045. return -1;
  2046. }
  2047. if (height < 0) {
  2048. height = -height;
  2049. src_argb = src_argb + (height - 1) * src_stride_argb;
  2050. src_stride_argb = -src_stride_argb;
  2051. }
  2052. // Coalesce rows.
  2053. if (src_stride_argb == width * 4 && dst_stride_yj == width) {
  2054. width *= height;
  2055. height = 1;
  2056. src_stride_argb = dst_stride_yj = 0;
  2057. }
  2058. #if defined(HAS_ARGBTOYJROW_SSSE3)
  2059. if (TestCpuFlag(kCpuHasSSSE3)) {
  2060. ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
  2061. if (IS_ALIGNED(width, 16)) {
  2062. ARGBToYJRow = ARGBToYJRow_SSSE3;
  2063. }
  2064. }
  2065. #endif
  2066. #if defined(HAS_ARGBTOYJROW_AVX2)
  2067. if (TestCpuFlag(kCpuHasAVX2)) {
  2068. ARGBToYJRow = ARGBToYJRow_Any_AVX2;
  2069. if (IS_ALIGNED(width, 32)) {
  2070. ARGBToYJRow = ARGBToYJRow_AVX2;
  2071. }
  2072. }
  2073. #endif
  2074. #if defined(HAS_ARGBTOYJROW_NEON)
  2075. if (TestCpuFlag(kCpuHasNEON)) {
  2076. ARGBToYJRow = ARGBToYJRow_Any_NEON;
  2077. if (IS_ALIGNED(width, 8)) {
  2078. ARGBToYJRow = ARGBToYJRow_NEON;
  2079. }
  2080. }
  2081. #endif
  2082. #if defined(HAS_ARGBTOYJROW_MSA)
  2083. if (TestCpuFlag(kCpuHasMSA)) {
  2084. ARGBToYJRow = ARGBToYJRow_Any_MSA;
  2085. if (IS_ALIGNED(width, 16)) {
  2086. ARGBToYJRow = ARGBToYJRow_MSA;
  2087. }
  2088. }
  2089. #endif
  2090. #if defined(HAS_ARGBTOYJROW_MMI)
  2091. if (TestCpuFlag(kCpuHasMMI)) {
  2092. ARGBToYJRow = ARGBToYJRow_Any_MMI;
  2093. if (IS_ALIGNED(width, 8)) {
  2094. ARGBToYJRow = ARGBToYJRow_MMI;
  2095. }
  2096. }
  2097. #endif
  2098. for (y = 0; y < height; ++y) {
  2099. ARGBToYJRow(src_argb, dst_yj, width);
  2100. src_argb += src_stride_argb;
  2101. dst_yj += dst_stride_yj;
  2102. }
  2103. return 0;
  2104. }
  2105. #ifdef __cplusplus
  2106. } // extern "C"
  2107. } // namespace libyuv
  2108. #endif