convert_from_argb.cc

/*
 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/convert_from_argb.h"

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// ARGB little endian (bgra in memory) to I444
LIBYUV_API
int ARGBToI444(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*ARGBToUV444Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
      int width) = ARGBToUV444Row_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width &&
      dst_stride_u == width &&
      dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOUV444ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToUV444Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
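
// Illustrative usage sketch (not part of the library; buffer names and sizes
// below are assumptions for the example). I444 keeps full-resolution chroma,
// so the Y, U and V planes are each width x height bytes and every plane
// stride equals width.
//
//   const int kWidth = 640, kHeight = 480;
//   // argb:    kWidth * kHeight * 4 bytes, stride kWidth * 4.
//   // y, u, v: kWidth * kHeight bytes each, stride kWidth.
//   int ret = ARGBToI444(argb, kWidth * 4,
//                        y, kWidth,
//                        u, kWidth,
//                        v, kWidth,
//                        kWidth, kHeight);  // 0 on success, -1 on bad args.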

// ARGB little endian (bgra in memory) to I422
LIBYUV_API
int ARGBToI422(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb ||
      !dst_y || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width &&
      dst_stride_u * 2 == width &&
      dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// ARGB little endian (bgra in memory) to I411
LIBYUV_API
int ARGBToI411(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUV411Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
      int width) = ARGBToUV411Row_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width &&
      dst_stride_u * 4 == width &&
      dst_stride_v * 4 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUV411ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV411Row = ARGBToUV411Row_Any_NEON;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUV411Row = ARGBToUV411Row_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToUV411Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// Convert ARGB to NV12 (full-resolution Y plane plus interleaved half-resolution UV plane).
LIBYUV_API
int ARGBToNV12(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_uv, int dst_stride_uv,
               int width, int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
      int width) = MergeUVRow_C;
  if (!src_argb ||
      !dst_y || !dst_uv ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
  {
    // Allocate temporary rows of U and V (one half-width row of each).
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8* row_v = row_u + ((halfwidth + 31) & ~31);

    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
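
// Illustrative usage sketch (not part of the library; buffer names are
// assumptions for the example). NV12 has a full-resolution Y plane plus one
// interleaved UV plane subsampled 2x2, so dst_uv needs ((width + 1) / 2) * 2
// bytes per row for (height + 1) / 2 rows.
//
//   const int kWidth = 640, kHeight = 480;
//   // argb: kWidth * kHeight * 4 bytes.
//   // y:    kWidth * kHeight bytes.
//   // uv:   ((kWidth + 1) / 2) * 2 * ((kHeight + 1) / 2) bytes.
//   int ret = ARGBToNV12(argb, kWidth * 4,
//                        y, kWidth,
//                        uv, ((kWidth + 1) / 2) * 2,
//                        kWidth, kHeight);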

// Same as NV12 but U and V swapped.
LIBYUV_API
int ARGBToNV21(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_uv, int dst_stride_uv,
               int width, int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
      int width) = MergeUVRow_C;
  if (!src_argb ||
      !dst_y || !dst_uv ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
  {
    // Allocate temporary rows of U and V (one half-width row of each).
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8* row_v = row_u + ((halfwidth + 31) & ~31);

    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

// Convert ARGB to YUY2.
LIBYUV_API
int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yuy2, int dst_stride_yuy2,
               int width, int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
      const uint8* src_v, uint8* dst_yuy2, int width) = I422ToYUY2Row_C;
  if (!src_argb || !dst_yuy2 ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
    dst_stride_yuy2 = -dst_stride_yuy2;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_yuy2 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yuy2 = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_NEON;
    }
  }
#endif
  {
    // Allocate temporary rows of Y, U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8* row_u = row_y + ((width + 63) & ~63);
    uint8* row_v = row_u + ((width + 63) & ~63) / 2;

    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
      src_argb += src_stride_argb;
      dst_yuy2 += dst_stride_yuy2;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}
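
// Illustrative usage sketch (not part of the library; buffer names are
// assumptions for the example). YUY2 is a packed 4:2:2 format at 2 bytes per
// pixel, so the destination needs width * 2 bytes per row.
//
//   const int kWidth = 640, kHeight = 480;
//   // argb: kWidth * kHeight * 4 bytes.
//   // yuy2: kWidth * 2 * kHeight bytes.
//   int ret = ARGBToYUY2(argb, kWidth * 4,
//                        yuy2, kWidth * 2,
//                        kWidth, kHeight);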

// Convert ARGB to UYVY.
LIBYUV_API
int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
               uint8* dst_uyvy, int dst_stride_uyvy,
               int width, int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
      const uint8* src_v, uint8* dst_uyvy, int width) = I422ToUYVYRow_C;
  if (!src_argb || !dst_uyvy ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
    dst_stride_uyvy = -dst_stride_uyvy;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_uyvy == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_uyvy = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_NEON;
    }
  }
#endif
  {
    // Allocate temporary rows of Y, U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8* row_u = row_y + ((width + 63) & ~63);
    uint8* row_v = row_u + ((width + 63) & ~63) / 2;

    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
      src_argb += src_stride_argb;
      dst_uyvy += dst_stride_uyvy;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}

// Convert ARGB to I400.
LIBYUV_API
int ARGBToI400(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               int width, int height) {
  int y;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}
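
// Illustrative usage sketch (not part of the library; buffer names are
// assumptions for the example). I400 is a single luma plane, so this is
// effectively an ARGB-to-grayscale conversion.
//
//   const int kWidth = 640, kHeight = 480;
//   // argb: kWidth * kHeight * 4 bytes.  y: kWidth * kHeight bytes.
//   int ret = ARGBToI400(argb, kWidth * 4, y, kWidth, kWidth, kHeight);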

// Shuffle table for converting ARGB to RGBA.
static uvec8 kShuffleMaskARGBToRGBA = {
  3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u
};

// Convert ARGB to RGBA.
LIBYUV_API
int ARGBToRGBA(const uint8* src_argb, int src_stride_argb,
               uint8* dst_rgba, int dst_stride_rgba,
               int width, int height) {
  return ARGBShuffle(src_argb, src_stride_argb,
                     dst_rgba, dst_stride_rgba,
                     (const uint8*)(&kShuffleMaskARGBToRGBA),
                     width, height);
}

// Convert ARGB To RGB24.
LIBYUV_API
int ARGBToRGB24(const uint8* src_argb, int src_stride_argb,
                uint8* dst_rgb24, int dst_stride_rgb24,
                int width, int height) {
  int y;
  void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRGB24Row_C;
  if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_rgb24 == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb24 = 0;
  }
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB24Row = ARGBToRGB24Row_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}

// Convert ARGB To RAW.
LIBYUV_API
int ARGBToRAW(const uint8* src_argb, int src_stride_argb,
              uint8* dst_raw, int dst_stride_raw,
              int width, int height) {
  int y;
  void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRAWRow_C;
  if (!src_argb || !dst_raw || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_raw == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_raw = 0;
  }
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRAWRow = ARGBToRAWRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}

// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
static const uint8 kDither565_4x4[16] = {
  0, 4, 1, 5,
  6, 2, 7, 3,
  1, 5, 0, 4,
  7, 3, 6, 2,
};

// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
LIBYUV_API
int ARGBToRGB565Dither(const uint8* src_argb, int src_stride_argb,
                       uint8* dst_rgb565, int dst_stride_rgb565,
                       const uint8* dither4x4, int width, int height) {
  int y;
  void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb,
      const uint32 dither4, int width) = ARGBToRGB565DitherRow_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  if (!dither4x4) {
    dither4x4 = kDither565_4x4;
  }
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRGB565DitherRow(src_argb, dst_rgb565,
                          *(uint32*)(dither4x4 + ((y & 3) << 2)), width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
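
// Illustrative usage sketch (not part of the library; buffer names are
// assumptions for the example). Passing NULL for dither4x4 selects the
// built-in kDither565_4x4 table above; a caller can also supply its own
// 16-byte matrix with values 0..7.
//
//   const int kWidth = 640, kHeight = 480;
//   // argb:   kWidth * kHeight * 4 bytes.
//   // rgb565: kWidth * 2 * kHeight bytes.
//   int ret = ARGBToRGB565Dither(argb, kWidth * 4,
//                                rgb565, kWidth * 2,
//                                NULL,  // use the default dither matrix
//                                kWidth, kHeight);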

// Convert ARGB To RGB565.
// TODO(fbarchard): Consider using dither function low level with zeros.
LIBYUV_API
int ARGBToRGB565(const uint8* src_argb, int src_stride_argb,
                 uint8* dst_rgb565, int dst_stride_rgb565,
                 int width, int height) {
  int y;
  void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRGB565Row_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_rgb565 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb565 = 0;
  }
#if defined(HAS_ARGBTORGB565ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToRGB565Row(src_argb, dst_rgb565, width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}

// Convert ARGB To ARGB1555.
LIBYUV_API
int ARGBToARGB1555(const uint8* src_argb, int src_stride_argb,
                   uint8* dst_argb1555, int dst_stride_argb1555,
                   int width, int height) {
  int y;
  void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToARGB1555Row_C;
  if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_argb1555 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb1555 = 0;
  }
#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToARGB1555Row(src_argb, dst_argb1555, width);
    src_argb += src_stride_argb;
    dst_argb1555 += dst_stride_argb1555;
  }
  return 0;
}

// Convert ARGB To ARGB4444.
LIBYUV_API
int ARGBToARGB4444(const uint8* src_argb, int src_stride_argb,
                   uint8* dst_argb4444, int dst_stride_argb4444,
                   int width, int height) {
  int y;
  void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToARGB4444Row_C;
  if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_argb4444 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb4444 = 0;
  }
#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToARGB4444Row(src_argb, dst_argb4444, width);
    src_argb += src_stride_argb;
    dst_argb4444 += dst_stride_argb4444;
  }
  return 0;
}

// Convert ARGB to J420 (JPEG full-range I420).
LIBYUV_API
int ARGBToJ420(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yj, int dst_stride_yj,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb ||
      !dst_yj || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif

  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
    src_argb += src_stride_argb * 2;
    dst_yj += dst_stride_yj * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
  }
  return 0;
}
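
// Illustrative usage sketch (not part of the library; buffer names are
// assumptions for the example). J420 uses the I420 layout with full-range
// (JPEG) YUV values: Y is width x height, U and V are each half width by
// half height.
//
//   const int kWidth = 640, kHeight = 480;
//   const int kHalfWidth = (kWidth + 1) / 2;
//   // argb: kWidth * kHeight * 4 bytes.  yj: kWidth * kHeight bytes.
//   // u, v: kHalfWidth * ((kHeight + 1) / 2) bytes each.
//   int ret = ARGBToJ420(argb, kWidth * 4,
//                        yj, kWidth,
//                        u, kHalfWidth,
//                        v, kHalfWidth,
//                        kWidth, kHeight);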

// Convert ARGB to J422 (JPEG full-range I422).
LIBYUV_API
int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yj, int dst_stride_yj,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb ||
      !dst_yj || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_yj == width &&
      dst_stride_u * 2 == width &&
      dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// Convert ARGB to J400.
LIBYUV_API
int ARGBToJ400(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yj, int dst_stride_yj,
               int width, int height) {
  int y;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif

  for (y = 0; y < height; ++y) {
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
  }
  return 0;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif