/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/convert.h"

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/row.h"
#include "libyuv/scale.h"  // For ScalePlane()

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#define SUBSAMPLE(v, a, s) (v < 0) ? (-((-v + a) >> s)) : ((v + a) >> s)
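// SUBSAMPLE rounds up and mirrors for negative values, e.g.
// SUBSAMPLE(5, 1, 1) == 3 and SUBSAMPLE(-5, 1, 1) == -3, so odd dimensions
// keep their final chroma sample and inverted (negative) sizes subsample
// symmetrically.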
static __inline int Abs(int v) {
  return v >= 0 ? v : -v;
}
// Any I4xx To I420 format with mirroring.
static int I4xxToI420(const uint8_t* src_y,
                      int src_stride_y,
                      const uint8_t* src_u,
                      int src_stride_u,
                      const uint8_t* src_v,
                      int src_stride_v,
                      uint8_t* dst_y,
                      int dst_stride_y,
                      uint8_t* dst_u,
                      int dst_stride_u,
                      uint8_t* dst_v,
                      int dst_stride_v,
                      int src_y_width,
                      int src_y_height,
                      int src_uv_width,
                      int src_uv_height) {
  const int dst_y_width = Abs(src_y_width);
  const int dst_y_height = Abs(src_y_height);
  const int dst_uv_width = SUBSAMPLE(dst_y_width, 1, 1);
  const int dst_uv_height = SUBSAMPLE(dst_y_height, 1, 1);
  if (src_uv_width == 0 || src_uv_height == 0) {
    return -1;
  }
  if (dst_y) {
    ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, dst_y,
               dst_stride_y, dst_y_width, dst_y_height, kFilterBilinear);
  }
  ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height, dst_u,
             dst_stride_u, dst_uv_width, dst_uv_height, kFilterBilinear);
  ScalePlane(src_v, src_stride_v, src_uv_width, src_uv_height, dst_v,
             dst_stride_v, dst_uv_width, dst_uv_height, kFilterBilinear);
  return 0;
}
// Copy I420 with optional flipping.
// TODO(fbarchard): Use Scale plane which supports mirroring, but ensure
// it does row coalescing.
LIBYUV_API
int I420Copy(const uint8_t* src_y,
             int src_stride_y,
             const uint8_t* src_u,
             int src_stride_u,
             const uint8_t* src_v,
             int src_stride_v,
             uint8_t* dst_y,
             int dst_stride_y,
             uint8_t* dst_u,
             int dst_stride_u,
             uint8_t* dst_v,
             int dst_stride_v,
             int width,
             int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
  if (dst_y) {
    CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
  }
  // Copy UV planes.
  CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight);
  CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight);
  return 0;
}
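// Example (illustrative only): passing a negative height flips the image
// vertically while copying:
//   I420Copy(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
//            dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v,
//            width, -height);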
// Copy I010 with optional flipping.
LIBYUV_API
int I010Copy(const uint16_t* src_y,
             int src_stride_y,
             const uint16_t* src_u,
             int src_stride_u,
             const uint16_t* src_v,
             int src_stride_v,
             uint16_t* dst_y,
             int dst_stride_y,
             uint16_t* dst_u,
             int dst_stride_u,
             uint16_t* dst_v,
             int dst_stride_v,
             int width,
             int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
  if (dst_y) {
    CopyPlane_16(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
  }
  // Copy UV planes.
  CopyPlane_16(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight);
  CopyPlane_16(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight);
  return 0;
}
// Convert 10 bit YUV to 8 bit.
LIBYUV_API
int I010ToI420(const uint16_t* src_y,
               int src_stride_y,
               const uint16_t* src_u,
               int src_stride_u,
               const uint16_t* src_v,
               int src_stride_v,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
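  // Convert16To8Plane computes (v * scale) >> 16 per sample; with
  // scale = 16384 that is v >> 2, mapping 10-bit values (0..1023) to
  // 8 bits (0..255).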
  // Convert Y plane.
  Convert16To8Plane(src_y, src_stride_y, dst_y, dst_stride_y, 16384, width,
                    height);
  // Convert UV planes.
  Convert16To8Plane(src_u, src_stride_u, dst_u, dst_stride_u, 16384, halfwidth,
                    halfheight);
  Convert16To8Plane(src_v, src_stride_v, dst_v, dst_stride_v, 16384, halfwidth,
                    halfheight);
  return 0;
}
// 422 chroma is 1/2 width, 1x height
// 420 chroma is 1/2 width, 1/2 height
LIBYUV_API
int I422ToI420(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_u,
               int src_stride_u,
               const uint8_t* src_v,
               int src_stride_v,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  const int src_uv_width = SUBSAMPLE(width, 1, 1);
  return I4xxToI420(src_y, src_stride_y, src_u, src_stride_u, src_v,
                    src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
                    dst_v, dst_stride_v, width, height, src_uv_width, height);
}
// TODO(fbarchard): Implement row conversion.
LIBYUV_API
int I422ToNV21(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_u,
               int src_stride_u,
               const uint8_t* src_v,
               int src_stride_v,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (height - 1) * src_stride_u;
    src_v = src_v + (height - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
  // Allocate u and v buffers
  align_buffer_64(plane_u, halfwidth * halfheight * 2);
  uint8_t* plane_v = plane_u + halfwidth * halfheight;
  I422ToI420(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
             dst_y, dst_stride_y, plane_u, halfwidth, plane_v, halfwidth, width,
             height);
  MergeUVPlane(plane_v, halfwidth, plane_u, halfwidth, dst_vu, dst_stride_vu,
               halfwidth, halfheight);
  free_aligned_buffer_64(plane_u);
  return 0;
}
#ifdef I422TONV21_ROW_VERSION
// Unittest fails for this version.
// 422 chroma is 1/2 width, 1x height
// 420 chroma is 1/2 width, 1/2 height
// Swap src_u and src_v to implement I422ToNV12
LIBYUV_API
int I422ToNV21(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_u,
               int src_stride_u,
               const uint8_t* src_v,
               int src_stride_v,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v,
                     uint8_t* dst_uv, int width) = MergeUVRow_C;
  void (*InterpolateRow)(uint8_t* dst_ptr, const uint8_t* src_ptr,
                         ptrdiff_t src_stride, int dst_width,
                         int source_y_fraction) = InterpolateRow_C;
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_u || !src_v || !dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow = MergeUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    MergeUVRow = MergeUVRow_Any_MMI;
    if (IS_ALIGNED(halfwidth, 8)) {
      MergeUVRow = MergeUVRow_MMI;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    InterpolateRow = InterpolateRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      InterpolateRow = InterpolateRow_SSSE3;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    InterpolateRow = InterpolateRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      InterpolateRow = InterpolateRow_AVX2;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    InterpolateRow = InterpolateRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      InterpolateRow = InterpolateRow_NEON;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    InterpolateRow = InterpolateRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      InterpolateRow = InterpolateRow_MSA;
    }
  }
#endif
#if defined(HAS_INTERPOLATEROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    InterpolateRow = InterpolateRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      InterpolateRow = InterpolateRow_MMI;
    }
  }
#endif
  if (dst_y) {
    CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, halfwidth, height);
  }
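  // Note: copying only halfwidth columns of the Y plane here looks
  // suspicious and may be why the unittest fails for this version.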
  {
    // Allocate 2 rows of vu.
    int awidth = halfwidth * 2;
    align_buffer_64(row_vu_0, awidth * 2);
    uint8_t* row_vu_1 = row_vu_0 + awidth;
    for (y = 0; y < height - 1; y += 2) {
      MergeUVRow(src_v, src_u, row_vu_0, halfwidth);
      MergeUVRow(src_v + src_stride_v, src_u + src_stride_u, row_vu_1,
                 halfwidth);
      InterpolateRow(dst_vu, row_vu_0, awidth, awidth, 128);
      src_u += src_stride_u * 2;
      src_v += src_stride_v * 2;
      dst_vu += dst_stride_vu;
    }
    if (height & 1) {
      MergeUVRow(src_v, src_u, dst_vu, halfwidth);
    }
    free_aligned_buffer_64(row_vu_0);
  }
  return 0;
}
#endif  // I422TONV21_ROW_VERSION
// 444 chroma is 1x width, 1x height
// 420 chroma is 1/2 width, 1/2 height
LIBYUV_API
int I444ToI420(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_u,
               int src_stride_u,
               const uint8_t* src_v,
               int src_stride_v,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  return I4xxToI420(src_y, src_stride_y, src_u, src_stride_u, src_v,
                    src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
                    dst_v, dst_stride_v, width, height, width, height);
}
// TODO(fbarchard): Implement row conversion.
LIBYUV_API
int I444ToNV21(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_u,
               int src_stride_u,
               const uint8_t* src_v,
               int src_stride_v,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (height - 1) * src_stride_u;
    src_v = src_v + (height - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
  // Allocate u and v buffers
  align_buffer_64(plane_u, halfwidth * halfheight * 2);
  uint8_t* plane_v = plane_u + halfwidth * halfheight;
  I444ToI420(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
             dst_y, dst_stride_y, plane_u, halfwidth, plane_v, halfwidth, width,
             height);
  MergeUVPlane(plane_v, halfwidth, plane_u, halfwidth, dst_vu, dst_stride_vu,
               halfwidth, halfheight);
  free_aligned_buffer_64(plane_u);
  return 0;
}
// I400 is greyscale typically used in MJPG
LIBYUV_API
int I400ToI420(const uint8_t* src_y,
               int src_stride_y,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_stride_y = -src_stride_y;
  }
  if (dst_y) {
    CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
  }
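  // 128 is the unbiased (neutral) chroma value, so the output renders as
  // grey.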
  SetPlane(dst_u, dst_stride_u, halfwidth, halfheight, 128);
  SetPlane(dst_v, dst_stride_v, halfwidth, halfheight, 128);
  return 0;
}
// I400 is greyscale typically used in MJPG
LIBYUV_API
int I400ToNV21(const uint8_t* src_y,
               int src_stride_y,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!dst_vu || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_stride_y = -src_stride_y;
  }
  if (dst_y) {
    CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
  }
  SetPlane(dst_vu, dst_stride_vu, halfwidth * 2, halfheight, 128);
  return 0;
}
static void CopyPlane2(const uint8_t* src,
                       int src_stride_0,
                       int src_stride_1,
                       uint8_t* dst,
                       int dst_stride,
                       int width,
                       int height) {
  int y;
  void (*CopyRow)(const uint8_t* src, uint8_t* dst, int width) = CopyRow_C;
#if defined(HAS_COPYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
  }
#endif
#if defined(HAS_COPYROW_AVX)
  if (TestCpuFlag(kCpuHasAVX)) {
    CopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
  }
#endif
#if defined(HAS_COPYROW_ERMS)
  if (TestCpuFlag(kCpuHasERMS)) {
    CopyRow = CopyRow_ERMS;
  }
#endif
#if defined(HAS_COPYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
  }
#endif

  // Copy plane
  for (y = 0; y < height - 1; y += 2) {
    CopyRow(src, dst, width);
    CopyRow(src + src_stride_0, dst + dst_stride, width);
    src += src_stride_0 + src_stride_1;
    dst += dst_stride * 2;
  }
  if (height & 1) {
    CopyRow(src, dst, width);
  }
}
// Support converting from FOURCC_M420
// Useful for bandwidth constrained transports like USB 1.0 and 2.0 and for
// easy conversion to I420.
// M420 format description:
// M420 is row biplanar 420: 2 rows of Y and 1 row of UV.
// Chroma is half width / half height. (420)
// src_stride_m420 is row planar. Normally this will be the width in pixels.
// The UV plane is half width, but 2 values, so src_stride_m420 applies to
// this as well as the two Y planes.
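// Buffer layout per 3 rows (illustrative):
//   Y row 0  (width bytes)
//   Y row 1  (width bytes)
//   UV row   (interleaved U/V covering both Y rows, 2 * halfwidth bytes)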
static int X420ToI420(const uint8_t* src_y,
                      int src_stride_y0,
                      int src_stride_y1,
                      const uint8_t* src_uv,
                      int src_stride_uv,
                      uint8_t* dst_y,
                      int dst_stride_y,
                      uint8_t* dst_u,
                      int dst_stride_u,
                      uint8_t* dst_v,
                      int dst_stride_v,
                      int width,
                      int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_uv || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    if (dst_y) {
      dst_y = dst_y + (height - 1) * dst_stride_y;
    }
    dst_u = dst_u + (halfheight - 1) * dst_stride_u;
    dst_v = dst_v + (halfheight - 1) * dst_stride_v;
    dst_stride_y = -dst_stride_y;
    dst_stride_u = -dst_stride_u;
    dst_stride_v = -dst_stride_v;
  }
  // Coalesce rows.
  if (src_stride_y0 == width && src_stride_y1 == width &&
      dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_y0 = src_stride_y1 = dst_stride_y = 0;
  }
  // Coalesce rows.
  if (src_stride_uv == halfwidth * 2 && dst_stride_u == halfwidth &&
      dst_stride_v == halfwidth) {
    halfwidth *= halfheight;
    halfheight = 1;
    src_stride_uv = dst_stride_u = dst_stride_v = 0;
  }
  if (dst_y) {
    if (src_stride_y0 == src_stride_y1) {
      CopyPlane(src_y, src_stride_y0, dst_y, dst_stride_y, width, height);
    } else {
      CopyPlane2(src_y, src_stride_y0, src_stride_y1, dst_y, dst_stride_y,
                 width, height);
    }
  }
  // Split UV plane - NV12 / NV21
  SplitUVPlane(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, dst_stride_v,
               halfwidth, halfheight);
  return 0;
}
// Convert NV12 to I420.
LIBYUV_API
int NV12ToI420(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_uv,
               int src_stride_uv,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  return X420ToI420(src_y, src_stride_y, src_stride_y, src_uv, src_stride_uv,
                    dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
                    dst_stride_v, width, height);
}
// Convert NV21 to I420. Same as NV12 but u and v pointers swapped.
LIBYUV_API
int NV21ToI420(const uint8_t* src_y,
               int src_stride_y,
               const uint8_t* src_vu,
               int src_stride_vu,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  return X420ToI420(src_y, src_stride_y, src_stride_y, src_vu, src_stride_vu,
                    dst_y, dst_stride_y, dst_v, dst_stride_v, dst_u,
                    dst_stride_u, width, height);
}
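// M420ToI420 (below) maps M420 onto X420ToI420: every 3 buffer rows hold
// 2 Y rows followed by 1 UV row, so the Y plane is read with alternating
// strides (stride, 2 * stride) and the UV plane starts at offset 2 * stride
// with an effective stride of 3 * stride.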
// Convert M420 to I420.
LIBYUV_API
int M420ToI420(const uint8_t* src_m420,
               int src_stride_m420,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  return X420ToI420(src_m420, src_stride_m420, src_stride_m420 * 2,
                    src_m420 + src_stride_m420 * 2, src_stride_m420 * 3, dst_y,
                    dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v,
                    width, height);
}
// Convert YUY2 to I420.
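// YUY2 is packed 4:2:2: each 4 bytes Y0 U0 Y1 V0 describe two pixels.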
LIBYUV_API
int YUY2ToI420(const uint8_t* src_yuy2,
               int src_stride_yuy2,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*YUY2ToUVRow)(const uint8_t* src_yuy2, int src_stride_yuy2,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      YUY2ToUVRow_C;
  void (*YUY2ToYRow)(const uint8_t* src_yuy2, uint8_t* dst_y, int width) =
      YUY2ToYRow_C;
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2;
    src_stride_yuy2 = -src_stride_yuy2;
  }
#if defined(HAS_YUY2TOYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    YUY2ToUVRow = YUY2ToUVRow_Any_SSE2;
    YUY2ToYRow = YUY2ToYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      YUY2ToUVRow = YUY2ToUVRow_SSE2;
      YUY2ToYRow = YUY2ToYRow_SSE2;
    }
  }
#endif
#if defined(HAS_YUY2TOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    YUY2ToUVRow = YUY2ToUVRow_Any_AVX2;
    YUY2ToYRow = YUY2ToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      YUY2ToUVRow = YUY2ToUVRow_AVX2;
      YUY2ToYRow = YUY2ToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_YUY2TOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    YUY2ToYRow = YUY2ToYRow_Any_NEON;
    YUY2ToUVRow = YUY2ToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      YUY2ToYRow = YUY2ToYRow_NEON;
      YUY2ToUVRow = YUY2ToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_YUY2TOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    YUY2ToYRow = YUY2ToYRow_Any_MSA;
    YUY2ToUVRow = YUY2ToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      YUY2ToYRow = YUY2ToYRow_MSA;
      YUY2ToUVRow = YUY2ToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_YUY2TOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    YUY2ToYRow = YUY2ToYRow_Any_MMI;
    YUY2ToUVRow = YUY2ToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      YUY2ToYRow = YUY2ToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        YUY2ToUVRow = YUY2ToUVRow_MMI;
      }
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    YUY2ToUVRow(src_yuy2, src_stride_yuy2, dst_u, dst_v, width);
    YUY2ToYRow(src_yuy2, dst_y, width);
    YUY2ToYRow(src_yuy2 + src_stride_yuy2, dst_y + dst_stride_y, width);
    src_yuy2 += src_stride_yuy2 * 2;
    dst_y += dst_stride_y * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
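  // Odd height: convert the final row, passing stride 0 so the UV sampler
  // reads the same row twice instead of a row past the end.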
  if (height & 1) {
    YUY2ToUVRow(src_yuy2, 0, dst_u, dst_v, width);
    YUY2ToYRow(src_yuy2, dst_y, width);
  }
  return 0;
}
// Convert UYVY to I420.
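// UYVY is packed 4:2:2: each 4 bytes U0 Y0 V0 Y1 describe two pixels.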
LIBYUV_API
int UYVYToI420(const uint8_t* src_uyvy,
               int src_stride_uyvy,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*UYVYToUVRow)(const uint8_t* src_uyvy, int src_stride_uyvy,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      UYVYToUVRow_C;
  void (*UYVYToYRow)(const uint8_t* src_uyvy, uint8_t* dst_y, int width) =
      UYVYToYRow_C;
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy;
    src_stride_uyvy = -src_stride_uyvy;
  }
#if defined(HAS_UYVYTOYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    UYVYToUVRow = UYVYToUVRow_Any_SSE2;
    UYVYToYRow = UYVYToYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      UYVYToUVRow = UYVYToUVRow_SSE2;
      UYVYToYRow = UYVYToYRow_SSE2;
    }
  }
#endif
#if defined(HAS_UYVYTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    UYVYToUVRow = UYVYToUVRow_Any_AVX2;
    UYVYToYRow = UYVYToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      UYVYToUVRow = UYVYToUVRow_AVX2;
      UYVYToYRow = UYVYToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_UYVYTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    UYVYToYRow = UYVYToYRow_Any_NEON;
    UYVYToUVRow = UYVYToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      UYVYToYRow = UYVYToYRow_NEON;
      UYVYToUVRow = UYVYToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_UYVYTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    UYVYToYRow = UYVYToYRow_Any_MSA;
    UYVYToUVRow = UYVYToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      UYVYToYRow = UYVYToYRow_MSA;
      UYVYToUVRow = UYVYToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_UYVYTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    UYVYToYRow = UYVYToYRow_Any_MMI;
    UYVYToUVRow = UYVYToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      UYVYToYRow = UYVYToYRow_MMI;
      UYVYToUVRow = UYVYToUVRow_MMI;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    UYVYToUVRow(src_uyvy, src_stride_uyvy, dst_u, dst_v, width);
    UYVYToYRow(src_uyvy, dst_y, width);
    UYVYToYRow(src_uyvy + src_stride_uyvy, dst_y + dst_stride_y, width);
    src_uyvy += src_stride_uyvy * 2;
    dst_y += dst_stride_y * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    UYVYToUVRow(src_uyvy, 0, dst_u, dst_v, width);
    UYVYToYRow(src_uyvy, dst_y, width);
  }
  return 0;
}
// Convert AYUV to NV12.
LIBYUV_API
int AYUVToNV12(const uint8_t* src_ayuv,
               int src_stride_ayuv,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_uv,
               int dst_stride_uv,
               int width,
               int height) {
  int y;
  void (*AYUVToUVRow)(const uint8_t* src_ayuv, int src_stride_ayuv,
                      uint8_t* dst_uv, int width) = AYUVToUVRow_C;
  void (*AYUVToYRow)(const uint8_t* src_ayuv, uint8_t* dst_y, int width) =
      AYUVToYRow_C;
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_ayuv = src_ayuv + (height - 1) * src_stride_ayuv;
    src_stride_ayuv = -src_stride_ayuv;
  }
// Placeholders for future Intel code.
#if defined(HAS_AYUVTOYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    AYUVToUVRow = AYUVToUVRow_Any_SSE2;
    AYUVToYRow = AYUVToYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      AYUVToUVRow = AYUVToUVRow_SSE2;
      AYUVToYRow = AYUVToYRow_SSE2;
    }
  }
#endif
#if defined(HAS_AYUVTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    AYUVToUVRow = AYUVToUVRow_Any_AVX2;
    AYUVToYRow = AYUVToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      AYUVToUVRow = AYUVToUVRow_AVX2;
      AYUVToYRow = AYUVToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_AYUVTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    AYUVToYRow = AYUVToYRow_Any_NEON;
    AYUVToUVRow = AYUVToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      AYUVToYRow = AYUVToYRow_NEON;
      AYUVToUVRow = AYUVToUVRow_NEON;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    AYUVToUVRow(src_ayuv, src_stride_ayuv, dst_uv, width);
    AYUVToYRow(src_ayuv, dst_y, width);
    AYUVToYRow(src_ayuv + src_stride_ayuv, dst_y + dst_stride_y, width);
    src_ayuv += src_stride_ayuv * 2;
    dst_y += dst_stride_y * 2;
    dst_uv += dst_stride_uv;
  }
  if (height & 1) {
    AYUVToUVRow(src_ayuv, 0, dst_uv, width);
    AYUVToYRow(src_ayuv, dst_y, width);
  }
  return 0;
}
// Convert AYUV to NV21.
LIBYUV_API
int AYUVToNV21(const uint8_t* src_ayuv,
               int src_stride_ayuv,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_vu,
               int dst_stride_vu,
               int width,
               int height) {
  int y;
  void (*AYUVToVURow)(const uint8_t* src_ayuv, int src_stride_ayuv,
                      uint8_t* dst_vu, int width) = AYUVToVURow_C;
  void (*AYUVToYRow)(const uint8_t* src_ayuv, uint8_t* dst_y, int width) =
      AYUVToYRow_C;
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_ayuv = src_ayuv + (height - 1) * src_stride_ayuv;
    src_stride_ayuv = -src_stride_ayuv;
  }
// Placeholders for future Intel code.
#if defined(HAS_AYUVTOYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    AYUVToVURow = AYUVToVURow_Any_SSE2;
    AYUVToYRow = AYUVToYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      AYUVToVURow = AYUVToVURow_SSE2;
      AYUVToYRow = AYUVToYRow_SSE2;
    }
  }
#endif
#if defined(HAS_AYUVTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    AYUVToVURow = AYUVToVURow_Any_AVX2;
    AYUVToYRow = AYUVToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      AYUVToVURow = AYUVToVURow_AVX2;
      AYUVToYRow = AYUVToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_AYUVTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    AYUVToYRow = AYUVToYRow_Any_NEON;
    AYUVToVURow = AYUVToVURow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      AYUVToYRow = AYUVToYRow_NEON;
      AYUVToVURow = AYUVToVURow_NEON;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    AYUVToVURow(src_ayuv, src_stride_ayuv, dst_vu, width);
    AYUVToYRow(src_ayuv, dst_y, width);
    AYUVToYRow(src_ayuv + src_stride_ayuv, dst_y + dst_stride_y, width);
    src_ayuv += src_stride_ayuv * 2;
    dst_y += dst_stride_y * 2;
    dst_vu += dst_stride_vu;
  }
  if (height & 1) {
    AYUVToVURow(src_ayuv, 0, dst_vu, width);
    AYUVToYRow(src_ayuv, dst_y, width);
  }
  return 0;
}
// Convert ARGB to I420.
LIBYUV_API
int ARGBToI420(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_MMI;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
    src_argb += src_stride_argb * 2;
    dst_y += dst_stride_y * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
  }
  return 0;
}
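// Example (illustrative sketch; the buffer names are hypothetical): for a
// width x height ARGB image with tightly packed planes:
//   ARGBToI420(argb, width * 4,
//              y_plane, width,
//              u_plane, (width + 1) / 2,
//              v_plane, (width + 1) / 2,
//              width, height);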
// Convert BGRA to I420.
LIBYUV_API
int BGRAToI420(const uint8_t* src_bgra,
               int src_stride_bgra,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*BGRAToUVRow)(const uint8_t* src_bgra0, int src_stride_bgra,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      BGRAToUVRow_C;
  void (*BGRAToYRow)(const uint8_t* src_bgra, uint8_t* dst_y, int width) =
      BGRAToYRow_C;
  if (!src_bgra || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_bgra = src_bgra + (height - 1) * src_stride_bgra;
    src_stride_bgra = -src_stride_bgra;
  }
#if defined(HAS_BGRATOYROW_SSSE3) && defined(HAS_BGRATOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    BGRAToUVRow = BGRAToUVRow_Any_SSSE3;
    BGRAToYRow = BGRAToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      BGRAToUVRow = BGRAToUVRow_SSSE3;
      BGRAToYRow = BGRAToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_BGRATOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    BGRAToYRow = BGRAToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      BGRAToYRow = BGRAToYRow_NEON;
    }
  }
#endif
#if defined(HAS_BGRATOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    BGRAToUVRow = BGRAToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      BGRAToUVRow = BGRAToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_BGRATOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    BGRAToYRow = BGRAToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      BGRAToYRow = BGRAToYRow_MSA;
    }
  }
#endif
#if defined(HAS_BGRATOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    BGRAToUVRow = BGRAToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      BGRAToUVRow = BGRAToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_BGRATOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    BGRAToYRow = BGRAToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      BGRAToYRow = BGRAToYRow_MMI;
    }
  }
#endif
#if defined(HAS_BGRATOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    BGRAToUVRow = BGRAToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      BGRAToUVRow = BGRAToUVRow_MMI;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    BGRAToUVRow(src_bgra, src_stride_bgra, dst_u, dst_v, width);
    BGRAToYRow(src_bgra, dst_y, width);
    BGRAToYRow(src_bgra + src_stride_bgra, dst_y + dst_stride_y, width);
    src_bgra += src_stride_bgra * 2;
    dst_y += dst_stride_y * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    BGRAToUVRow(src_bgra, 0, dst_u, dst_v, width);
    BGRAToYRow(src_bgra, dst_y, width);
  }
  return 0;
}
// Convert ABGR to I420.
LIBYUV_API
int ABGRToI420(const uint8_t* src_abgr,
               int src_stride_abgr,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*ABGRToUVRow)(const uint8_t* src_abgr0, int src_stride_abgr,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ABGRToUVRow_C;
  void (*ABGRToYRow)(const uint8_t* src_abgr, uint8_t* dst_y, int width) =
      ABGRToYRow_C;
  if (!src_abgr || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
    ABGRToYRow = ABGRToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_SSSE3;
      ABGRToYRow = ABGRToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToYRow = ABGRToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ABGRToUVRow = ABGRToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToYRow = ABGRToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToYRow = ABGRToYRow_MSA;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ABGRToUVRow = ABGRToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_ABGRTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToYRow = ABGRToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ABGRToYRow = ABGRToYRow_MMI;
    }
  }
#endif
#if defined(HAS_ABGRTOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ABGRToUVRow = ABGRToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      ABGRToUVRow = ABGRToUVRow_MMI;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    ABGRToUVRow(src_abgr, src_stride_abgr, dst_u, dst_v, width);
    ABGRToYRow(src_abgr, dst_y, width);
    ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
    src_abgr += src_stride_abgr * 2;
    dst_y += dst_stride_y * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    ABGRToUVRow(src_abgr, 0, dst_u, dst_v, width);
    ABGRToYRow(src_abgr, dst_y, width);
  }
  return 0;
}
// Convert RGBA to I420.
LIBYUV_API
int RGBAToI420(const uint8_t* src_rgba,
               int src_stride_rgba,
               uint8_t* dst_y,
               int dst_stride_y,
               uint8_t* dst_u,
               int dst_stride_u,
               uint8_t* dst_v,
               int dst_stride_v,
               int width,
               int height) {
  int y;
  void (*RGBAToUVRow)(const uint8_t* src_rgba0, int src_stride_rgba,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      RGBAToUVRow_C;
  void (*RGBAToYRow)(const uint8_t* src_rgba, uint8_t* dst_y, int width) =
      RGBAToYRow_C;
  if (!src_rgba || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgba = src_rgba + (height - 1) * src_stride_rgba;
    src_stride_rgba = -src_stride_rgba;
  }
#if defined(HAS_RGBATOYROW_SSSE3) && defined(HAS_RGBATOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RGBAToUVRow = RGBAToUVRow_Any_SSSE3;
    RGBAToYRow = RGBAToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RGBAToUVRow = RGBAToUVRow_SSSE3;
      RGBAToYRow = RGBAToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_RGBATOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGBAToYRow = RGBAToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGBAToYRow = RGBAToYRow_NEON;
    }
  }
#endif
#if defined(HAS_RGBATOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGBAToUVRow = RGBAToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      RGBAToUVRow = RGBAToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_RGBATOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGBAToYRow = RGBAToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGBAToYRow = RGBAToYRow_MSA;
    }
  }
#endif
#if defined(HAS_RGBATOUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGBAToUVRow = RGBAToUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGBAToUVRow = RGBAToUVRow_MSA;
    }
  }
#endif
#if defined(HAS_RGBATOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGBAToYRow = RGBAToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGBAToYRow = RGBAToYRow_MMI;
    }
  }
#endif
#if defined(HAS_RGBATOUVROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGBAToUVRow = RGBAToUVRow_Any_MMI;
    if (IS_ALIGNED(width, 16)) {
      RGBAToUVRow = RGBAToUVRow_MMI;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    RGBAToUVRow(src_rgba, src_stride_rgba, dst_u, dst_v, width);
    RGBAToYRow(src_rgba, dst_y, width);
    RGBAToYRow(src_rgba + src_stride_rgba, dst_y + dst_stride_y, width);
    src_rgba += src_stride_rgba * 2;
    dst_y += dst_stride_y * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    RGBAToUVRow(src_rgba, 0, dst_u, dst_v, width);
    RGBAToYRow(src_rgba, dst_y, width);
  }
  return 0;
}
// Convert RGB24 to I420.
LIBYUV_API
int RGB24ToI420(const uint8_t* src_rgb24,
                int src_stride_rgb24,
                uint8_t* dst_y,
                int dst_stride_y,
                uint8_t* dst_u,
                int dst_stride_u,
                uint8_t* dst_v,
                int dst_stride_v,
                int width,
                int height) {
  int y;
#if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \
     defined(HAS_RGB24TOYROW_MMI))
  void (*RGB24ToUVRow)(const uint8_t* src_rgb24, int src_stride_rgb24,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      RGB24ToUVRow_C;
  void (*RGB24ToYRow)(const uint8_t* src_rgb24, uint8_t* dst_y, int width) =
      RGB24ToYRow_C;
#else
  void (*RGB24ToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb, int width) =
      RGB24ToARGBRow_C;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
#endif
  if (!src_rgb24 || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgb24 = src_rgb24 + (height - 1) * src_stride_rgb24;
    src_stride_rgb24 = -src_stride_rgb24;
  }
// Neon version does direct RGB24 to YUV.
#if defined(HAS_RGB24TOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGB24ToUVRow = RGB24ToUVRow_Any_NEON;
    RGB24ToYRow = RGB24ToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGB24ToYRow = RGB24ToYRow_NEON;
      if (IS_ALIGNED(width, 16)) {
        RGB24ToUVRow = RGB24ToUVRow_NEON;
      }
    }
  }
#elif defined(HAS_RGB24TOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGB24ToUVRow = RGB24ToUVRow_Any_MSA;
    RGB24ToYRow = RGB24ToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGB24ToYRow = RGB24ToYRow_MSA;
      RGB24ToUVRow = RGB24ToUVRow_MSA;
    }
  }
#elif defined(HAS_RGB24TOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGB24ToUVRow = RGB24ToUVRow_Any_MMI;
    RGB24ToYRow = RGB24ToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGB24ToYRow = RGB24ToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        RGB24ToUVRow = RGB24ToUVRow_MMI;
      }
    }
  }
// Other platforms do intermediate conversion from RGB24 to ARGB.
#else
#if defined(HAS_RGB24TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RGB24ToARGBRow = RGB24ToARGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#endif
  {
#if !(defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \
      defined(HAS_RGB24TOYROW_MMI))
    // Allocate 2 rows of ARGB.
  1493. const int kRowSize = (width * 4 + 31) & ~31;
  1494. align_buffer_64(row, kRowSize * 2);
  1495. #endif
  1496. for (y = 0; y < height - 1; y += 2) {
  1497. #if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \
  1498. defined(HAS_RGB24TOYROW_MMI))
  1499. RGB24ToUVRow(src_rgb24, src_stride_rgb24, dst_u, dst_v, width);
  1500. RGB24ToYRow(src_rgb24, dst_y, width);
  1501. RGB24ToYRow(src_rgb24 + src_stride_rgb24, dst_y + dst_stride_y, width);
  1502. #else
  1503. RGB24ToARGBRow(src_rgb24, row, width);
  1504. RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + kRowSize, width);
  1505. ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
  1506. ARGBToYRow(row, dst_y, width);
  1507. ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
  1508. #endif
  1509. src_rgb24 += src_stride_rgb24 * 2;
  1510. dst_y += dst_stride_y * 2;
  1511. dst_u += dst_stride_u;
  1512. dst_v += dst_stride_v;
  1513. }
  1514. if (height & 1) {
  1515. #if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \
  1516. defined(HAS_RGB24TOYROW_MMI))
  1517. RGB24ToUVRow(src_rgb24, 0, dst_u, dst_v, width);
  1518. RGB24ToYRow(src_rgb24, dst_y, width);
  1519. #else
  1520. RGB24ToARGBRow(src_rgb24, row, width);
  1521. ARGBToUVRow(row, 0, dst_u, dst_v, width);
  1522. ARGBToYRow(row, dst_y, width);
  1523. #endif
  1524. }
  1525. #if !(defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \
  1526. defined(HAS_RGB24TOYROW_MMI))
  1527. free_aligned_buffer_64(row);
  1528. #endif
  1529. }
  1530. return 0;
  1531. }
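
// Illustrative sketch, not library code: the negative-height convention noted
// above flips the image vertically during conversion, which is handy for
// bottom-up sources such as BMP-style buffers. Buffer names are assumptions.
//
//   // Convert a bottom-up 320x240 RGB24 frame into top-down I420:
//   RGB24ToI420(rgb24, 320 * 3,
//               y_plane, 320, u_plane, 160, v_plane, 160,
//               320, -240);
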
// TODO(fbarchard): Use Matrix version to implement I420 and J420.
// Convert RGB24 to J420.
LIBYUV_API
int RGB24ToJ420(const uint8_t* src_rgb24,
                int src_stride_rgb24,
                uint8_t* dst_y,
                int dst_stride_y,
                uint8_t* dst_u,
                int dst_stride_u,
                uint8_t* dst_v,
                int dst_stride_v,
                int width,
                int height) {
  int y;
#if (defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
     defined(HAS_RGB24TOYJROW_MMI))
  void (*RGB24ToUVJRow)(const uint8_t* src_rgb24, int src_stride_rgb24,
                        uint8_t* dst_u, uint8_t* dst_v, int width) =
      RGB24ToUVJRow_C;
  void (*RGB24ToYJRow)(const uint8_t* src_rgb24, uint8_t* dst_y, int width) =
      RGB24ToYJRow_C;
#else
  void (*RGB24ToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb,
                         int width) = RGB24ToARGBRow_C;
  void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb,
                       uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYJRow_C;
#endif
  if (!src_rgb24 || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgb24 = src_rgb24 + (height - 1) * src_stride_rgb24;
    src_stride_rgb24 = -src_stride_rgb24;
  }

// NEON, MSA and MMI versions do direct RGB24 to YUV.
#if defined(HAS_RGB24TOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGB24ToUVJRow = RGB24ToUVJRow_Any_NEON;
    RGB24ToYJRow = RGB24ToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGB24ToYJRow = RGB24ToYJRow_NEON;
      if (IS_ALIGNED(width, 16)) {
        RGB24ToUVJRow = RGB24ToUVJRow_NEON;
      }
    }
  }
#elif defined(HAS_RGB24TOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGB24ToUVJRow = RGB24ToUVJRow_Any_MSA;
    RGB24ToYJRow = RGB24ToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGB24ToYJRow = RGB24ToYJRow_MSA;
      RGB24ToUVJRow = RGB24ToUVJRow_MSA;
    }
  }
#elif defined(HAS_RGB24TOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGB24ToUVJRow = RGB24ToUVJRow_Any_MMI;
    RGB24ToYJRow = RGB24ToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGB24ToYJRow = RGB24ToYJRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        RGB24ToUVJRow = RGB24ToUVJRow_MMI;
      }
    }
  }
// Other platforms do intermediate conversion from RGB24 to ARGB.
#else
#if defined(HAS_RGB24TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RGB24ToARGBRow = RGB24ToARGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2) && defined(HAS_ARGBTOUVJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_AVX2;
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVJRow = ARGBToUVJRow_AVX2;
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#endif

  {
#if !(defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
      defined(HAS_RGB24TOYJROW_MMI))
    // Allocate 2 rows of ARGB.
    const int kRowSize = (width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
#endif

    for (y = 0; y < height - 1; y += 2) {
#if (defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
     defined(HAS_RGB24TOYJROW_MMI))
      RGB24ToUVJRow(src_rgb24, src_stride_rgb24, dst_u, dst_v, width);
      RGB24ToYJRow(src_rgb24, dst_y, width);
      RGB24ToYJRow(src_rgb24 + src_stride_rgb24, dst_y + dst_stride_y, width);
#else
      RGB24ToARGBRow(src_rgb24, row, width);
      RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + kRowSize, width);
      ARGBToUVJRow(row, kRowSize, dst_u, dst_v, width);
      ARGBToYJRow(row, dst_y, width);
      ARGBToYJRow(row + kRowSize, dst_y + dst_stride_y, width);
#endif
      src_rgb24 += src_stride_rgb24 * 2;
      dst_y += dst_stride_y * 2;
      dst_u += dst_stride_u;
      dst_v += dst_stride_v;
    }
    if (height & 1) {
#if (defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
     defined(HAS_RGB24TOYJROW_MMI))
      RGB24ToUVJRow(src_rgb24, 0, dst_u, dst_v, width);
      RGB24ToYJRow(src_rgb24, dst_y, width);
#else
      RGB24ToARGBRow(src_rgb24, row, width);
      ARGBToUVJRow(row, 0, dst_u, dst_v, width);
      ARGBToYJRow(row, dst_y, width);
#endif
    }
#if !(defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
      defined(HAS_RGB24TOYJROW_MMI))
    free_aligned_buffer_64(row);
#endif
  }
  return 0;
}
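
// Note: J420 output differs from I420 only in range: the *YJRow / *UVJRow
// kernels use the full-range "JPEG" YUV coefficients, while the I420 path
// above produces limited-range values. The call shape is identical, e.g.
// (buffer names are illustrative assumptions, not library code):
//
//   RGB24ToJ420(rgb24, width * 3,
//               y_plane, width, u_plane, (width + 1) / 2,
//               v_plane, (width + 1) / 2, width, height);
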
// Convert RAW to I420.
LIBYUV_API
int RAWToI420(const uint8_t* src_raw,
              int src_stride_raw,
              uint8_t* dst_y,
              int dst_stride_y,
              uint8_t* dst_u,
              int dst_stride_u,
              uint8_t* dst_v,
              int dst_stride_v,
              int width,
              int height) {
  int y;
#if (defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \
     defined(HAS_RAWTOYROW_MMI))
  void (*RAWToUVRow)(const uint8_t* src_raw, int src_stride_raw,
                     uint8_t* dst_u, uint8_t* dst_v, int width) = RAWToUVRow_C;
  void (*RAWToYRow)(const uint8_t* src_raw, uint8_t* dst_y, int width) =
      RAWToYRow_C;
#else
  void (*RAWToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb, int width) =
      RAWToARGBRow_C;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
#endif
  if (!src_raw || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_raw = src_raw + (height - 1) * src_stride_raw;
    src_stride_raw = -src_stride_raw;
  }

// NEON, MSA and MMI versions do direct RAW to YUV.
#if defined(HAS_RAWTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RAWToUVRow = RAWToUVRow_Any_NEON;
    RAWToYRow = RAWToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RAWToYRow = RAWToYRow_NEON;
      if (IS_ALIGNED(width, 16)) {
        RAWToUVRow = RAWToUVRow_NEON;
      }
    }
  }
#elif defined(HAS_RAWTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RAWToUVRow = RAWToUVRow_Any_MSA;
    RAWToYRow = RAWToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RAWToYRow = RAWToYRow_MSA;
      RAWToUVRow = RAWToUVRow_MSA;
    }
  }
#elif defined(HAS_RAWTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RAWToUVRow = RAWToUVRow_Any_MMI;
    RAWToYRow = RAWToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RAWToYRow = RAWToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        RAWToUVRow = RAWToUVRow_MMI;
      }
    }
  }
// Other platforms do intermediate conversion from RAW to ARGB.
#else
#if defined(HAS_RAWTOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RAWToARGBRow = RAWToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RAWToARGBRow = RAWToARGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#endif

  {
#if !(defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \
      defined(HAS_RAWTOYROW_MMI))
    // Allocate 2 rows of ARGB.
    const int kRowSize = (width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
#endif

    for (y = 0; y < height - 1; y += 2) {
#if (defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \
     defined(HAS_RAWTOYROW_MMI))
      RAWToUVRow(src_raw, src_stride_raw, dst_u, dst_v, width);
      RAWToYRow(src_raw, dst_y, width);
      RAWToYRow(src_raw + src_stride_raw, dst_y + dst_stride_y, width);
#else
      RAWToARGBRow(src_raw, row, width);
      RAWToARGBRow(src_raw + src_stride_raw, row + kRowSize, width);
      ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
      ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
#endif
      src_raw += src_stride_raw * 2;
      dst_y += dst_stride_y * 2;
      dst_u += dst_stride_u;
      dst_v += dst_stride_v;
    }
    if (height & 1) {
#if (defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \
     defined(HAS_RAWTOYROW_MMI))
      RAWToUVRow(src_raw, 0, dst_u, dst_v, width);
      RAWToYRow(src_raw, dst_y, width);
#else
      RAWToARGBRow(src_raw, row, width);
      ARGBToUVRow(row, 0, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
#endif
    }
#if !(defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \
      defined(HAS_RAWTOYROW_MMI))
    free_aligned_buffer_64(row);
#endif
  }
  return 0;
}
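
// Illustrative note: in libyuv naming, RAW is 3 bytes per pixel like RGB24
// but with the opposite byte order (r,g,b in memory rather than b,g,r), so
// the stride math is unchanged. Sketch with assumed buffers, not library
// code:
//
//   RAWToI420(raw, width * 3,
//             y_plane, width, u_plane, (width + 1) / 2,
//             v_plane, (width + 1) / 2, width, height);
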
// Convert RGB565 to I420.
LIBYUV_API
int RGB565ToI420(const uint8_t* src_rgb565,
                 int src_stride_rgb565,
                 uint8_t* dst_y,
                 int dst_stride_y,
                 uint8_t* dst_u,
                 int dst_stride_u,
                 uint8_t* dst_v,
                 int dst_stride_v,
                 int width,
                 int height) {
  int y;
#if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \
     defined(HAS_RGB565TOYROW_MMI))
  void (*RGB565ToUVRow)(const uint8_t* src_rgb565, int src_stride_rgb565,
                        uint8_t* dst_u, uint8_t* dst_v, int width) =
      RGB565ToUVRow_C;
  void (*RGB565ToYRow)(const uint8_t* src_rgb565, uint8_t* dst_y, int width) =
      RGB565ToYRow_C;
#else
  void (*RGB565ToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb,
                          int width) = RGB565ToARGBRow_C;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
#endif
  if (!src_rgb565 || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgb565 = src_rgb565 + (height - 1) * src_stride_rgb565;
    src_stride_rgb565 = -src_stride_rgb565;
  }

// NEON, MSA and MMI versions do direct RGB565 to YUV.
#if defined(HAS_RGB565TOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGB565ToUVRow = RGB565ToUVRow_Any_NEON;
    RGB565ToYRow = RGB565ToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGB565ToYRow = RGB565ToYRow_NEON;
      if (IS_ALIGNED(width, 16)) {
        RGB565ToUVRow = RGB565ToUVRow_NEON;
      }
    }
  }
#elif defined(HAS_RGB565TOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGB565ToUVRow = RGB565ToUVRow_Any_MSA;
    RGB565ToYRow = RGB565ToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGB565ToYRow = RGB565ToYRow_MSA;
      RGB565ToUVRow = RGB565ToUVRow_MSA;
    }
  }
#elif defined(HAS_RGB565TOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGB565ToUVRow = RGB565ToUVRow_Any_MMI;
    RGB565ToYRow = RGB565ToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGB565ToYRow = RGB565ToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        RGB565ToUVRow = RGB565ToUVRow_MMI;
      }
    }
  }
// Other platforms do intermediate conversion from RGB565 to ARGB.
#else
#if defined(HAS_RGB565TOARGBROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    RGB565ToARGBRow = RGB565ToARGBRow_Any_SSE2;
    if (IS_ALIGNED(width, 8)) {
      RGB565ToARGBRow = RGB565ToARGBRow_SSE2;
    }
  }
#endif
#if defined(HAS_RGB565TOARGBROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    RGB565ToARGBRow = RGB565ToARGBRow_Any_AVX2;
    if (IS_ALIGNED(width, 16)) {
      RGB565ToARGBRow = RGB565ToARGBRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#endif

  {
#if !(defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \
      defined(HAS_RGB565TOYROW_MMI))
    // Allocate 2 rows of ARGB.
    const int kRowSize = (width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
#endif

    for (y = 0; y < height - 1; y += 2) {
#if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \
     defined(HAS_RGB565TOYROW_MMI))
      RGB565ToUVRow(src_rgb565, src_stride_rgb565, dst_u, dst_v, width);
      RGB565ToYRow(src_rgb565, dst_y, width);
      RGB565ToYRow(src_rgb565 + src_stride_rgb565, dst_y + dst_stride_y,
                   width);
#else
      RGB565ToARGBRow(src_rgb565, row, width);
      RGB565ToARGBRow(src_rgb565 + src_stride_rgb565, row + kRowSize, width);
      ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
      ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
#endif
      src_rgb565 += src_stride_rgb565 * 2;
      dst_y += dst_stride_y * 2;
      dst_u += dst_stride_u;
      dst_v += dst_stride_v;
    }
    if (height & 1) {
#if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \
     defined(HAS_RGB565TOYROW_MMI))
      RGB565ToUVRow(src_rgb565, 0, dst_u, dst_v, width);
      RGB565ToYRow(src_rgb565, dst_y, width);
#else
      RGB565ToARGBRow(src_rgb565, row, width);
      ARGBToUVRow(row, 0, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
#endif
    }
#if !(defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \
      defined(HAS_RGB565TOYROW_MMI))
    free_aligned_buffer_64(row);
#endif
  }
  return 0;
}
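
// Illustrative sketch (assumed buffers, not library code): RGB565 packs each
// pixel into 16 bits, so a tightly packed source has a stride of width * 2
// bytes:
//
//   RGB565ToI420(rgb565, width * 2,
//                y_plane, width, u_plane, (width + 1) / 2,
//                v_plane, (width + 1) / 2, width, height);
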
// Convert ARGB1555 to I420.
LIBYUV_API
int ARGB1555ToI420(const uint8_t* src_argb1555,
                   int src_stride_argb1555,
                   uint8_t* dst_y,
                   int dst_stride_y,
                   uint8_t* dst_u,
                   int dst_stride_u,
                   uint8_t* dst_v,
                   int dst_stride_v,
                   int width,
                   int height) {
  int y;
#if (defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \
     defined(HAS_ARGB1555TOYROW_MMI))
  void (*ARGB1555ToUVRow)(const uint8_t* src_argb1555, int src_stride_argb1555,
                          uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGB1555ToUVRow_C;
  void (*ARGB1555ToYRow)(const uint8_t* src_argb1555, uint8_t* dst_y,
                         int width) = ARGB1555ToYRow_C;
#else
  void (*ARGB1555ToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb,
                            int width) = ARGB1555ToARGBRow_C;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
#endif
  if (!src_argb1555 || !dst_y || !dst_u || !dst_v || width <= 0 ||
      height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb1555 = src_argb1555 + (height - 1) * src_stride_argb1555;
    src_stride_argb1555 = -src_stride_argb1555;
  }

// NEON, MSA and MMI versions do direct ARGB1555 to YUV.
#if defined(HAS_ARGB1555TOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGB1555ToUVRow = ARGB1555ToUVRow_Any_NEON;
    ARGB1555ToYRow = ARGB1555ToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGB1555ToYRow = ARGB1555ToYRow_NEON;
      if (IS_ALIGNED(width, 16)) {
        ARGB1555ToUVRow = ARGB1555ToUVRow_NEON;
      }
    }
  }
#elif defined(HAS_ARGB1555TOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGB1555ToUVRow = ARGB1555ToUVRow_Any_MSA;
    ARGB1555ToYRow = ARGB1555ToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGB1555ToYRow = ARGB1555ToYRow_MSA;
      ARGB1555ToUVRow = ARGB1555ToUVRow_MSA;
    }
  }
#elif defined(HAS_ARGB1555TOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGB1555ToUVRow = ARGB1555ToUVRow_Any_MMI;
    ARGB1555ToYRow = ARGB1555ToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGB1555ToYRow = ARGB1555ToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        ARGB1555ToUVRow = ARGB1555ToUVRow_MMI;
      }
    }
  }
// Other platforms do intermediate conversion from ARGB1555 to ARGB.
#else
#if defined(HAS_ARGB1555TOARGBROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_SSE2;
    if (IS_ALIGNED(width, 8)) {
      ARGB1555ToARGBRow = ARGB1555ToARGBRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGB1555TOARGBROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_AVX2;
    if (IS_ALIGNED(width, 16)) {
      ARGB1555ToARGBRow = ARGB1555ToARGBRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#endif

  {
#if !(defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \
      defined(HAS_ARGB1555TOYROW_MMI))
    // Allocate 2 rows of ARGB.
    const int kRowSize = (width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
#endif

    for (y = 0; y < height - 1; y += 2) {
#if (defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \
     defined(HAS_ARGB1555TOYROW_MMI))
      ARGB1555ToUVRow(src_argb1555, src_stride_argb1555, dst_u, dst_v, width);
      ARGB1555ToYRow(src_argb1555, dst_y, width);
      ARGB1555ToYRow(src_argb1555 + src_stride_argb1555, dst_y + dst_stride_y,
                     width);
#else
      ARGB1555ToARGBRow(src_argb1555, row, width);
      ARGB1555ToARGBRow(src_argb1555 + src_stride_argb1555, row + kRowSize,
                        width);
      ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
      ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
#endif
      src_argb1555 += src_stride_argb1555 * 2;
      dst_y += dst_stride_y * 2;
      dst_u += dst_stride_u;
      dst_v += dst_stride_v;
    }
    if (height & 1) {
#if (defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \
     defined(HAS_ARGB1555TOYROW_MMI))
      ARGB1555ToUVRow(src_argb1555, 0, dst_u, dst_v, width);
      ARGB1555ToYRow(src_argb1555, dst_y, width);
#else
      ARGB1555ToARGBRow(src_argb1555, row, width);
      ARGBToUVRow(row, 0, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
#endif
    }
#if !(defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \
      defined(HAS_ARGB1555TOYROW_MMI))
    free_aligned_buffer_64(row);
#endif
  }
  return 0;
}
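
// Note: ARGB1555 is also 16 bits per pixel (1-bit alpha, 5 bits per colour
// channel), so the packed-source stride is again width * 2 bytes; the alpha
// bit does not contribute to the YUV output, which carries no alpha plane.
// Illustrative call with assumed buffers:
//
//   ARGB1555ToI420(argb1555, width * 2, y_plane, width,
//                  u_plane, (width + 1) / 2, v_plane, (width + 1) / 2,
//                  width, height);
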
// Convert ARGB4444 to I420.
LIBYUV_API
int ARGB4444ToI420(const uint8_t* src_argb4444,
                   int src_stride_argb4444,
                   uint8_t* dst_y,
                   int dst_stride_y,
                   uint8_t* dst_u,
                   int dst_stride_u,
                   uint8_t* dst_v,
                   int dst_stride_v,
                   int width,
                   int height) {
  int y;
#if (defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI))
  void (*ARGB4444ToUVRow)(const uint8_t* src_argb4444, int src_stride_argb4444,
                          uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGB4444ToUVRow_C;
  void (*ARGB4444ToYRow)(const uint8_t* src_argb4444, uint8_t* dst_y,
                         int width) = ARGB4444ToYRow_C;
#else
  void (*ARGB4444ToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb,
                            int width) = ARGB4444ToARGBRow_C;
  void (*ARGBToUVRow)(const uint8_t* src_argb0, int src_stride_argb,
                      uint8_t* dst_u, uint8_t* dst_v, int width) =
      ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
#endif
  if (!src_argb4444 || !dst_y || !dst_u || !dst_v || width <= 0 ||
      height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb4444 = src_argb4444 + (height - 1) * src_stride_argb4444;
    src_stride_argb4444 = -src_stride_argb4444;
  }

// NEON and MMI versions do direct ARGB4444 to YUV.
#if defined(HAS_ARGB4444TOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGB4444ToUVRow = ARGB4444ToUVRow_Any_NEON;
    ARGB4444ToYRow = ARGB4444ToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGB4444ToYRow = ARGB4444ToYRow_NEON;
      if (IS_ALIGNED(width, 16)) {
        ARGB4444ToUVRow = ARGB4444ToUVRow_NEON;
      }
    }
  }
#elif defined(HAS_ARGB4444TOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGB4444ToUVRow = ARGB4444ToUVRow_Any_MMI;
    ARGB4444ToYRow = ARGB4444ToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGB4444ToYRow = ARGB4444ToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        ARGB4444ToUVRow = ARGB4444ToUVRow_MMI;
      }
    }
  }
// Other platforms do intermediate conversion from ARGB4444 to ARGB.
#else
#if defined(HAS_ARGB4444TOARGBROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_SSE2;
    if (IS_ALIGNED(width, 8)) {
      ARGB4444ToARGBRow = ARGB4444ToARGBRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGB4444TOARGBROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_AVX2;
    if (IS_ALIGNED(width, 16)) {
      ARGB4444ToARGBRow = ARGB4444ToARGBRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGB4444TOARGBROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGB4444ToARGBRow = ARGB4444ToARGBRow_MSA;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBToUVRow = ARGBToUVRow_Any_MSA;
    ARGBToYRow = ARGBToYRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_MSA;
      if (IS_ALIGNED(width, 32)) {
        ARGBToUVRow = ARGBToUVRow_MSA;
      }
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    ARGBToUVRow = ARGBToUVRow_Any_MMI;
    ARGBToYRow = ARGBToYRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_MMI;
      if (IS_ALIGNED(width, 16)) {
        ARGBToUVRow = ARGBToUVRow_MMI;
      }
    }
  }
#endif
#endif

  {
#if !(defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI))
    // Allocate 2 rows of ARGB.
    const int kRowSize = (width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
#endif

    for (y = 0; y < height - 1; y += 2) {
#if (defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI))
      ARGB4444ToUVRow(src_argb4444, src_stride_argb4444, dst_u, dst_v, width);
      ARGB4444ToYRow(src_argb4444, dst_y, width);
      ARGB4444ToYRow(src_argb4444 + src_stride_argb4444, dst_y + dst_stride_y,
                     width);
#else
      ARGB4444ToARGBRow(src_argb4444, row, width);
      ARGB4444ToARGBRow(src_argb4444 + src_stride_argb4444, row + kRowSize,
                        width);
      ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
      ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
#endif
      src_argb4444 += src_stride_argb4444 * 2;
      dst_y += dst_stride_y * 2;
      dst_u += dst_stride_u;
      dst_v += dst_stride_v;
    }
    if (height & 1) {
#if (defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI))
      ARGB4444ToUVRow(src_argb4444, 0, dst_u, dst_v, width);
      ARGB4444ToYRow(src_argb4444, dst_y, width);
#else
      ARGB4444ToARGBRow(src_argb4444, row, width);
      ARGBToUVRow(row, 0, dst_u, dst_v, width);
      ARGBToYRow(row, dst_y, width);
#endif
    }
#if !(defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI))
    free_aligned_buffer_64(row);
#endif
  }
  return 0;
}
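
// Note on the fallback paths above: kRowSize = (width * 4 + 31) & ~31 rounds
// the ARGB scratch-row size up to a multiple of 32 bytes so that both rows
// handed to the SIMD kernels stay 32-byte aligned. Worked example:
// width = 100 -> 100 * 4 = 400 bytes -> (400 + 31) & ~31 = 416 bytes per row.
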
// Convert RGB24 to J400.
LIBYUV_API
int RGB24ToJ400(const uint8_t* src_rgb24,
                int src_stride_rgb24,
                uint8_t* dst_yj,
                int dst_stride_yj,
                int width,
                int height) {
  int y;
#if (defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
     defined(HAS_RGB24TOYJROW_MMI))
  void (*RGB24ToYJRow)(const uint8_t* src_rgb24, uint8_t* dst_yj, int width) =
      RGB24ToYJRow_C;
#else
  void (*RGB24ToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb,
                         int width) = RGB24ToARGBRow_C;
  void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) =
      ARGBToYJRow_C;
#endif
  if (!src_rgb24 || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_rgb24 = src_rgb24 + (height - 1) * src_stride_rgb24;
    src_stride_rgb24 = -src_stride_rgb24;
  }

// NEON, MSA and MMI versions do direct RGB24 to YJ.
#if defined(HAS_RGB24TOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    RGB24ToYJRow = RGB24ToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      RGB24ToYJRow = RGB24ToYJRow_NEON;
    }
  }
#elif defined(HAS_RGB24TOYJROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    RGB24ToYJRow = RGB24ToYJRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      RGB24ToYJRow = RGB24ToYJRow_MSA;
    }
  }
#elif defined(HAS_RGB24TOYJROW_MMI)
  if (TestCpuFlag(kCpuHasMMI)) {
    RGB24ToYJRow = RGB24ToYJRow_Any_MMI;
    if (IS_ALIGNED(width, 8)) {
      RGB24ToYJRow = RGB24ToYJRow_MMI;
    }
  }
// Other platforms do intermediate conversion from RGB24 to ARGB.
#else
#if defined(HAS_RGB24TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      RGB24ToARGBRow = RGB24ToARGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#endif

  {
#if !(defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
      defined(HAS_RGB24TOYJROW_MMI))
    // Allocate 2 rows of ARGB.
    const int kRowSize = (width * 4 + 31) & ~31;
    align_buffer_64(row, kRowSize * 2);
#endif

    for (y = 0; y < height - 1; y += 2) {
#if (defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
     defined(HAS_RGB24TOYJROW_MMI))
      RGB24ToYJRow(src_rgb24, dst_yj, width);
      RGB24ToYJRow(src_rgb24 + src_stride_rgb24, dst_yj + dst_stride_yj,
                   width);
#else
      RGB24ToARGBRow(src_rgb24, row, width);
      RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + kRowSize, width);
      ARGBToYJRow(row, dst_yj, width);
      ARGBToYJRow(row + kRowSize, dst_yj + dst_stride_yj, width);
#endif
      src_rgb24 += src_stride_rgb24 * 2;
      dst_yj += dst_stride_yj * 2;
    }
    if (height & 1) {
#if (defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
     defined(HAS_RGB24TOYJROW_MMI))
      RGB24ToYJRow(src_rgb24, dst_yj, width);
#else
      RGB24ToARGBRow(src_rgb24, row, width);
      ARGBToYJRow(row, dst_yj, width);
#endif
    }
#if !(defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) || \
      defined(HAS_RGB24TOYJROW_MMI))
    free_aligned_buffer_64(row);
#endif
  }
  return 0;
}
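
// Illustrative sketch (assumed buffers, not library code): J400 is a single
// full-range luma plane, so this call is effectively RGB24 to grayscale:
//
//   RGB24ToJ400(rgb24, width * 3, gray, width, width, height);
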
static void SplitPixels(const uint8_t* src_u,
                        int src_pixel_stride_uv,
                        uint8_t* dst_u,
                        int width) {
  int i;
  for (i = 0; i < width; ++i) {
    *dst_u = *src_u;
    ++dst_u;
    src_u += src_pixel_stride_uv;
  }
}
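
// E.g. with src_pixel_stride_uv == 2, SplitPixels gathers every second byte:
// given interleaved bytes {u0, v0, u1, v1, ...} and src_u pointing at u0, it
// writes {u0, u1, ...} to dst_u.
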
// Convert Android420 to I420.
LIBYUV_API
int Android420ToI420(const uint8_t* src_y,
                     int src_stride_y,
                     const uint8_t* src_u,
                     int src_stride_u,
                     const uint8_t* src_v,
                     int src_stride_v,
                     int src_pixel_stride_uv,
                     uint8_t* dst_y,
                     int dst_stride_y,
                     uint8_t* dst_u,
                     int dst_stride_u,
                     uint8_t* dst_v,
                     int dst_stride_v,
                     int width,
                     int height) {
  int y;
  const ptrdiff_t vu_off = src_v - src_u;
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }

  if (dst_y) {
    CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
  }

  // Copy UV planes as-is - I420.
  if (src_pixel_stride_uv == 1) {
    CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight);
    CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight);
    return 0;
  }
  // Split UV planes - NV21.
  if (src_pixel_stride_uv == 2 && vu_off == -1 &&
      src_stride_u == src_stride_v) {
    SplitUVPlane(src_v, src_stride_v, dst_v, dst_stride_v, dst_u, dst_stride_u,
                 halfwidth, halfheight);
    return 0;
  }
  // Split UV planes - NV12.
  if (src_pixel_stride_uv == 2 && vu_off == 1 && src_stride_u == src_stride_v) {
    SplitUVPlane(src_u, src_stride_u, dst_u, dst_stride_u, dst_v, dst_stride_v,
                 halfwidth, halfheight);
    return 0;
  }

  // General case: gather any other pixel stride one byte at a time.
  for (y = 0; y < halfheight; ++y) {
    SplitPixels(src_u, src_pixel_stride_uv, dst_u, halfwidth);
    SplitPixels(src_v, src_pixel_stride_uv, dst_v, halfwidth);
    src_u += src_stride_u;
    src_v += src_stride_v;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
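
// Illustrative sketch, not library code: Android420ToI420 matches the layout
// reported by android.media.Image, where each chroma plane carries a row
// stride plus a per-pixel stride (1 = planar I420, 2 = interleaved
// NV12/NV21). Variable names below are assumptions for the example:
//
//   Android420ToI420(y_ptr, y_row_stride,    // planes[0]
//                    u_ptr, u_row_stride,    // planes[1]
//                    v_ptr, v_row_stride,    // planes[2]
//                    uv_pixel_stride,        // Plane.getPixelStride()
//                    dst_y, width,
//                    dst_u, (width + 1) / 2,
//                    dst_v, (width + 1) / 2, width, height);
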
#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif