swscale_vsx.c

  1. /*
  2. * AltiVec-enhanced yuv2yuvX
  3. *
  4. * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
  5. * based on the equivalent C code in swscale.c
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include <inttypes.h>
  24. #include "config.h"
  25. #include "libswscale/swscale.h"
  26. #include "libswscale/swscale_internal.h"
  27. #include "libavutil/attributes.h"
  28. #include "libavutil/cpu.h"
  29. #include "yuv2rgb_altivec.h"
  30. #include "libavutil/ppc/util_altivec.h"
  31. #if HAVE_VSX
  32. #define vzero vec_splat_s32(0)
  33. #if !HAVE_BIGENDIAN
  34. #define GET_LS(a,b,c,s) {\
  35. ls = a;\
  36. a = vec_vsx_ld(((b) << 1) + 16, s);\
  37. }
  38. #define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
  39. vector signed short ls;\
  40. vector signed int vf1, vf2, i1, i2;\
  41. GET_LS(l1, x, perm, src);\
  42. i1 = vec_mule(filter, ls);\
  43. i2 = vec_mulo(filter, ls);\
  44. vf1 = vec_mergeh(i1, i2);\
  45. vf2 = vec_mergel(i1, i2);\
  46. d1 = vec_add(d1, vf1);\
  47. d2 = vec_add(d2, vf2);\
  48. } while (0)
  49. #define LOAD_FILTER(vf,f) {\
  50. vf = vec_vsx_ld(joffset, f);\
  51. }
  52. #define LOAD_L1(ll1,s,p){\
  53. ll1 = vec_vsx_ld(xoffset, s);\
  54. }
  55. // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).
  56. // The neat trick: We only care for half the elements,
  57. // high or low depending on (i<<3)%16 (it's 0 or 8 here),
  58. // and we're going to use vec_mule, so we choose
  59. // carefully how to "unpack" the elements into the even slots.
  60. #define GET_VF4(a, vf, f) {\
  61. vf = (vector signed short)vec_vsx_ld(a << 3, f);\
  62. vf = vec_mergeh(vf, (vector signed short)vzero);\
  63. }
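/*
 * Illustrative note (not in the original source): GET_VF4 loads four 16-bit
 * filter taps and vec_mergeh with vzero interleaves them with zeros, so every
 * tap ends up in an even 16-bit slot. Since vec_mule multiplies only the even
 * slots, the taps are effectively pre-positioned for the multiply, e.g.:
 *
 *   loaded: f0 f1 f2 f3 x x x x      merged: f0 0 f1 0 f2 0 f3 0
 *   vec_mule(merged, pix) -> { f0*pix[0], f1*pix[2], f2*pix[4], f3*pix[6] }
 *
 * The exact pixel pairing depends on how the template uses the result; this
 * only sketches the even-lane behaviour of vec_mule.
 */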
  64. #define FIRST_LOAD(sv, pos, s, per) {}
  65. #define UPDATE_PTR(s0, d0, s1, d1) {}
  66. #define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
  67. vf = vec_vsx_ld(pos + a, s);\
  68. }
  69. #define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
  70. #define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
  71. vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
  72. }
  73. #define FUNC(name) name ## _vsx
  74. #include "swscale_ppc_template.c"
  75. #undef FUNC
  76. #undef vzero
  77. #endif /* !HAVE_BIGENDIAN */
  78. static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
  79. const uint8_t *dither, int offset, int start)
  80. {
  81. int i;
  82. for (i = start; i < dstW; i++) {
  83. int val = (src[i] + dither[(i + offset) & 7]) >> 7;
  84. dest[i] = av_clip_uint8(val);
  85. }
  86. }
  87. static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
  88. const uint8_t *dither, int offset)
  89. {
  90. const int dst_u = -(uintptr_t)dest & 15;
  91. int i, j;
  92. LOCAL_ALIGNED(16, int16_t, val, [16]);
  93. const vector uint16_t shifts = (vector uint16_t) {7, 7, 7, 7, 7, 7, 7, 7};
  94. vector int16_t vi, vileft, ditherleft, ditherright;
  95. vector uint8_t vd;
  96. for (j = 0; j < 16; j++) {
  97. val[j] = dither[(dst_u + offset + j) & 7];
  98. }
  99. ditherleft = vec_ld(0, val);
  100. ditherright = vec_ld(0, &val[8]);
  101. yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);
  102. for (i = dst_u; i < dstW - 15; i += 16) {
  103. vi = vec_vsx_ld(0, &src[i]);
  104. vi = vec_adds(ditherleft, vi);
  105. vileft = vec_sra(vi, shifts);
  106. vi = vec_vsx_ld(0, &src[i + 8]);
  107. vi = vec_adds(ditherright, vi);
  108. vi = vec_sra(vi, shifts);
  109. vd = vec_packsu(vileft, vi);
  110. vec_st(vd, 0, &dest[i]);
  111. }
  112. yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
  113. }
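/*
 * Note on a pattern repeated throughout this file (descriptive, assuming the
 * usual vec_st alignment rules): dst_u = -(uintptr_t)dest & 15 is the number
 * of bytes needed to reach the next 16-byte boundary. The scalar helper
 * (yuv2plane1_8_u above) covers that unaligned head plus any tail that does
 * not fill a whole vector, so the vec_st loop in the middle only stores to
 * aligned addresses. For example, a dest ending in 0x...6 gives dst_u = 10,
 * so the first 10 pixels take the scalar path.
 */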
  114. #if !HAVE_BIGENDIAN
  115. #define output_pixel(pos, val) \
  116. if (big_endian) { \
  117. AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
  118. } else { \
  119. AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
  120. }
  121. static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
  122. int big_endian, int output_bits, int start)
  123. {
  124. int i;
  125. int shift = 15 - output_bits;
  126. for (i = start; i < dstW; i++) {
  127. int val = src[i] + (1 << (shift - 1));
  128. output_pixel(&dest[i], val);
  129. }
  130. }
  131. static void yuv2plane1_nbps_vsx(const int16_t *src, uint16_t *dest, int dstW,
  132. int big_endian, int output_bits)
  133. {
  134. const int dst_u = -(uintptr_t)dest & 7;
  135. const int shift = 15 - output_bits;
  136. const int add = (1 << (shift - 1));
  137. const int clip = (1 << output_bits) - 1;
  138. const vector uint16_t vadd = (vector uint16_t) {add, add, add, add, add, add, add, add};
  139. const vector uint16_t vswap = (vector uint16_t) vec_splat_u16(big_endian ? 8 : 0);
  140. const vector uint16_t vshift = (vector uint16_t) vec_splat_u16(shift);
  141. const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
  142. vector uint16_t v;
  143. int i;
  144. yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);
  145. for (i = dst_u; i < dstW - 7; i += 8) {
  146. v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
  147. v = vec_add(v, vadd);
  148. v = vec_sr(v, vshift);
  149. v = vec_min(v, vlargest);
  150. v = vec_rl(v, vswap);
  151. vec_st(v, 0, &dest[i]);
  152. }
  153. yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
  154. }
  155. static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
  156. const int16_t **src, uint16_t *dest, int dstW,
  157. int big_endian, int output_bits, int start)
  158. {
  159. int i;
  160. int shift = 11 + 16 - output_bits;
  161. for (i = start; i < dstW; i++) {
  162. int val = 1 << (shift - 1);
  163. int j;
  164. for (j = 0; j < filterSize; j++)
  165. val += src[j][i] * filter[j];
  166. output_pixel(&dest[i], val);
  167. }
  168. }
  169. static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
  170. const int16_t **src, uint16_t *dest, int dstW,
  171. int big_endian, int output_bits)
  172. {
  173. const int dst_u = -(uintptr_t)dest & 7;
  174. const int shift = 11 + 16 - output_bits;
  175. const int add = (1 << (shift - 1));
  176. const int clip = (1 << output_bits) - 1;
  177. const uint16_t swap = big_endian ? 8 : 0;
  178. const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
  179. const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
  180. const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
  181. const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
  182. const vector int16_t vzero = vec_splat_s16(0);
  183. const vector uint8_t vperm = (vector uint8_t) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
  184. vector int16_t vfilter[MAX_FILTER_SIZE], vin;
  185. vector uint16_t v;
  186. vector uint32_t vleft, vright, vtmp;
  187. int i, j;
  188. for (i = 0; i < filterSize; i++) {
  189. vfilter[i] = (vector int16_t) {filter[i], filter[i], filter[i], filter[i],
  190. filter[i], filter[i], filter[i], filter[i]};
  191. }
  192. yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
  193. for (i = dst_u; i < dstW - 7; i += 8) {
  194. vleft = vright = vadd;
  195. for (j = 0; j < filterSize; j++) {
  196. vin = vec_vsx_ld(0, &src[j][i]);
  197. vtmp = (vector uint32_t) vec_mule(vin, vfilter[j]);
  198. vleft = vec_add(vleft, vtmp);
  199. vtmp = (vector uint32_t) vec_mulo(vin, vfilter[j]);
  200. vright = vec_add(vright, vtmp);
  201. }
  202. vleft = vec_sra(vleft, vshift);
  203. vright = vec_sra(vright, vshift);
  204. v = vec_packsu(vleft, vright);
  205. v = (vector uint16_t) vec_max((vector int16_t) v, vzero);
  206. v = vec_min(v, vlargest);
  207. v = vec_rl(v, vswap);
  208. v = vec_perm(v, v, vperm);
  209. vec_st(v, 0, &dest[i]);
  210. }
  211. yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
  212. }
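/*
 * Lane-order note (illustrative): vleft accumulates the vec_mule (even-lane)
 * products and vright the vec_mulo (odd-lane) products, so vec_packsu(vleft,
 * vright) yields the pixels in the order 0 2 4 6 1 3 5 7. The final vec_perm
 * with vperm restores sequential order before the store, and vec_rl by 8 bits
 * byte-swaps each 16-bit lane when big-endian output was requested.
 */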
  213. #undef output_pixel
  214. #define output_pixel(pos, val, bias, signedness) \
  215. if (big_endian) { \
  216. AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
  217. } else { \
  218. AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
  219. }
  220. static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
  221. int big_endian, int output_bits, int start)
  222. {
  223. int i;
  224. const int shift = 3;
  225. for (i = start; i < dstW; i++) {
  226. int val = src[i] + (1 << (shift - 1));
  227. output_pixel(&dest[i], val, 0, uint);
  228. }
  229. }
  230. static void yuv2plane1_16_vsx(const int32_t *src, uint16_t *dest, int dstW,
  231. int big_endian, int output_bits)
  232. {
  233. const int dst_u = -(uintptr_t)dest & 7;
  234. const int shift = 3;
  235. const int add = (1 << (shift - 1));
  236. const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
  237. const vector uint16_t vswap = (vector uint16_t) vec_splat_u16(big_endian ? 8 : 0);
  238. const vector uint32_t vshift = (vector uint32_t) vec_splat_u32(shift);
  239. vector uint32_t v, v2;
  240. vector uint16_t vd;
  241. int i;
  242. yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);
  243. for (i = dst_u; i < dstW - 7; i += 8) {
  244. v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
  245. v = vec_add(v, vadd);
  246. v = vec_sr(v, vshift);
  247. v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
  248. v2 = vec_add(v2, vadd);
  249. v2 = vec_sr(v2, vshift);
  250. vd = vec_packsu(v, v2);
  251. vd = vec_rl(vd, vswap);
  252. vec_st(vd, 0, &dest[i]);
  253. }
  254. yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
  255. }
  256. #if HAVE_POWER8
  257. static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
  258. const int32_t **src, uint16_t *dest, int dstW,
  259. int big_endian, int output_bits, int start)
  260. {
  261. int i;
  262. int shift = 15;
  263. for (i = start; i < dstW; i++) {
  264. int val = 1 << (shift - 1);
  265. int j;
   266. /* The range of val is [0,0x7FFFFFFF], i.e. 31 bits, but with lanczos/spline
   267. * filters (or anything with negative coeffs) the range can be slightly
   268. * wider in both directions. To account for this overflow, we subtract
   269. * a constant so it always fits in the signed range (assuming a
   270. * reasonable filterSize), and re-add that constant at the end. */
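/* Worked through with illustrative numbers (not from the original source):
 * if val would reach about 1 << 31, subtracting 0x40000000 here keeps the
 * accumulator in signed range. output_pixel() shifts right by 15, turning the
 * -0x40000000 into -0x8000, and the 0x8000 bias passed to it re-adds exactly
 * that amount, so the written pixel is unchanged by the detour. */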
  271. val -= 0x40000000;
  272. for (j = 0; j < filterSize; j++)
  273. val += src[j][i] * (unsigned)filter[j];
  274. output_pixel(&dest[i], val, 0x8000, int);
  275. }
  276. }
  277. static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
  278. const int32_t **src, uint16_t *dest, int dstW,
  279. int big_endian, int output_bits)
  280. {
  281. const int dst_u = -(uintptr_t)dest & 7;
  282. const int shift = 15;
  283. const int bias = 0x8000;
  284. const int add = (1 << (shift - 1)) - 0x40000000;
  285. const uint16_t swap = big_endian ? 8 : 0;
  286. const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
  287. const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
  288. const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
  289. const vector uint16_t vbias = (vector uint16_t) {bias, bias, bias, bias, bias, bias, bias, bias};
  290. vector int32_t vfilter[MAX_FILTER_SIZE];
  291. vector uint16_t v;
  292. vector uint32_t vleft, vright, vtmp;
  293. vector int32_t vin32l, vin32r;
  294. int i, j;
  295. for (i = 0; i < filterSize; i++) {
  296. vfilter[i] = (vector int32_t) {filter[i], filter[i], filter[i], filter[i]};
  297. }
  298. yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
  299. for (i = dst_u; i < dstW - 7; i += 8) {
  300. vleft = vright = vadd;
  301. for (j = 0; j < filterSize; j++) {
  302. vin32l = vec_vsx_ld(0, &src[j][i]);
  303. vin32r = vec_vsx_ld(0, &src[j][i + 4]);
  304. vtmp = (vector uint32_t) vec_mul(vin32l, vfilter[j]);
  305. vleft = vec_add(vleft, vtmp);
  306. vtmp = (vector uint32_t) vec_mul(vin32r, vfilter[j]);
  307. vright = vec_add(vright, vtmp);
  308. }
  309. vleft = vec_sra(vleft, vshift);
  310. vright = vec_sra(vright, vshift);
  311. v = (vector uint16_t) vec_packs((vector int32_t) vleft, (vector int32_t) vright);
  312. v = vec_add(v, vbias);
  313. v = vec_rl(v, vswap);
  314. vec_st(v, 0, &dest[i]);
  315. }
  316. yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
  317. }
  318. #endif /* HAVE_POWER8 */
  319. #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
  320. yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
  321. yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)
  322. #define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
  323. static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
  324. uint8_t *dest, int dstW, \
  325. const uint8_t *dither, int offset) \
  326. { \
  327. yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
  328. (uint16_t *) dest, dstW, is_be, bits); \
  329. }
  330. #define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
  331. static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
  332. const int16_t **src, uint8_t *dest, int dstW, \
  333. const uint8_t *dither, int offset)\
  334. { \
  335. yuv2planeX_## template_size ## _vsx(filter, \
  336. filterSize, (const typeX_t **) src, \
  337. (uint16_t *) dest, dstW, is_be, bits); \
  338. }
  339. yuv2NBPS( 9, BE, 1, nbps, int16_t)
  340. yuv2NBPS( 9, LE, 0, nbps, int16_t)
  341. yuv2NBPS(10, BE, 1, nbps, int16_t)
  342. yuv2NBPS(10, LE, 0, nbps, int16_t)
  343. yuv2NBPS(12, BE, 1, nbps, int16_t)
  344. yuv2NBPS(12, LE, 0, nbps, int16_t)
  345. yuv2NBPS(14, BE, 1, nbps, int16_t)
  346. yuv2NBPS(14, LE, 0, nbps, int16_t)
  347. yuv2NBPS1(16, BE, 1, 16, int32_t)
  348. yuv2NBPS1(16, LE, 0, 16, int32_t)
  349. #if HAVE_POWER8
  350. yuv2NBPSX(16, BE, 1, 16, int32_t)
  351. yuv2NBPSX(16, LE, 0, 16, int32_t)
  352. #endif
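/*
 * For reference, yuv2NBPS(9, LE, 0, nbps, int16_t) above expands to two thin
 * wrappers, roughly:
 *
 *   static void yuv2plane1_9LE_vsx(const int16_t *src, uint8_t *dest, int dstW,
 *                                  const uint8_t *dither, int offset)
 *   {
 *       yuv2plane1_nbps_vsx((const int16_t *) src, (uint16_t *) dest, dstW, 0, 9);
 *   }
 *
 * plus the matching yuv2planeX_9LE_vsx forwarding to yuv2planeX_nbps_vsx; the
 * 16-bit variants follow the same scheme with int32_t sources.
 */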
  353. #define WRITERGB \
  354. R_l = vec_max(R_l, zero32); \
  355. R_r = vec_max(R_r, zero32); \
  356. G_l = vec_max(G_l, zero32); \
  357. G_r = vec_max(G_r, zero32); \
  358. B_l = vec_max(B_l, zero32); \
  359. B_r = vec_max(B_r, zero32); \
  360. \
  361. R_l = vec_min(R_l, rgbclip); \
  362. R_r = vec_min(R_r, rgbclip); \
  363. G_l = vec_min(G_l, rgbclip); \
  364. G_r = vec_min(G_r, rgbclip); \
  365. B_l = vec_min(B_l, rgbclip); \
  366. B_r = vec_min(B_r, rgbclip); \
  367. \
  368. R_l = vec_sr(R_l, shift22); \
  369. R_r = vec_sr(R_r, shift22); \
  370. G_l = vec_sr(G_l, shift22); \
  371. G_r = vec_sr(G_r, shift22); \
  372. B_l = vec_sr(B_l, shift22); \
  373. B_r = vec_sr(B_r, shift22); \
  374. \
  375. rd16 = vec_packsu(R_l, R_r); \
  376. gd16 = vec_packsu(G_l, G_r); \
  377. bd16 = vec_packsu(B_l, B_r); \
  378. rd = vec_packsu(rd16, zero16); \
  379. gd = vec_packsu(gd16, zero16); \
  380. bd = vec_packsu(bd16, zero16); \
  381. \
  382. switch(target) { \
  383. case AV_PIX_FMT_RGB24: \
  384. out0 = vec_perm(rd, gd, perm3rg0); \
  385. out0 = vec_perm(out0, bd, perm3tb0); \
  386. out1 = vec_perm(rd, gd, perm3rg1); \
  387. out1 = vec_perm(out1, bd, perm3tb1); \
  388. \
  389. vec_vsx_st(out0, 0, dest); \
  390. vec_vsx_st(out1, 16, dest); \
  391. \
  392. dest += 24; \
  393. break; \
  394. case AV_PIX_FMT_BGR24: \
  395. out0 = vec_perm(bd, gd, perm3rg0); \
  396. out0 = vec_perm(out0, rd, perm3tb0); \
  397. out1 = vec_perm(bd, gd, perm3rg1); \
  398. out1 = vec_perm(out1, rd, perm3tb1); \
  399. \
  400. vec_vsx_st(out0, 0, dest); \
  401. vec_vsx_st(out1, 16, dest); \
  402. \
  403. dest += 24; \
  404. break; \
  405. case AV_PIX_FMT_BGRA: \
  406. out0 = vec_mergeh(bd, gd); \
  407. out1 = vec_mergeh(rd, ad); \
  408. \
  409. tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
  410. vec_vsx_st(tmp8, 0, dest); \
  411. tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
  412. vec_vsx_st(tmp8, 16, dest); \
  413. \
  414. dest += 32; \
  415. break; \
  416. case AV_PIX_FMT_RGBA: \
  417. out0 = vec_mergeh(rd, gd); \
  418. out1 = vec_mergeh(bd, ad); \
  419. \
  420. tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
  421. vec_vsx_st(tmp8, 0, dest); \
  422. tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
  423. vec_vsx_st(tmp8, 16, dest); \
  424. \
  425. dest += 32; \
  426. break; \
  427. case AV_PIX_FMT_ARGB: \
  428. out0 = vec_mergeh(ad, rd); \
  429. out1 = vec_mergeh(gd, bd); \
  430. \
  431. tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
  432. vec_vsx_st(tmp8, 0, dest); \
  433. tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
  434. vec_vsx_st(tmp8, 16, dest); \
  435. \
  436. dest += 32; \
  437. break; \
  438. case AV_PIX_FMT_ABGR: \
  439. out0 = vec_mergeh(ad, bd); \
  440. out1 = vec_mergeh(gd, rd); \
  441. \
  442. tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
  443. vec_vsx_st(tmp8, 0, dest); \
  444. tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
  445. vec_vsx_st(tmp8, 16, dest); \
  446. \
  447. dest += 32; \
  448. break; \
  449. }
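/*
 * Summary of WRITERGB (descriptive): each channel is clamped to [0, 1 << 30],
 * shifted right by 22 and packed down to unsigned bytes via 16-bit
 * intermediates. The 24-bit formats interleave R/G/B with the perm3*
 * permutation vectors and advance dest by 24 bytes per iteration; the 32-bit
 * formats interleave two channels at a time with vec_mergeh and advance by 32
 * bytes, with ad supplying the alpha byte (0xFF when there is no alpha plane).
 */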
  450. static av_always_inline void
  451. yuv2rgb_full_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
  452. const int16_t **lumSrc, int lumFilterSize,
  453. const int16_t *chrFilter, const int16_t **chrUSrc,
  454. const int16_t **chrVSrc, int chrFilterSize,
  455. const int16_t **alpSrc, uint8_t *dest,
  456. int dstW, int y, enum AVPixelFormat target, int hasAlpha)
  457. {
  458. vector int16_t vv;
  459. vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  460. vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
  461. vector int32_t tmp, tmp2, tmp3, tmp4;
  462. vector uint16_t rd16, gd16, bd16;
  463. vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
  464. vector int16_t vlumFilter[MAX_FILTER_SIZE], vchrFilter[MAX_FILTER_SIZE];
  465. const vector int32_t ystart = vec_splats(1 << 9);
  466. const vector int32_t uvstart = vec_splats((1 << 9) - (128 << 19));
  467. const vector uint16_t zero16 = vec_splat_u16(0);
  468. const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
  469. const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  470. const vector int32_t y_add = vec_splats(1 << 21);
  471. const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  472. const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  473. const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  474. const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  475. const vector int32_t rgbclip = vec_splats(1 << 30);
  476. const vector int32_t zero32 = vec_splat_s32(0);
  477. const vector uint32_t shift22 = vec_splats(22U);
  478. const vector uint32_t shift10 = vec_splat_u32(10);
  479. int i, j;
  480. // Various permutations
  481. const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
  482. 0x1, 0x11, 0,
  483. 0x2, 0x12, 0,
  484. 0x3, 0x13, 0,
  485. 0x4, 0x14, 0,
  486. 0x5 };
  487. const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
  488. 0x6, 0x16, 0,
  489. 0x7, 0x17, 0 };
  490. const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
  491. 0x3, 0x4, 0x11,
  492. 0x6, 0x7, 0x12,
  493. 0x9, 0xa, 0x13,
  494. 0xc, 0xd, 0x14,
  495. 0xf };
  496. const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
  497. 0x2, 0x3, 0x16,
  498. 0x5, 0x6, 0x17 };
  499. ad = vec_splats((uint8_t) 255);
  500. for (i = 0; i < lumFilterSize; i++)
  501. vlumFilter[i] = vec_splats(lumFilter[i]);
  502. for (i = 0; i < chrFilterSize; i++)
  503. vchrFilter[i] = vec_splats(chrFilter[i]);
  504. for (i = 0; i < dstW; i += 8) {
  505. vy32_l =
  506. vy32_r = ystart;
  507. vu32_l =
  508. vu32_r =
  509. vv32_l =
  510. vv32_r = uvstart;
  511. for (j = 0; j < lumFilterSize; j++) {
  512. vv = vec_ld(0, &lumSrc[j][i]);
  513. tmp = vec_mule(vv, vlumFilter[j]);
  514. tmp2 = vec_mulo(vv, vlumFilter[j]);
  515. tmp3 = vec_mergeh(tmp, tmp2);
  516. tmp4 = vec_mergel(tmp, tmp2);
  517. vy32_l = vec_adds(vy32_l, tmp3);
  518. vy32_r = vec_adds(vy32_r, tmp4);
  519. }
  520. for (j = 0; j < chrFilterSize; j++) {
  521. vv = vec_ld(0, &chrUSrc[j][i]);
  522. tmp = vec_mule(vv, vchrFilter[j]);
  523. tmp2 = vec_mulo(vv, vchrFilter[j]);
  524. tmp3 = vec_mergeh(tmp, tmp2);
  525. tmp4 = vec_mergel(tmp, tmp2);
  526. vu32_l = vec_adds(vu32_l, tmp3);
  527. vu32_r = vec_adds(vu32_r, tmp4);
  528. vv = vec_ld(0, &chrVSrc[j][i]);
  529. tmp = vec_mule(vv, vchrFilter[j]);
  530. tmp2 = vec_mulo(vv, vchrFilter[j]);
  531. tmp3 = vec_mergeh(tmp, tmp2);
  532. tmp4 = vec_mergel(tmp, tmp2);
  533. vv32_l = vec_adds(vv32_l, tmp3);
  534. vv32_r = vec_adds(vv32_r, tmp4);
  535. }
  536. vy32_l = vec_sra(vy32_l, shift10);
  537. vy32_r = vec_sra(vy32_r, shift10);
  538. vu32_l = vec_sra(vu32_l, shift10);
  539. vu32_r = vec_sra(vu32_r, shift10);
  540. vv32_l = vec_sra(vv32_l, shift10);
  541. vv32_r = vec_sra(vv32_r, shift10);
  542. vy32_l = vec_sub(vy32_l, y_offset);
  543. vy32_r = vec_sub(vy32_r, y_offset);
  544. vy32_l = vec_mul(vy32_l, y_coeff);
  545. vy32_r = vec_mul(vy32_r, y_coeff);
  546. vy32_l = vec_add(vy32_l, y_add);
  547. vy32_r = vec_add(vy32_r, y_add);
  548. R_l = vec_mul(vv32_l, v2r_coeff);
  549. R_l = vec_add(R_l, vy32_l);
  550. R_r = vec_mul(vv32_r, v2r_coeff);
  551. R_r = vec_add(R_r, vy32_r);
  552. G_l = vec_mul(vv32_l, v2g_coeff);
  553. tmp32 = vec_mul(vu32_l, u2g_coeff);
  554. G_l = vec_add(G_l, vy32_l);
  555. G_l = vec_add(G_l, tmp32);
  556. G_r = vec_mul(vv32_r, v2g_coeff);
  557. tmp32 = vec_mul(vu32_r, u2g_coeff);
  558. G_r = vec_add(G_r, vy32_r);
  559. G_r = vec_add(G_r, tmp32);
  560. B_l = vec_mul(vu32_l, u2b_coeff);
  561. B_l = vec_add(B_l, vy32_l);
  562. B_r = vec_mul(vu32_r, u2b_coeff);
  563. B_r = vec_add(B_r, vy32_r);
  564. WRITERGB
  565. }
  566. }
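/*
 * Scalar sketch of the per-pixel math above (illustrative only, same
 * fixed-point layout):
 *
 *   Y = ((1 << 9) + sum_j lumSrc[j][i] * lumFilter[j]) >> 10;
 *   U = ((1 << 9) - (128 << 19) + sum_j chrUSrc[j][i] * chrFilter[j]) >> 10;
 *   Y = (Y - y_offset) * y_coeff + (1 << 21);
 *   R = (Y + V * v2r_coeff)                 >> 22;
 *   G = (Y + V * v2g_coeff + U * u2g_coeff) >> 22;
 *   B = (Y + U * u2b_coeff)                 >> 22;
 *
 * with V computed like U from chrVSrc, and the WRITERGB clamp/pack steps
 * limiting each channel to an unsigned byte.
 */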
  567. #define SETUP(x, buf0, alpha1, buf1, alpha) { \
  568. x = vec_ld(0, buf0); \
  569. tmp = vec_mule(x, alpha1); \
  570. tmp2 = vec_mulo(x, alpha1); \
  571. tmp3 = vec_mergeh(tmp, tmp2); \
  572. tmp4 = vec_mergel(tmp, tmp2); \
  573. \
  574. x = vec_ld(0, buf1); \
  575. tmp = vec_mule(x, alpha); \
  576. tmp2 = vec_mulo(x, alpha); \
  577. tmp5 = vec_mergeh(tmp, tmp2); \
  578. tmp6 = vec_mergel(tmp, tmp2); \
  579. \
  580. tmp3 = vec_add(tmp3, tmp5); \
  581. tmp4 = vec_add(tmp4, tmp6); \
  582. }
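/*
 * SETUP(x, buf0, alpha1, buf1, alpha) blends two source lines: it forms the
 * 32-bit products buf0[i] * alpha1 and buf1[i] * alpha with vec_mule/vec_mulo,
 * restores element order with vec_mergeh/vec_mergel, and leaves the summed
 * low half (elements 0-3) in tmp3 and the high half (elements 4-7) in tmp4
 * for the caller to bias and shift.
 */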
  583. static av_always_inline void
  584. yuv2rgb_full_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  585. const int16_t *ubuf[2], const int16_t *vbuf[2],
  586. const int16_t *abuf[2], uint8_t *dest, int dstW,
  587. int yalpha, int uvalpha, int y,
  588. enum AVPixelFormat target, int hasAlpha)
  589. {
  590. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  591. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  592. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  593. *abuf0 = hasAlpha ? abuf[0] : NULL,
  594. *abuf1 = hasAlpha ? abuf[1] : NULL;
  595. const int16_t yalpha1 = 4096 - yalpha;
  596. const int16_t uvalpha1 = 4096 - uvalpha;
  597. vector int16_t vy, vu, vv, A = vec_splat_s16(0);
  598. vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  599. vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
  600. vector int32_t tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  601. vector uint16_t rd16, gd16, bd16;
  602. vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
  603. const vector int16_t vyalpha1 = vec_splats(yalpha1);
  604. const vector int16_t vuvalpha1 = vec_splats(uvalpha1);
  605. const vector int16_t vyalpha = vec_splats((int16_t) yalpha);
  606. const vector int16_t vuvalpha = vec_splats((int16_t) uvalpha);
  607. const vector uint16_t zero16 = vec_splat_u16(0);
  608. const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
  609. const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  610. const vector int32_t y_add = vec_splats(1 << 21);
  611. const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  612. const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  613. const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  614. const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  615. const vector int32_t rgbclip = vec_splats(1 << 30);
  616. const vector int32_t zero32 = vec_splat_s32(0);
  617. const vector uint32_t shift19 = vec_splats(19U);
  618. const vector uint32_t shift22 = vec_splats(22U);
  619. const vector uint32_t shift10 = vec_splat_u32(10);
  620. const vector int32_t dec128 = vec_splats(128 << 19);
  621. const vector int32_t add18 = vec_splats(1 << 18);
  622. int i;
  623. // Various permutations
  624. const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
  625. 0x1, 0x11, 0,
  626. 0x2, 0x12, 0,
  627. 0x3, 0x13, 0,
  628. 0x4, 0x14, 0,
  629. 0x5 };
  630. const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
  631. 0x6, 0x16, 0,
  632. 0x7, 0x17, 0 };
  633. const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
  634. 0x3, 0x4, 0x11,
  635. 0x6, 0x7, 0x12,
  636. 0x9, 0xa, 0x13,
  637. 0xc, 0xd, 0x14,
  638. 0xf };
  639. const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
  640. 0x2, 0x3, 0x16,
  641. 0x5, 0x6, 0x17 };
  642. av_assert2(yalpha <= 4096U);
  643. av_assert2(uvalpha <= 4096U);
  644. for (i = 0; i < dstW; i += 8) {
  645. SETUP(vy, &buf0[i], vyalpha1, &buf1[i], vyalpha);
  646. vy32_l = vec_sra(tmp3, shift10);
  647. vy32_r = vec_sra(tmp4, shift10);
  648. SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
  649. tmp3 = vec_sub(tmp3, dec128);
  650. tmp4 = vec_sub(tmp4, dec128);
  651. vu32_l = vec_sra(tmp3, shift10);
  652. vu32_r = vec_sra(tmp4, shift10);
  653. SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
  654. tmp3 = vec_sub(tmp3, dec128);
  655. tmp4 = vec_sub(tmp4, dec128);
  656. vv32_l = vec_sra(tmp3, shift10);
  657. vv32_r = vec_sra(tmp4, shift10);
  658. if (hasAlpha) {
  659. SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
  660. tmp3 = vec_add(tmp3, add18);
  661. tmp4 = vec_add(tmp4, add18);
  662. tmp3 = vec_sra(tmp3, shift19);
  663. tmp4 = vec_sra(tmp4, shift19);
  664. A = vec_packs(tmp3, tmp4);
  665. ad = vec_packsu(A, (vector int16_t) zero16);
  666. } else {
  667. ad = vec_splats((uint8_t) 255);
  668. }
  669. vy32_l = vec_sub(vy32_l, y_offset);
  670. vy32_r = vec_sub(vy32_r, y_offset);
  671. vy32_l = vec_mul(vy32_l, y_coeff);
  672. vy32_r = vec_mul(vy32_r, y_coeff);
  673. vy32_l = vec_add(vy32_l, y_add);
  674. vy32_r = vec_add(vy32_r, y_add);
  675. R_l = vec_mul(vv32_l, v2r_coeff);
  676. R_l = vec_add(R_l, vy32_l);
  677. R_r = vec_mul(vv32_r, v2r_coeff);
  678. R_r = vec_add(R_r, vy32_r);
  679. G_l = vec_mul(vv32_l, v2g_coeff);
  680. tmp32 = vec_mul(vu32_l, u2g_coeff);
  681. G_l = vec_add(G_l, vy32_l);
  682. G_l = vec_add(G_l, tmp32);
  683. G_r = vec_mul(vv32_r, v2g_coeff);
  684. tmp32 = vec_mul(vu32_r, u2g_coeff);
  685. G_r = vec_add(G_r, vy32_r);
  686. G_r = vec_add(G_r, tmp32);
  687. B_l = vec_mul(vu32_l, u2b_coeff);
  688. B_l = vec_add(B_l, vy32_l);
  689. B_r = vec_mul(vu32_r, u2b_coeff);
  690. B_r = vec_add(B_r, vy32_r);
  691. WRITERGB
  692. }
  693. }
  694. static av_always_inline void
  695. yuv2rgb_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  696. const int16_t *ubuf[2], const int16_t *vbuf[2],
  697. const int16_t *abuf[2], uint8_t *dest, int dstW,
  698. int yalpha, int uvalpha, int y,
  699. enum AVPixelFormat target, int hasAlpha)
  700. {
  701. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  702. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  703. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  704. *abuf0 = hasAlpha ? abuf[0] : NULL,
  705. *abuf1 = hasAlpha ? abuf[1] : NULL;
  706. const int16_t yalpha1 = 4096 - yalpha;
  707. const int16_t uvalpha1 = 4096 - uvalpha;
  708. vector int16_t vy, vu, vv, A = vec_splat_s16(0);
  709. vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  710. vector int32_t R_l, R_r, G_l, G_r, B_l, B_r, vud32_l, vud32_r, vvd32_l, vvd32_r;
  711. vector int32_t tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  712. vector uint16_t rd16, gd16, bd16;
  713. vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
  714. const vector int16_t vyalpha1 = vec_splats(yalpha1);
  715. const vector int16_t vuvalpha1 = vec_splats(uvalpha1);
  716. const vector int16_t vyalpha = vec_splats((int16_t) yalpha);
  717. const vector int16_t vuvalpha = vec_splats((int16_t) uvalpha);
  718. const vector uint16_t zero16 = vec_splat_u16(0);
  719. const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
  720. const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  721. const vector int32_t y_add = vec_splats(1 << 21);
  722. const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  723. const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  724. const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  725. const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  726. const vector int32_t rgbclip = vec_splats(1 << 30);
  727. const vector int32_t zero32 = vec_splat_s32(0);
  728. const vector uint32_t shift19 = vec_splats(19U);
  729. const vector uint32_t shift22 = vec_splats(22U);
  730. const vector uint32_t shift10 = vec_splat_u32(10);
  731. const vector int32_t dec128 = vec_splats(128 << 19);
  732. const vector int32_t add18 = vec_splats(1 << 18);
  733. int i;
  734. // Various permutations
  735. const vector uint8_t doubleleft = (vector uint8_t) {0, 1, 2, 3,
  736. 0, 1, 2, 3,
  737. 4, 5, 6, 7,
  738. 4, 5, 6, 7 };
  739. const vector uint8_t doubleright = (vector uint8_t) {8, 9, 10, 11,
  740. 8, 9, 10, 11,
  741. 12, 13, 14, 15,
  742. 12, 13, 14, 15 };
  743. const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
  744. 0x1, 0x11, 0,
  745. 0x2, 0x12, 0,
  746. 0x3, 0x13, 0,
  747. 0x4, 0x14, 0,
  748. 0x5 };
  749. const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
  750. 0x6, 0x16, 0,
  751. 0x7, 0x17, 0 };
  752. const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
  753. 0x3, 0x4, 0x11,
  754. 0x6, 0x7, 0x12,
  755. 0x9, 0xa, 0x13,
  756. 0xc, 0xd, 0x14,
  757. 0xf };
  758. const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
  759. 0x2, 0x3, 0x16,
  760. 0x5, 0x6, 0x17 };
  761. av_assert2(yalpha <= 4096U);
  762. av_assert2(uvalpha <= 4096U);
  763. for (i = 0; i < (dstW + 1) >> 1; i += 8) {
  764. SETUP(vy, &buf0[i * 2], vyalpha1, &buf1[i * 2], vyalpha);
  765. vy32_l = vec_sra(tmp3, shift10);
  766. vy32_r = vec_sra(tmp4, shift10);
  767. SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
  768. tmp3 = vec_sub(tmp3, dec128);
  769. tmp4 = vec_sub(tmp4, dec128);
  770. vu32_l = vec_sra(tmp3, shift10);
  771. vu32_r = vec_sra(tmp4, shift10);
  772. SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
  773. tmp3 = vec_sub(tmp3, dec128);
  774. tmp4 = vec_sub(tmp4, dec128);
  775. vv32_l = vec_sra(tmp3, shift10);
  776. vv32_r = vec_sra(tmp4, shift10);
  777. if (hasAlpha) {
  778. SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
  779. tmp3 = vec_add(tmp3, add18);
  780. tmp4 = vec_add(tmp4, add18);
  781. tmp3 = vec_sra(tmp3, shift19);
  782. tmp4 = vec_sra(tmp4, shift19);
  783. A = vec_packs(tmp3, tmp4);
  784. ad = vec_packsu(A, (vector int16_t) zero16);
  785. } else {
  786. ad = vec_splats((uint8_t) 255);
  787. }
  788. vy32_l = vec_sub(vy32_l, y_offset);
  789. vy32_r = vec_sub(vy32_r, y_offset);
  790. vy32_l = vec_mul(vy32_l, y_coeff);
  791. vy32_r = vec_mul(vy32_r, y_coeff);
  792. vy32_l = vec_add(vy32_l, y_add);
  793. vy32_r = vec_add(vy32_r, y_add);
  794. // Use the first UV half
  795. vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
  796. vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
  797. vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
  798. vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
  799. R_l = vec_mul(vvd32_l, v2r_coeff);
  800. R_l = vec_add(R_l, vy32_l);
  801. R_r = vec_mul(vvd32_r, v2r_coeff);
  802. R_r = vec_add(R_r, vy32_r);
  803. G_l = vec_mul(vvd32_l, v2g_coeff);
  804. tmp32 = vec_mul(vud32_l, u2g_coeff);
  805. G_l = vec_add(G_l, vy32_l);
  806. G_l = vec_add(G_l, tmp32);
  807. G_r = vec_mul(vvd32_r, v2g_coeff);
  808. tmp32 = vec_mul(vud32_r, u2g_coeff);
  809. G_r = vec_add(G_r, vy32_r);
  810. G_r = vec_add(G_r, tmp32);
  811. B_l = vec_mul(vud32_l, u2b_coeff);
  812. B_l = vec_add(B_l, vy32_l);
  813. B_r = vec_mul(vud32_r, u2b_coeff);
  814. B_r = vec_add(B_r, vy32_r);
  815. WRITERGB
  816. // New Y for the second half
  817. SETUP(vy, &buf0[i * 2 + 8], vyalpha1, &buf1[i * 2 + 8], vyalpha);
  818. vy32_l = vec_sra(tmp3, shift10);
  819. vy32_r = vec_sra(tmp4, shift10);
  820. vy32_l = vec_sub(vy32_l, y_offset);
  821. vy32_r = vec_sub(vy32_r, y_offset);
  822. vy32_l = vec_mul(vy32_l, y_coeff);
  823. vy32_r = vec_mul(vy32_r, y_coeff);
  824. vy32_l = vec_add(vy32_l, y_add);
  825. vy32_r = vec_add(vy32_r, y_add);
  826. // Second UV half
  827. vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
  828. vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
  829. vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
  830. vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
  831. R_l = vec_mul(vvd32_l, v2r_coeff);
  832. R_l = vec_add(R_l, vy32_l);
  833. R_r = vec_mul(vvd32_r, v2r_coeff);
  834. R_r = vec_add(R_r, vy32_r);
  835. G_l = vec_mul(vvd32_l, v2g_coeff);
  836. tmp32 = vec_mul(vud32_l, u2g_coeff);
  837. G_l = vec_add(G_l, vy32_l);
  838. G_l = vec_add(G_l, tmp32);
  839. G_r = vec_mul(vvd32_r, v2g_coeff);
  840. tmp32 = vec_mul(vud32_r, u2g_coeff);
  841. G_r = vec_add(G_r, vy32_r);
  842. G_r = vec_add(G_r, tmp32);
  843. B_l = vec_mul(vud32_l, u2b_coeff);
  844. B_l = vec_add(B_l, vy32_l);
  845. B_r = vec_mul(vud32_r, u2b_coeff);
  846. B_r = vec_add(B_r, vy32_r);
  847. WRITERGB
  848. }
  849. }
  850. #undef SETUP
  851. static av_always_inline void
  852. yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
  853. const int16_t *ubuf[2], const int16_t *vbuf[2],
  854. const int16_t *abuf0, uint8_t *dest, int dstW,
  855. int uvalpha, int y, enum AVPixelFormat target,
  856. int hasAlpha)
  857. {
  858. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  859. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  860. vector int16_t vy, vu, vv, A = vec_splat_s16(0), tmp16;
  861. vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
  862. vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
  863. vector uint16_t rd16, gd16, bd16;
  864. vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
  865. const vector uint16_t zero16 = vec_splat_u16(0);
  866. const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
  867. const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  868. const vector int32_t y_add = vec_splats(1 << 21);
  869. const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  870. const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  871. const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  872. const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  873. const vector int32_t rgbclip = vec_splats(1 << 30);
  874. const vector int32_t zero32 = vec_splat_s32(0);
  875. const vector uint32_t shift2 = vec_splat_u32(2);
  876. const vector uint32_t shift22 = vec_splats(22U);
  877. const vector uint16_t sub7 = vec_splats((uint16_t) (128 << 7));
  878. const vector uint16_t sub8 = vec_splats((uint16_t) (128 << 8));
  879. const vector int16_t mul4 = vec_splat_s16(4);
  880. const vector int16_t mul8 = vec_splat_s16(8);
  881. const vector int16_t add64 = vec_splat_s16(64);
  882. const vector uint16_t shift7 = vec_splat_u16(7);
  883. const vector int16_t max255 = vec_splat_s16(255);
  884. int i;
  885. // Various permutations
  886. const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
  887. 0x1, 0x11, 0,
  888. 0x2, 0x12, 0,
  889. 0x3, 0x13, 0,
  890. 0x4, 0x14, 0,
  891. 0x5 };
  892. const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
  893. 0x6, 0x16, 0,
  894. 0x7, 0x17, 0 };
  895. const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
  896. 0x3, 0x4, 0x11,
  897. 0x6, 0x7, 0x12,
  898. 0x9, 0xa, 0x13,
  899. 0xc, 0xd, 0x14,
  900. 0xf };
  901. const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
  902. 0x2, 0x3, 0x16,
  903. 0x5, 0x6, 0x17 };
  904. for (i = 0; i < dstW; i += 8) { // The x86 asm also overwrites padding bytes.
  905. vy = vec_ld(0, &buf0[i]);
  906. vy32_l = vec_unpackh(vy);
  907. vy32_r = vec_unpackl(vy);
  908. vy32_l = vec_sl(vy32_l, shift2);
  909. vy32_r = vec_sl(vy32_r, shift2);
  910. vu = vec_ld(0, &ubuf0[i]);
  911. vv = vec_ld(0, &vbuf0[i]);
  912. if (uvalpha < 2048) {
  913. vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub7);
  914. vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub7);
  915. tmp32 = vec_mule(vu, mul4);
  916. tmp32_2 = vec_mulo(vu, mul4);
  917. vu32_l = vec_mergeh(tmp32, tmp32_2);
  918. vu32_r = vec_mergel(tmp32, tmp32_2);
  919. tmp32 = vec_mule(vv, mul4);
  920. tmp32_2 = vec_mulo(vv, mul4);
  921. vv32_l = vec_mergeh(tmp32, tmp32_2);
  922. vv32_r = vec_mergel(tmp32, tmp32_2);
  923. } else {
  924. tmp16 = vec_ld(0, &ubuf1[i]);
  925. vu = vec_add(vu, tmp16);
  926. vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub8);
  927. tmp16 = vec_ld(0, &vbuf1[i]);
  928. vv = vec_add(vv, tmp16);
  929. vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub8);
  930. vu32_l = vec_mule(vu, mul8);
  931. vu32_r = vec_mulo(vu, mul8);
  932. vv32_l = vec_mule(vv, mul8);
  933. vv32_r = vec_mulo(vv, mul8);
  934. }
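/* Summary of the branch above: with uvalpha < 2048 only the first chroma line
 * is used, so the 128 << 7 bias is subtracted and the samples are widened by a
 * factor of 4; otherwise the two chroma lines are summed, the doubled bias
 * (128 << 8) is subtracted, and the sum is widened by a factor of 8 via
 * vec_mule/vec_mulo. */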
  935. if (hasAlpha) {
  936. A = vec_ld(0, &abuf0[i]);
  937. A = vec_add(A, add64);
  938. A = vec_sr(A, shift7);
  939. A = vec_max(A, max255);
  940. ad = vec_packsu(A, (vector int16_t) zero16);
  941. } else {
  942. ad = vec_splats((uint8_t) 255);
  943. }
  944. vy32_l = vec_sub(vy32_l, y_offset);
  945. vy32_r = vec_sub(vy32_r, y_offset);
  946. vy32_l = vec_mul(vy32_l, y_coeff);
  947. vy32_r = vec_mul(vy32_r, y_coeff);
  948. vy32_l = vec_add(vy32_l, y_add);
  949. vy32_r = vec_add(vy32_r, y_add);
  950. R_l = vec_mul(vv32_l, v2r_coeff);
  951. R_l = vec_add(R_l, vy32_l);
  952. R_r = vec_mul(vv32_r, v2r_coeff);
  953. R_r = vec_add(R_r, vy32_r);
  954. G_l = vec_mul(vv32_l, v2g_coeff);
  955. tmp32 = vec_mul(vu32_l, u2g_coeff);
  956. G_l = vec_add(G_l, vy32_l);
  957. G_l = vec_add(G_l, tmp32);
  958. G_r = vec_mul(vv32_r, v2g_coeff);
  959. tmp32 = vec_mul(vu32_r, u2g_coeff);
  960. G_r = vec_add(G_r, vy32_r);
  961. G_r = vec_add(G_r, tmp32);
  962. B_l = vec_mul(vu32_l, u2b_coeff);
  963. B_l = vec_add(B_l, vy32_l);
  964. B_r = vec_mul(vu32_r, u2b_coeff);
  965. B_r = vec_add(B_r, vy32_r);
  966. WRITERGB
  967. }
  968. }
  969. static av_always_inline void
  970. yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,
  971. const int16_t *ubuf[2], const int16_t *vbuf[2],
  972. const int16_t *abuf0, uint8_t *dest, int dstW,
  973. int uvalpha, int y, enum AVPixelFormat target,
  974. int hasAlpha)
  975. {
  976. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  977. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  978. vector int16_t vy, vu, vv, A = vec_splat_s16(0), tmp16;
  979. vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
  980. vector int32_t vud32_l, vud32_r, vvd32_l, vvd32_r;
  981. vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
  982. vector uint16_t rd16, gd16, bd16;
  983. vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
  984. const vector uint16_t zero16 = vec_splat_u16(0);
  985. const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
  986. const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  987. const vector int32_t y_add = vec_splats(1 << 21);
  988. const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  989. const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  990. const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  991. const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  992. const vector int32_t rgbclip = vec_splats(1 << 30);
  993. const vector int32_t zero32 = vec_splat_s32(0);
  994. const vector uint32_t shift2 = vec_splat_u32(2);
  995. const vector uint32_t shift22 = vec_splats(22U);
  996. const vector uint16_t sub7 = vec_splats((uint16_t) (128 << 7));
  997. const vector uint16_t sub8 = vec_splats((uint16_t) (128 << 8));
  998. const vector int16_t mul4 = vec_splat_s16(4);
  999. const vector int16_t mul8 = vec_splat_s16(8);
  1000. const vector int16_t add64 = vec_splat_s16(64);
  1001. const vector uint16_t shift7 = vec_splat_u16(7);
  1002. const vector int16_t max255 = vec_splat_s16(255);
  1003. int i;
  1004. // Various permutations
  1005. const vector uint8_t doubleleft = (vector uint8_t) {0, 1, 2, 3,
  1006. 0, 1, 2, 3,
  1007. 4, 5, 6, 7,
  1008. 4, 5, 6, 7 };
  1009. const vector uint8_t doubleright = (vector uint8_t) {8, 9, 10, 11,
  1010. 8, 9, 10, 11,
  1011. 12, 13, 14, 15,
  1012. 12, 13, 14, 15 };
  1013. const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
  1014. 0x1, 0x11, 0,
  1015. 0x2, 0x12, 0,
  1016. 0x3, 0x13, 0,
  1017. 0x4, 0x14, 0,
  1018. 0x5 };
  1019. const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
  1020. 0x6, 0x16, 0,
  1021. 0x7, 0x17, 0 };
  1022. const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
  1023. 0x3, 0x4, 0x11,
  1024. 0x6, 0x7, 0x12,
  1025. 0x9, 0xa, 0x13,
  1026. 0xc, 0xd, 0x14,
  1027. 0xf };
  1028. const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
  1029. 0x2, 0x3, 0x16,
  1030. 0x5, 0x6, 0x17 };
  1031. for (i = 0; i < (dstW + 1) >> 1; i += 8) { // The x86 asm also overwrites padding bytes.
  1032. vy = vec_ld(0, &buf0[i * 2]);
  1033. vy32_l = vec_unpackh(vy);
  1034. vy32_r = vec_unpackl(vy);
  1035. vy32_l = vec_sl(vy32_l, shift2);
  1036. vy32_r = vec_sl(vy32_r, shift2);
  1037. vu = vec_ld(0, &ubuf0[i]);
  1038. vv = vec_ld(0, &vbuf0[i]);
  1039. if (uvalpha < 2048) {
  1040. vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub7);
  1041. vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub7);
  1042. tmp32 = vec_mule(vu, mul4);
  1043. tmp32_2 = vec_mulo(vu, mul4);
  1044. vu32_l = vec_mergeh(tmp32, tmp32_2);
  1045. vu32_r = vec_mergel(tmp32, tmp32_2);
  1046. tmp32 = vec_mule(vv, mul4);
  1047. tmp32_2 = vec_mulo(vv, mul4);
  1048. vv32_l = vec_mergeh(tmp32, tmp32_2);
  1049. vv32_r = vec_mergel(tmp32, tmp32_2);
  1050. } else {
  1051. tmp16 = vec_ld(0, &ubuf1[i]);
  1052. vu = vec_add(vu, tmp16);
  1053. vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub8);
  1054. tmp16 = vec_ld(0, &vbuf1[i]);
  1055. vv = vec_add(vv, tmp16);
  1056. vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub8);
  1057. vu32_l = vec_mule(vu, mul8);
  1058. vu32_r = vec_mulo(vu, mul8);
  1059. vv32_l = vec_mule(vv, mul8);
  1060. vv32_r = vec_mulo(vv, mul8);
  1061. }
  1062. if (hasAlpha) {
  1063. A = vec_ld(0, &abuf0[i]);
  1064. A = vec_add(A, add64);
  1065. A = vec_sr(A, shift7);
  1066. A = vec_max(A, max255);
  1067. ad = vec_packsu(A, (vector int16_t) zero16);
  1068. } else {
  1069. ad = vec_splats((uint8_t) 255);
  1070. }
  1071. vy32_l = vec_sub(vy32_l, y_offset);
  1072. vy32_r = vec_sub(vy32_r, y_offset);
  1073. vy32_l = vec_mul(vy32_l, y_coeff);
  1074. vy32_r = vec_mul(vy32_r, y_coeff);
  1075. vy32_l = vec_add(vy32_l, y_add);
  1076. vy32_r = vec_add(vy32_r, y_add);
  1077. // Use the first UV half
  1078. vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
  1079. vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
  1080. vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
  1081. vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
  1082. R_l = vec_mul(vvd32_l, v2r_coeff);
  1083. R_l = vec_add(R_l, vy32_l);
  1084. R_r = vec_mul(vvd32_r, v2r_coeff);
  1085. R_r = vec_add(R_r, vy32_r);
  1086. G_l = vec_mul(vvd32_l, v2g_coeff);
  1087. tmp32 = vec_mul(vud32_l, u2g_coeff);
  1088. G_l = vec_add(G_l, vy32_l);
  1089. G_l = vec_add(G_l, tmp32);
  1090. G_r = vec_mul(vvd32_r, v2g_coeff);
  1091. tmp32 = vec_mul(vud32_r, u2g_coeff);
  1092. G_r = vec_add(G_r, vy32_r);
  1093. G_r = vec_add(G_r, tmp32);
  1094. B_l = vec_mul(vud32_l, u2b_coeff);
  1095. B_l = vec_add(B_l, vy32_l);
  1096. B_r = vec_mul(vud32_r, u2b_coeff);
  1097. B_r = vec_add(B_r, vy32_r);
  1098. WRITERGB
  1099. // New Y for the second half
  1100. vy = vec_ld(16, &buf0[i * 2]);
  1101. vy32_l = vec_unpackh(vy);
  1102. vy32_r = vec_unpackl(vy);
  1103. vy32_l = vec_sl(vy32_l, shift2);
  1104. vy32_r = vec_sl(vy32_r, shift2);
  1105. vy32_l = vec_sub(vy32_l, y_offset);
  1106. vy32_r = vec_sub(vy32_r, y_offset);
  1107. vy32_l = vec_mul(vy32_l, y_coeff);
  1108. vy32_r = vec_mul(vy32_r, y_coeff);
  1109. vy32_l = vec_add(vy32_l, y_add);
  1110. vy32_r = vec_add(vy32_r, y_add);
  1111. // Second UV half
  1112. vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
  1113. vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
  1114. vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
  1115. vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
  1116. R_l = vec_mul(vvd32_l, v2r_coeff);
  1117. R_l = vec_add(R_l, vy32_l);
  1118. R_r = vec_mul(vvd32_r, v2r_coeff);
  1119. R_r = vec_add(R_r, vy32_r);
  1120. G_l = vec_mul(vvd32_l, v2g_coeff);
  1121. tmp32 = vec_mul(vud32_l, u2g_coeff);
  1122. G_l = vec_add(G_l, vy32_l);
  1123. G_l = vec_add(G_l, tmp32);
  1124. G_r = vec_mul(vvd32_r, v2g_coeff);
  1125. tmp32 = vec_mul(vud32_r, u2g_coeff);
  1126. G_r = vec_add(G_r, vy32_r);
  1127. G_r = vec_add(G_r, tmp32);
  1128. B_l = vec_mul(vud32_l, u2b_coeff);
  1129. B_l = vec_add(B_l, vy32_l);
  1130. B_r = vec_mul(vud32_r, u2b_coeff);
  1131. B_r = vec_add(B_r, vy32_r);
  1132. WRITERGB
  1133. }
  1134. }
  1135. #undef WRITERGB
  1136. #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
  1137. static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
  1138. const int16_t **lumSrc, int lumFilterSize, \
  1139. const int16_t *chrFilter, const int16_t **chrUSrc, \
  1140. const int16_t **chrVSrc, int chrFilterSize, \
  1141. const int16_t **alpSrc, uint8_t *dest, int dstW, \
  1142. int y) \
  1143. { \
  1144. name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
  1145. chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
  1146. alpSrc, dest, dstW, y, fmt, hasAlpha); \
  1147. }
  1148. #define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
  1149. static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
  1150. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1151. const int16_t *abuf[2], uint8_t *dest, int dstW, \
  1152. int yalpha, int uvalpha, int y) \
  1153. { \
  1154. name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
  1155. dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
  1156. }
  1157. #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
  1158. static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
  1159. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1160. const int16_t *abuf0, uint8_t *dest, int dstW, \
  1161. int uvalpha, int y) \
  1162. { \
  1163. name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
  1164. dstW, uvalpha, y, fmt, hasAlpha); \
  1165. }
YUV2RGBWRAPPER(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPER(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)

YUV2RGBWRAPPERX2(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPERX2(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPERX2(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPERX2(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPERX2(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPERX2(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)

YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)

YUV2RGBWRAPPERX2(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)

YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
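
/*
 * write422(): packs two vectors of 16-bit luma (16 samples) and one vector
 * each of U and V (8 samples each) to unsigned bytes with saturation, then
 * interleaves them with format-specific vec_perm masks and stores 32 bytes,
 * i.e. 16 pixels of packed 4:2:2 output (YUYV, YVYU or UYVY).
 */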
static av_always_inline void
write422(const vector int16_t vy1, const vector int16_t vy2,
         const vector int16_t vu, const vector int16_t vv,
         uint8_t *dest, const enum AVPixelFormat target)
{
    vector uint8_t vd1, vd2, tmp;
    const vector uint8_t yuyv1 = (vector uint8_t) {
                                 0x0, 0x10, 0x1, 0x18,
                                 0x2, 0x11, 0x3, 0x19,
                                 0x4, 0x12, 0x5, 0x1a,
                                 0x6, 0x13, 0x7, 0x1b };
    const vector uint8_t yuyv2 = (vector uint8_t) {
                                 0x8, 0x14, 0x9, 0x1c,
                                 0xa, 0x15, 0xb, 0x1d,
                                 0xc, 0x16, 0xd, 0x1e,
                                 0xe, 0x17, 0xf, 0x1f };
    const vector uint8_t yvyu1 = (vector uint8_t) {
                                 0x0, 0x18, 0x1, 0x10,
                                 0x2, 0x19, 0x3, 0x11,
                                 0x4, 0x1a, 0x5, 0x12,
                                 0x6, 0x1b, 0x7, 0x13 };
    const vector uint8_t yvyu2 = (vector uint8_t) {
                                 0x8, 0x1c, 0x9, 0x14,
                                 0xa, 0x1d, 0xb, 0x15,
                                 0xc, 0x1e, 0xd, 0x16,
                                 0xe, 0x1f, 0xf, 0x17 };
    const vector uint8_t uyvy1 = (vector uint8_t) {
                                 0x10, 0x0, 0x18, 0x1,
                                 0x11, 0x2, 0x19, 0x3,
                                 0x12, 0x4, 0x1a, 0x5,
                                 0x13, 0x6, 0x1b, 0x7 };
    const vector uint8_t uyvy2 = (vector uint8_t) {
                                 0x14, 0x8, 0x1c, 0x9,
                                 0x15, 0xa, 0x1d, 0xb,
                                 0x16, 0xc, 0x1e, 0xd,
                                 0x17, 0xe, 0x1f, 0xf };

    vd1 = vec_packsu(vy1, vy2);
    vd2 = vec_packsu(vu, vv);

    switch (target) {
    case AV_PIX_FMT_YUYV422:
        tmp = vec_perm(vd1, vd2, yuyv1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yuyv2);
        vec_st(tmp, 16, dest);
        break;
    case AV_PIX_FMT_YVYU422:
        tmp = vec_perm(vd1, vd2, yvyu1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yvyu2);
        vec_st(tmp, 16, dest);
        break;
    case AV_PIX_FMT_UYVY422:
        tmp = vec_perm(vd1, vd2, uyvy1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, uyvy2);
        vec_st(tmp, 16, dest);
        break;
    }
}
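
/*
 * Vertical scaling path for packed 4:2:2 output: accumulates lumFilterSize /
 * chrFilterSize taps of the 16-bit intermediate lines into 32-bit sums
 * (seeded with the 1 << 18 rounding constant), shifts right by 19, saturates
 * back to 16 bits and hands the result to write422().
 */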
static av_always_inline void
yuv2422_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest, int dstW,
                       int y, enum AVPixelFormat target)
{
    int i, j;
    vector int16_t vy1, vy2, vu, vv;
    vector int32_t vy32[4], vu32[2], vv32[2], tmp, tmp2, tmp3, tmp4;
    vector int16_t vlumFilter[MAX_FILTER_SIZE], vchrFilter[MAX_FILTER_SIZE];
    const vector int32_t start = vec_splats(1 << 18);
    const vector uint32_t shift19 = vec_splats(19U);

    for (i = 0; i < lumFilterSize; i++)
        vlumFilter[i] = vec_splats(lumFilter[i]);
    for (i = 0; i < chrFilterSize; i++)
        vchrFilter[i] = vec_splats(chrFilter[i]);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        vy32[0] =
        vy32[1] =
        vy32[2] =
        vy32[3] =
        vu32[0] =
        vu32[1] =
        vv32[0] =
        vv32[1] = start;

        for (j = 0; j < lumFilterSize; j++) {
            vv = vec_ld(0, &lumSrc[j][i * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[0] = vec_adds(vy32[0], tmp3);
            vy32[1] = vec_adds(vy32[1], tmp4);

            vv = vec_ld(0, &lumSrc[j][(i + 4) * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[2] = vec_adds(vy32[2], tmp3);
            vy32[3] = vec_adds(vy32[3], tmp4);
        }

        for (j = 0; j < chrFilterSize; j++) {
            vv = vec_ld(0, &chrUSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vu32[0] = vec_adds(vu32[0], tmp3);
            vu32[1] = vec_adds(vu32[1], tmp4);

            vv = vec_ld(0, &chrVSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vv32[0] = vec_adds(vv32[0], tmp3);
            vv32[1] = vec_adds(vv32[1], tmp4);
        }

        for (j = 0; j < 4; j++) {
            vy32[j] = vec_sra(vy32[j], shift19);
        }
        for (j = 0; j < 2; j++) {
            vu32[j] = vec_sra(vu32[j], shift19);
            vv32[j] = vec_sra(vv32[j], shift19);
        }

        vy1 = vec_packs(vy32[0], vy32[1]);
        vy2 = vec_packs(vy32[2], vy32[3]);
        vu = vec_packs(vu32[0], vu32[1]);
        vv = vec_packs(vv32[0], vv32[1]);

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
    }
}
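
/*
 * SETUP(x, buf0, buf1, alpha): loads one vector from each of the two input
 * lines, widens their products with 'alpha' to 32 bits (vec_mule/vec_mulo
 * plus merge to restore element order), adds the two contributions, shifts
 * right by 19 and packs the result back to signed 16 bits in 'x'.
 */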
#define SETUP(x, buf0, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
\
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
\
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \
\
    tmp3 = vec_sra(tmp3, shift19); \
    tmp4 = vec_sra(tmp4, shift19); \
    x = vec_packs(tmp3, tmp4); \
}
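
/*
 * Two-line blend path for packed 4:2:2 output: SETUP above combines one
 * vector from each input line (both scaled here by the vyalpha1/vuvalpha1
 * weights, i.e. 4096 - alpha) and write422() emits the packed pixels.
 */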
static av_always_inline void
yuv2422_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
                       enum AVPixelFormat target)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vector int16_t vy1, vy2, vu, vv;
    vector int32_t tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
    const vector int16_t vyalpha1 = vec_splats(yalpha1);
    const vector int16_t vuvalpha1 = vec_splats(uvalpha1);
    const vector uint32_t shift19 = vec_splats(19U);
    int i;

    av_assert2(yalpha <= 4096U);
    av_assert2(uvalpha <= 4096U);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        SETUP(vy1, &buf0[i * 2], &buf1[i * 2], vyalpha1)
        SETUP(vy2, &buf0[(i + 4) * 2], &buf1[(i + 4) * 2], vyalpha1)
        SETUP(vu, &ubuf0[i], &ubuf1[i], vuvalpha1)
        SETUP(vv, &vbuf0[i], &vbuf1[i], vuvalpha1)

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
    }
}

#undef SETUP
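
/*
 * Single-line (unscaled vertical) path for packed 4:2:2 output: with
 * uvalpha < 2048 the values are only rounded (add 64, >> 7); otherwise the
 * two chroma lines are summed, biased by 128 and shifted by 8, i.e. averaged,
 * before packing through write422().
 */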
static av_always_inline void
yuv2422_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
                       int uvalpha, int y, enum AVPixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    vector int16_t vy1, vy2, vu, vv, tmp;
    const vector int16_t add64 = vec_splats((int16_t) 64);
    const vector int16_t add128 = vec_splats((int16_t) 128);
    const vector uint16_t shift7 = vec_splat_u16(7);
    const vector uint16_t shift8 = vec_splat_u16(8);
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            vv = vec_ld(0, &vbuf0[i]);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_add(vu, add64);
            vv = vec_add(vv, add64);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift7);
            vv = vec_sra(vv, shift7);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];

        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            tmp = vec_ld(0, &ubuf1[i]);
            vu = vec_adds(vu, tmp);
            vv = vec_ld(0, &vbuf0[i]);
            tmp = vec_ld(0, &vbuf1[i]);
            vv = vec_adds(vv, tmp);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_adds(vu, add128);
            vv = vec_adds(vv, add128);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift8);
            vv = vec_sra(vv, shift8);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
        }
    }
}
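
/*
 * As with the RGB writers, the macros below stamp out the format-specific
 * X/2/1 entry points for the packed 4:2:2 writers defined above.
 */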
#define YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
                                  const int16_t **lumSrc, int lumFilterSize, \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize, \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                  int y) \
{ \
    name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                    alpSrc, dest, dstW, y, fmt); \
}

#define YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt); \
}

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
{ \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, \
                                    abuf0, dest, dstW, uvalpha, \
                                    y, fmt); \
}

YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
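
/*
 * Fast bilinear horizontal luma scaler. The source position runs in 16.16
 * fixed point; the integer parts form a vec_perm gather mask over the 16
 * loaded source bytes (this path is only enabled for upscaling, so the
 * offsets stay within one load) and the top 7 bits of the fraction
 * ((xpos & 0xffff) >> 9) are the blend weights, giving
 * dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * alpha.
 * The trailing scalar loop replicates the last source pixel.
 */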
static void hyscale_fast_vsx(SwsContext *c, int16_t *dst, int dstWidth,
                             const uint8_t *src, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0, xx;
    vector uint8_t vin, vin2, vperm;
    vector int8_t vmul, valpha;
    vector int16_t vtmp, vtmp2, vtmp3, vtmp4;
    vector uint16_t vd_l, vd_r, vcoord16[2];
    vector uint32_t vcoord[4];
    const vector uint32_t vadd = (vector uint32_t) {
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
    };
    const vector uint16_t vadd16 = (vector uint16_t) { // Modulo math
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
        xInc * 4,
        xInc * 5,
        xInc * 6,
        xInc * 7,
    };
    const vector uint32_t vshift16 = vec_splats((uint32_t) 16);
    const vector uint16_t vshift9 = vec_splat_u16(9);
    const vector uint8_t vzero = vec_splat_u8(0);
    const vector uint16_t vshift = vec_splat_u16(7);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = (vector int8_t) vec_pack(vcoord16[0], vcoord16[1]);

        xx = xpos >> 16;
        vin = vec_vsx_ld(0, &src[xx]);

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        vin = vec_perm(vin, vin, vperm);

        vin2 = vec_vsx_ld(1, &src[xx]);
        vin2 = vec_perm(vin2, vin2, vperm);

        vmul = (vector int8_t) vec_sub(vin2, vin);
        vtmp = vec_mule(vmul, valpha);
        vtmp2 = vec_mulo(vmul, valpha);
        vtmp3 = vec_mergeh(vtmp, vtmp2);
        vtmp4 = vec_mergel(vtmp, vtmp2);

        vd_l = (vector uint16_t) vec_mergeh(vin, vzero);
        vd_r = (vector uint16_t) vec_mergel(vin, vzero);

        vd_l = vec_sl(vd_l, vshift);
        vd_r = vec_sl(vd_r, vshift);

        vd_l = vec_add(vd_l, (vector uint16_t) vtmp3);
        vd_r = vec_add(vd_r, (vector uint16_t) vtmp4);

        vec_st((vector int16_t) vd_l, 0, &dst[i]);
        vec_st((vector int16_t) vd_r, 0, &dst[i + 8]);

        xpos += xInc * 16;
    }

    for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
        dst[i] = src[srcW-1]*128;
}
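
/*
 * HCSCALE(in, out) is the per-plane body of hcscale_fast_vsx below: it
 * gathers 16 bytes from 'in' at the precomputed positions and blends each
 * pair of neighbours as src[xx] * (alpha ^ 127) + src[xx + 1] * alpha,
 * storing 16 16-bit results to 'out'.
 */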
#define HCSCALE(in, out) \
        vin = vec_vsx_ld(0, &in[xx]); \
        vin = vec_perm(vin, vin, vperm); \
\
        vin2 = vec_vsx_ld(1, &in[xx]); \
        vin2 = vec_perm(vin2, vin2, vperm); \
\
        vtmp = vec_mule(vin, valphaxor); \
        vtmp2 = vec_mulo(vin, valphaxor); \
        vtmp3 = vec_mergeh(vtmp, vtmp2); \
        vtmp4 = vec_mergel(vtmp, vtmp2); \
\
        vtmp = vec_mule(vin2, valpha); \
        vtmp2 = vec_mulo(vin2, valpha); \
        vd_l = vec_mergeh(vtmp, vtmp2); \
        vd_r = vec_mergel(vtmp, vtmp2); \
\
        vd_l = vec_add(vd_l, vtmp3); \
        vd_r = vec_add(vd_r, vtmp4); \
\
        vec_st((vector int16_t) vd_l, 0, &out[i]); \
        vec_st((vector int16_t) vd_r, 0, &out[i + 8])

static void hcscale_fast_vsx(SwsContext *c, int16_t *dst1, int16_t *dst2,
                             int dstWidth, const uint8_t *src1,
                             const uint8_t *src2, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0, xx;
    vector uint8_t vin, vin2, vperm;
    vector uint8_t valpha, valphaxor;
    vector uint16_t vtmp, vtmp2, vtmp3, vtmp4;
    vector uint16_t vd_l, vd_r, vcoord16[2];
    vector uint32_t vcoord[4];
    const vector uint8_t vxor = vec_splats((uint8_t) 127);
    const vector uint32_t vadd = (vector uint32_t) {
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
    };
    const vector uint16_t vadd16 = (vector uint16_t) { // Modulo math
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
        xInc * 4,
        xInc * 5,
        xInc * 6,
        xInc * 7,
    };
    const vector uint32_t vshift16 = vec_splats((uint32_t) 16);
    const vector uint16_t vshift9 = vec_splat_u16(9);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = vec_pack(vcoord16[0], vcoord16[1]);
        valphaxor = vec_xor(valpha, vxor);

        xx = xpos >> 16;

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        HCSCALE(src1, dst1);
        HCSCALE(src2, dst2);

        xpos += xInc * 16;
    }

    for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
        dst1[i] = src1[srcW-1]*128;
        dst2[i] = src2[srcW-1]*128;
    }
}

#undef HCSCALE
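
/*
 * General horizontal scaler, 8-bit input to 19-bit output: for each output
 * sample the filter taps are applied 8 at a time with vec_msums, the
 * vunusedtab permutation masking out taps beyond filterSize on the last
 * round, and vec_sums doing the final horizontal reduction. filterSize == 1
 * falls back to scalar code.
 */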
static void hScale8To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                            const uint8_t *src, const int16_t *filter,
                            const int32_t *filterPos, int filterSize)
{
    int i, j;
    int32_t *dst = (int32_t *) _dst;
    vector int16_t vfilter, vin;
    vector uint8_t vin8;
    vector int32_t vout;
    const vector uint8_t vzero = vec_splat_u8(0);
    const vector uint8_t vunusedtab[8] = {
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vector uint8_t) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    };
    const vector uint8_t vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val = 0;
            for (j = 0; j < filterSize; j++) {
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            }
            dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
        }
    } else {
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin8 = vec_vsx_ld(0, &src[srcPos + j]);
                vin = (vector int16_t) vec_mergeh(vin8, vzero);
                if (j + 8 > filterSize) // Remove the unused elements on the last round
                    vin = vec_perm(vin, (vector int16_t) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vout = vec_msums(vin, vfilter, vout);
            }
            vout = vec_sums(vout, (vector int32_t) vzero);
            dst[i] = FFMIN(vout[3] >> 3, (1 << 19) - 1);
        }
    }
}
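
/*
 * 16-bit input variant of the scaler above, producing 19-bit output. The
 * products no longer fit a 16-bit multiply-sum, so the filter is unpacked to
 * 32 bits and vec_mul/vec_adds are used instead; the shift amount depends on
 * the source depth, with special cases for RGB/PAL8 and float input.
 */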
static void hScale16To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
    int i, j;
    int32_t *dst = (int32_t *) _dst;
    const uint16_t *src = (const uint16_t *) _src;
    int bits = desc->comp[0].depth - 1;
    int sh = bits - 4;
    vector int16_t vfilter, vin;
    vector int32_t vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vector uint8_t vzero = vec_splat_u8(0);
    const vector uint8_t vunusedtab[8] = {
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vector uint8_t) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    };
    const vector uint8_t vunused = vunusedtab[filterSize % 8];

    if ((isAnyRGB(c->srcFormat) || c->srcFormat==AV_PIX_FMT_PAL8) && desc->comp[0].depth<16) {
        sh = 9;
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
        sh = 16 - 1 - 4;
    }

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val = 0;

            for (j = 0; j < filterSize; j++) {
                val += src[srcPos + j] * filter[filterSize * i + j];
            }
            // filter=14 bit, input=16 bit, output=30 bit, >> 11 makes 19 bit
            dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
        }
    } else {
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);

            for (j = 0; j < filterSize; j += 8) {
                vin = (vector int16_t) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize) // Remove the unused elements on the last round
                    vin = vec_perm(vin, (vector int16_t) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = (vector int32_t) vec_mergeh(vin, (vector int16_t) vzero);
                vtmp2 = (vector int32_t) vec_mergel(vin, (vector int16_t) vzero);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);
            }
            vout = vec_sums(vout, (vector int32_t) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 19) - 1);
        }
    }
}
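
/*
 * Same as hScale16To19_vsx but clipped to the 15-bit intermediate range used
 * for destinations of up to 14 bits per component (see the dstBpc check in
 * ff_sws_init_swscale_vsx below).
 */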
static void hScale16To15_vsx(SwsContext *c, int16_t *dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
    int i, j;
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;
    vector int16_t vfilter, vin;
    vector int32_t vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vector uint8_t vzero = vec_splat_u8(0);
    const vector uint8_t vunusedtab[8] = {
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vector uint8_t) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vector uint8_t) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                          0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    };
    const vector uint8_t vunused = vunusedtab[filterSize % 8];

    if (sh<15) {
        sh = isAnyRGB(c->srcFormat) || c->srcFormat==AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
        sh = 16 - 1;
    }

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val = 0;

            for (j = 0; j < filterSize; j++) {
                val += src[srcPos + j] * filter[filterSize * i + j];
            }
            // filter=14 bit, input=16 bit, output=30 bit, >> 15 makes 15 bit
            dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
        }
    } else {
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);

            for (j = 0; j < filterSize; j += 8) {
                vin = (vector int16_t) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize) // Remove the unused elements on the last round
                    vin = vec_perm(vin, (vector int16_t) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = (vector int32_t) vec_mergeh(vin, (vector int16_t) vzero);
                vtmp2 = (vector int32_t) vec_mergel(vin, (vector int16_t) vzero);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);
            }
            vout = vec_sums(vout, (vector int32_t) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 15) - 1);
        }
    }
}

#endif /* !HAVE_BIGENDIAN */
#endif /* HAVE_VSX */
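
/*
 * Runtime dispatch: installs the VSX implementations above into the
 * SwsContext function pointers, gated on AV_CPU_FLAG_VSX (and on
 * AV_CPU_FLAG_POWER8 for the paths built on 32-bit vector multiplies),
 * endianness, source/destination bit depth and the output pixel format.
 */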
av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
{
#if HAVE_VSX
    enum AVPixelFormat dstFormat = c->dstFormat;
    const int cpu_flags = av_get_cpu_flags();
    const unsigned char power8 = HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8;

    if (!(cpu_flags & AV_CPU_FLAG_VSX))
        return;

#if !HAVE_BIGENDIAN
    if (c->srcBpc == 8) {
        if (c->dstBpc <= 14) {
            c->hyScale = c->hcScale = hScale_real_vsx;
            if (c->flags & SWS_FAST_BILINEAR && c->dstW >= c->srcW && c->chrDstW >= c->chrSrcW) {
                c->hyscale_fast = hyscale_fast_vsx;
                c->hcscale_fast = hcscale_fast_vsx;
            }
        } else {
            c->hyScale = c->hcScale = hScale8To19_vsx;
        }
    } else {
        if (power8) {
            c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_vsx
                                                     : hScale16To15_vsx;
        }
    }

    if (!is16BPS(dstFormat) && !isNBPS(dstFormat) && !isSemiPlanarYUV(dstFormat) &&
        dstFormat != AV_PIX_FMT_GRAYF32BE && dstFormat != AV_PIX_FMT_GRAYF32LE &&
        !c->needAlpha) {
        c->yuv2planeX = yuv2planeX_vsx;
    }
#endif

    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->needAlpha) {
        switch (c->dstBpc) {
        case 8:
            c->yuv2plane1 = yuv2plane1_8_vsx;
            break;
#if !HAVE_BIGENDIAN
        case 9:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx : yuv2plane1_9LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx : yuv2planeX_9LE_vsx;
            break;
        case 10:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
            break;
        case 12:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
            break;
        case 14:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
            break;
        case 16:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
#if HAVE_POWER8
            if (cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;
            }
#endif /* HAVE_POWER8 */
            break;
#endif /* !HAVE_BIGENDIAN */
        }
    }

    if (c->flags & SWS_BITEXACT)
        return;

#if !HAVE_BIGENDIAN
    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB24:
            if (power8) {
                c->yuv2packed1 = yuv2rgb24_full_1_vsx;
                c->yuv2packed2 = yuv2rgb24_full_2_vsx;
                c->yuv2packedX = yuv2rgb24_full_X_vsx;
            }
            break;
        case AV_PIX_FMT_BGR24:
            if (power8) {
                c->yuv2packed1 = yuv2bgr24_full_1_vsx;
                c->yuv2packed2 = yuv2bgr24_full_2_vsx;
                c->yuv2packedX = yuv2bgr24_full_X_vsx;
            }
            break;
        case AV_PIX_FMT_BGRA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
                    c->yuv2packed2 = yuv2bgrx32_full_2_vsx;
                    c->yuv2packedX = yuv2bgrx32_full_X_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGBA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
                    c->yuv2packed2 = yuv2rgbx32_full_2_vsx;
                    c->yuv2packedX = yuv2rgbx32_full_X_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ARGB:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
                    c->yuv2packed2 = yuv2xrgb32_full_2_vsx;
                    c->yuv2packedX = yuv2xrgb32_full_X_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ABGR:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
                    c->yuv2packed2 = yuv2xbgr32_full_2_vsx;
                    c->yuv2packedX = yuv2xbgr32_full_X_vsx;
                }
            }
            break;
        }
    } else { /* !SWS_FULL_CHR_H_INT */
        switch (dstFormat) {
        case AV_PIX_FMT_YUYV422:
            c->yuv2packed1 = yuv2yuyv422_1_vsx;
            c->yuv2packed2 = yuv2yuyv422_2_vsx;
            c->yuv2packedX = yuv2yuyv422_X_vsx;
            break;
        case AV_PIX_FMT_YVYU422:
            c->yuv2packed1 = yuv2yvyu422_1_vsx;
            c->yuv2packed2 = yuv2yvyu422_2_vsx;
            c->yuv2packedX = yuv2yvyu422_X_vsx;
            break;
        case AV_PIX_FMT_UYVY422:
            c->yuv2packed1 = yuv2uyvy422_1_vsx;
            c->yuv2packed2 = yuv2uyvy422_2_vsx;
            c->yuv2packedX = yuv2uyvy422_X_vsx;
            break;
        case AV_PIX_FMT_BGRA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2bgrx32_1_vsx;
                    c->yuv2packed2 = yuv2bgrx32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGBA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2rgbx32_1_vsx;
                    c->yuv2packed2 = yuv2rgbx32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ARGB:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xrgb32_1_vsx;
                    c->yuv2packed2 = yuv2xrgb32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ABGR:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xbgr32_1_vsx;
                    c->yuv2packed2 = yuv2xbgr32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGB24:
            if (power8) {
                c->yuv2packed1 = yuv2rgb24_1_vsx;
                c->yuv2packed2 = yuv2rgb24_2_vsx;
            }
            break;
        case AV_PIX_FMT_BGR24:
            if (power8) {
                c->yuv2packed1 = yuv2bgr24_1_vsx;
                c->yuv2packed2 = yuv2bgr24_2_vsx;
            }
            break;
        }
    }
#endif /* !HAVE_BIGENDIAN */
#endif /* HAVE_VSX */
}