/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "vpx_dsp/ppc/bitdepth_conversion_vsx.h"
#include "vpx_dsp/ppc/types_vsx.h"
#include "vpx_dsp/ppc/inv_txfm_vsx.h"

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/inv_txfm.h"
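
/* Each cospi*_v vector is a splat of the corresponding cospi_k_64 constant
 * from vpx_dsp/txfm_common.h, i.e. round(16384 * cos(k * pi / 64)), so the
 * butterflies below can feed vec_mule/vec_mulo directly with 16-bit lanes. */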
static const int16x8_t cospi1_v = { 16364, 16364, 16364, 16364,
                                    16364, 16364, 16364, 16364 };
static const int16x8_t cospi1m_v = { -16364, -16364, -16364, -16364,
                                     -16364, -16364, -16364, -16364 };
static const int16x8_t cospi2_v = { 16305, 16305, 16305, 16305,
                                    16305, 16305, 16305, 16305 };
static const int16x8_t cospi2m_v = { -16305, -16305, -16305, -16305,
                                     -16305, -16305, -16305, -16305 };
static const int16x8_t cospi3_v = { 16207, 16207, 16207, 16207,
                                    16207, 16207, 16207, 16207 };
static const int16x8_t cospi4_v = { 16069, 16069, 16069, 16069,
                                    16069, 16069, 16069, 16069 };
static const int16x8_t cospi4m_v = { -16069, -16069, -16069, -16069,
                                     -16069, -16069, -16069, -16069 };
static const int16x8_t cospi5_v = { 15893, 15893, 15893, 15893,
                                    15893, 15893, 15893, 15893 };
static const int16x8_t cospi5m_v = { -15893, -15893, -15893, -15893,
                                     -15893, -15893, -15893, -15893 };
static const int16x8_t cospi6_v = { 15679, 15679, 15679, 15679,
                                    15679, 15679, 15679, 15679 };
static const int16x8_t cospi7_v = { 15426, 15426, 15426, 15426,
                                    15426, 15426, 15426, 15426 };
static const int16x8_t cospi8_v = { 15137, 15137, 15137, 15137,
                                    15137, 15137, 15137, 15137 };
static const int16x8_t cospi8m_v = { -15137, -15137, -15137, -15137,
                                     -15137, -15137, -15137, -15137 };
static const int16x8_t cospi9_v = { 14811, 14811, 14811, 14811,
                                    14811, 14811, 14811, 14811 };
static const int16x8_t cospi9m_v = { -14811, -14811, -14811, -14811,
                                     -14811, -14811, -14811, -14811 };
static const int16x8_t cospi10_v = { 14449, 14449, 14449, 14449,
                                     14449, 14449, 14449, 14449 };
static const int16x8_t cospi10m_v = { -14449, -14449, -14449, -14449,
                                      -14449, -14449, -14449, -14449 };
static const int16x8_t cospi11_v = { 14053, 14053, 14053, 14053,
                                     14053, 14053, 14053, 14053 };
static const int16x8_t cospi12_v = { 13623, 13623, 13623, 13623,
                                     13623, 13623, 13623, 13623 };
static const int16x8_t cospi12m_v = { -13623, -13623, -13623, -13623,
                                      -13623, -13623, -13623, -13623 };
static const int16x8_t cospi13_v = { 13160, 13160, 13160, 13160,
                                     13160, 13160, 13160, 13160 };
static const int16x8_t cospi13m_v = { -13160, -13160, -13160, -13160,
                                      -13160, -13160, -13160, -13160 };
static const int16x8_t cospi14_v = { 12665, 12665, 12665, 12665,
                                     12665, 12665, 12665, 12665 };
static const int16x8_t cospi15_v = { 12140, 12140, 12140, 12140,
                                     12140, 12140, 12140, 12140 };
static const int16x8_t cospi16_v = { 11585, 11585, 11585, 11585,
                                     11585, 11585, 11585, 11585 };
static const int16x8_t cospi16m_v = { -11585, -11585, -11585, -11585,
                                      -11585, -11585, -11585, -11585 };
static const int16x8_t cospi17_v = { 11003, 11003, 11003, 11003,
                                     11003, 11003, 11003, 11003 };
static const int16x8_t cospi17m_v = { -11003, -11003, -11003, -11003,
                                      -11003, -11003, -11003, -11003 };
static const int16x8_t cospi18_v = { 10394, 10394, 10394, 10394,
                                     10394, 10394, 10394, 10394 };
static const int16x8_t cospi18m_v = { -10394, -10394, -10394, -10394,
                                      -10394, -10394, -10394, -10394 };
static const int16x8_t cospi19_v = { 9760, 9760, 9760, 9760,
                                     9760, 9760, 9760, 9760 };
static const int16x8_t cospi20_v = { 9102, 9102, 9102, 9102,
                                     9102, 9102, 9102, 9102 };
static const int16x8_t cospi20m_v = { -9102, -9102, -9102, -9102,
                                      -9102, -9102, -9102, -9102 };
static const int16x8_t cospi21_v = { 8423, 8423, 8423, 8423,
                                     8423, 8423, 8423, 8423 };
static const int16x8_t cospi21m_v = { -8423, -8423, -8423, -8423,
                                      -8423, -8423, -8423, -8423 };
static const int16x8_t cospi22_v = { 7723, 7723, 7723, 7723,
                                     7723, 7723, 7723, 7723 };
static const int16x8_t cospi23_v = { 7005, 7005, 7005, 7005,
                                     7005, 7005, 7005, 7005 };
static const int16x8_t cospi24_v = { 6270, 6270, 6270, 6270,
                                     6270, 6270, 6270, 6270 };
static const int16x8_t cospi24m_v = { -6270, -6270, -6270, -6270,
                                      -6270, -6270, -6270, -6270 };
static const int16x8_t cospi25_v = { 5520, 5520, 5520, 5520,
                                     5520, 5520, 5520, 5520 };
static const int16x8_t cospi25m_v = { -5520, -5520, -5520, -5520,
                                      -5520, -5520, -5520, -5520 };
static const int16x8_t cospi26_v = { 4756, 4756, 4756, 4756,
                                     4756, 4756, 4756, 4756 };
static const int16x8_t cospi26m_v = { -4756, -4756, -4756, -4756,
                                      -4756, -4756, -4756, -4756 };
static const int16x8_t cospi27_v = { 3981, 3981, 3981, 3981,
                                     3981, 3981, 3981, 3981 };
static const int16x8_t cospi28_v = { 3196, 3196, 3196, 3196,
                                     3196, 3196, 3196, 3196 };
static const int16x8_t cospi28m_v = { -3196, -3196, -3196, -3196,
                                      -3196, -3196, -3196, -3196 };
static const int16x8_t cospi29_v = { 2404, 2404, 2404, 2404,
                                     2404, 2404, 2404, 2404 };
static const int16x8_t cospi29m_v = { -2404, -2404, -2404, -2404,
                                      -2404, -2404, -2404, -2404 };
static const int16x8_t cospi30_v = { 1606, 1606, 1606, 1606,
                                     1606, 1606, 1606, 1606 };
static const int16x8_t cospi31_v = { 804, 804, 804, 804, 804, 804, 804, 804 };
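
/* Splats of the sinpi_k_9 constants used by the 4-point ADST (iadst4). */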
static const int16x8_t sinpi_1_9_v = { 5283, 5283, 5283, 5283,
                                       5283, 5283, 5283, 5283 };
static const int16x8_t sinpi_2_9_v = { 9929, 9929, 9929, 9929,
                                       9929, 9929, 9929, 9929 };
static const int16x8_t sinpi_3_9_v = { 13377, 13377, 13377, 13377,
                                       13377, 13377, 13377, 13377 };
static const int16x8_t sinpi_4_9_v = { 15212, 15212, 15212, 15212,
                                       15212, 15212, 15212, 15212 };
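
/* Permute masks for vec_perm: tr8_mask0 concatenates the low 8 bytes of two
 * vectors, tr8_mask1 the high 8 bytes.  They stitch 64-bit halves together
 * in the 8x8 transpose below. */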
static uint8x16_t tr8_mask0 = {
  0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
};
static uint8x16_t tr8_mask1 = {
  0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
  0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
};
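
/* DCT_CONST_ROUND_SHIFT is the vector form of dct_const_round_shift():
 * (x + (1 << 13)) >> 14, removing the DCT_CONST_BITS scaling after each
 * constant multiply.  ROUND_SHIFT_INIT sets up its two vector operands. */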
#define ROUND_SHIFT_INIT \
  const int32x4_t shift = vec_sl(vec_splat_s32(1), vec_splat_u32(13)); \
  const uint32x4_t shift14 = vec_splat_u32(14);

#define DCT_CONST_ROUND_SHIFT(vec) vec = vec_sra(vec_add(vec, shift), shift14);

#define PIXEL_ADD_INIT \
  int16x8_t add8 = vec_splat_s16(8); \
  uint16x8_t shift4 = vec_splat_u16(4);

#define PIXEL_ADD4(out, in) out = vec_sra(vec_add(in, add8), shift4);
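
/* 4-point IDCT butterfly on two int16x8_t vectors (a full 4x4 block): the
 * even half uses the cospi16 butterfly, the odd half the cospi24/cospi8
 * rotation, and the trailing permute restores the output order. */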
#define IDCT4(in0, in1, out0, out1) \
  t0 = vec_add(in0, in1); \
  t1 = vec_sub(in0, in1); \
  tmp16_0 = vec_mergeh(t0, t1); \
  temp1 = vec_sra(vec_add(vec_mule(tmp16_0, cospi16_v), shift), shift14); \
  temp2 = vec_sra(vec_add(vec_mulo(tmp16_0, cospi16_v), shift), shift14); \
  \
  tmp16_0 = vec_mergel(in0, in1); \
  temp3 = vec_sub(vec_mule(tmp16_0, cospi24_v), vec_mulo(tmp16_0, cospi8_v)); \
  DCT_CONST_ROUND_SHIFT(temp3); \
  temp4 = vec_add(vec_mule(tmp16_0, cospi8_v), vec_mulo(tmp16_0, cospi24_v)); \
  DCT_CONST_ROUND_SHIFT(temp4); \
  \
  step0 = vec_packs(temp1, temp2); \
  step1 = vec_packs(temp4, temp3); \
  out0 = vec_add(step0, step1); \
  out1 = vec_sub(step0, step1); \
  out1 = vec_perm(out1, out1, mask0);
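
/* Add the reconstructed residual to the four unpacked destination rows,
 * saturate-pack to bytes, and scatter the 4x4 result back to dest through a
 * 16-byte scratch buffer. */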
#define PACK_STORE(v0, v1) \
  tmp16_0 = vec_add(vec_perm(d_u0, d_u1, tr8_mask0), v0); \
  tmp16_1 = vec_add(vec_perm(d_u2, d_u3, tr8_mask0), v1); \
  output_v = vec_packsu(tmp16_0, tmp16_1); \
  \
  vec_vsx_st(output_v, 0, tmp_dest); \
  for (i = 0; i < 4; i++) \
    for (j = 0; j < 4; j++) dest[j * stride + i] = tmp_dest[j * 4 + i];

void vpx_round_store4x4_vsx(int16x8_t *in, int16x8_t *out, uint8_t *dest,
                            int stride) {
  int i, j;
  uint8x16_t dest0 = vec_vsx_ld(0, dest);
  uint8x16_t dest1 = vec_vsx_ld(stride, dest);
  uint8x16_t dest2 = vec_vsx_ld(2 * stride, dest);
  uint8x16_t dest3 = vec_vsx_ld(3 * stride, dest);
  uint8x16_t zerov = vec_splat_u8(0);
  int16x8_t d_u0 = (int16x8_t)vec_mergeh(dest0, zerov);
  int16x8_t d_u1 = (int16x8_t)vec_mergeh(dest1, zerov);
  int16x8_t d_u2 = (int16x8_t)vec_mergeh(dest2, zerov);
  int16x8_t d_u3 = (int16x8_t)vec_mergeh(dest3, zerov);
  int16x8_t tmp16_0, tmp16_1;
  uint8x16_t output_v;
  uint8_t tmp_dest[16];
  PIXEL_ADD_INIT;
  PIXEL_ADD4(out[0], in[0]);
  PIXEL_ADD4(out[1], in[1]);
  PACK_STORE(out[0], out[1]);
}

void vpx_idct4_vsx(int16x8_t *in, int16x8_t *out) {
  int32x4_t temp1, temp2, temp3, temp4;
  int16x8_t step0, step1, tmp16_0;
  uint8x16_t mask0 = { 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
                       0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 };
  int16x8_t t0 = vec_mergeh(in[0], in[1]);
  int16x8_t t1 = vec_mergel(in[0], in[1]);
  ROUND_SHIFT_INIT
  in[0] = vec_mergeh(t0, t1);
  in[1] = vec_mergel(t0, t1);
  IDCT4(in[0], in[1], out[0], out[1]);
}

void vpx_idct4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  int16x8_t in[2], out[2];
  in[0] = load_tran_low(0, input);
  in[1] = load_tran_low(8 * sizeof(*input), input);
  // Rows
  vpx_idct4_vsx(in, out);
  // Columns
  vpx_idct4_vsx(out, in);
  vpx_round_store4x4_vsx(in, out, dest, stride);
}
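
/* Full transpose of an 8x8 int16 matrix held in eight vectors, built from
 * 16-bit and 32-bit merges plus the tr8_mask permutes. */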
#define TRANSPOSE8x8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
                     out3, out4, out5, out6, out7) \
  out0 = vec_mergeh(in0, in1); \
  out1 = vec_mergel(in0, in1); \
  out2 = vec_mergeh(in2, in3); \
  out3 = vec_mergel(in2, in3); \
  out4 = vec_mergeh(in4, in5); \
  out5 = vec_mergel(in4, in5); \
  out6 = vec_mergeh(in6, in7); \
  out7 = vec_mergel(in6, in7); \
  in0 = (int16x8_t)vec_mergeh((int32x4_t)out0, (int32x4_t)out2); \
  in1 = (int16x8_t)vec_mergel((int32x4_t)out0, (int32x4_t)out2); \
  in2 = (int16x8_t)vec_mergeh((int32x4_t)out1, (int32x4_t)out3); \
  in3 = (int16x8_t)vec_mergel((int32x4_t)out1, (int32x4_t)out3); \
  in4 = (int16x8_t)vec_mergeh((int32x4_t)out4, (int32x4_t)out6); \
  in5 = (int16x8_t)vec_mergel((int32x4_t)out4, (int32x4_t)out6); \
  in6 = (int16x8_t)vec_mergeh((int32x4_t)out5, (int32x4_t)out7); \
  in7 = (int16x8_t)vec_mergel((int32x4_t)out5, (int32x4_t)out7); \
  out0 = vec_perm(in0, in4, tr8_mask0); \
  out1 = vec_perm(in0, in4, tr8_mask1); \
  out2 = vec_perm(in1, in5, tr8_mask0); \
  out3 = vec_perm(in1, in5, tr8_mask1); \
  out4 = vec_perm(in2, in6, tr8_mask0); \
  out5 = vec_perm(in2, in6, tr8_mask1); \
  out6 = vec_perm(in3, in7, tr8_mask0); \
  out7 = vec_perm(in3, in7, tr8_mask1);

/* for the: temp1 = step[x] * cospi_q - step[y] * cospi_z
 *          temp2 = step[x] * cospi_z + step[y] * cospi_q */
#define STEP8_0(inpt0, inpt1, outpt0, outpt1, cospi0, cospi1) \
  tmp16_0 = vec_mergeh(inpt0, inpt1); \
  tmp16_1 = vec_mergel(inpt0, inpt1); \
  temp10 = vec_sub(vec_mule(tmp16_0, cospi0), vec_mulo(tmp16_0, cospi1)); \
  temp11 = vec_sub(vec_mule(tmp16_1, cospi0), vec_mulo(tmp16_1, cospi1)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt0 = vec_packs(temp10, temp11); \
  temp10 = vec_add(vec_mule(tmp16_0, cospi1), vec_mulo(tmp16_0, cospi0)); \
  temp11 = vec_add(vec_mule(tmp16_1, cospi1), vec_mulo(tmp16_1, cospi0)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt1 = vec_packs(temp10, temp11);

#define STEP8_1(inpt0, inpt1, outpt0, outpt1, cospi) \
  tmp16_2 = vec_sub(inpt0, inpt1); \
  tmp16_3 = vec_add(inpt0, inpt1); \
  tmp16_0 = vec_mergeh(tmp16_2, tmp16_3); \
  tmp16_1 = vec_mergel(tmp16_2, tmp16_3); \
  temp10 = vec_mule(tmp16_0, cospi); \
  temp11 = vec_mule(tmp16_1, cospi); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt0 = vec_packs(temp10, temp11); \
  temp10 = vec_mulo(tmp16_0, cospi); \
  temp11 = vec_mulo(tmp16_1, cospi); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt1 = vec_packs(temp10, temp11);
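
/* 8-point inverse DCT (four butterfly stages) over eight row vectors;
 * STEP8_0 is the two-constant rotation, STEP8_1 the shared-constant
 * (cospi16) butterfly. */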
#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7) \
  /* stage 1 */ \
  step0 = in0; \
  step2 = in4; \
  step1 = in2; \
  step3 = in6; \
  \
  STEP8_0(in1, in7, step4, step7, cospi28_v, cospi4_v); \
  STEP8_0(in5, in3, step5, step6, cospi12_v, cospi20_v); \
  \
  /* stage 2 */ \
  STEP8_1(step0, step2, in1, in0, cospi16_v); \
  STEP8_0(step1, step3, in2, in3, cospi24_v, cospi8_v); \
  in4 = vec_add(step4, step5); \
  in5 = vec_sub(step4, step5); \
  in6 = vec_sub(step7, step6); \
  in7 = vec_add(step6, step7); \
  \
  /* stage 3 */ \
  step0 = vec_add(in0, in3); \
  step1 = vec_add(in1, in2); \
  step2 = vec_sub(in1, in2); \
  step3 = vec_sub(in0, in3); \
  step4 = in4; \
  STEP8_1(in6, in5, step5, step6, cospi16_v); \
  step7 = in7; \
  \
  /* stage 4 */ \
  in0 = vec_add(step0, step7); \
  in1 = vec_add(step1, step6); \
  in2 = vec_add(step2, step5); \
  in3 = vec_add(step3, step4); \
  in4 = vec_sub(step3, step4); \
  in5 = vec_sub(step2, step5); \
  in6 = vec_sub(step1, step6); \
  in7 = vec_sub(step0, step7);

#define PIXEL_ADD(in, out, add, shiftx) \
  out = vec_add(vec_sra(vec_add(in, add), shiftx), out);
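
/* One dimension of the 8x8 IDCT: transpose the eight input vectors, then run
 * the 8-point transform on the rows.  Calling it twice covers rows and
 * columns. */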
void vpx_idct8_vsx(int16x8_t *in, int16x8_t *out) {
  int16x8_t step0, step1, step2, step3, step4, step5, step6, step7;
  int16x8_t tmp16_0, tmp16_1, tmp16_2, tmp16_3;
  int32x4_t temp10, temp11;
  ROUND_SHIFT_INIT;
  TRANSPOSE8x8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], out[0],
               out[1], out[2], out[3], out[4], out[5], out[6], out[7]);
  IDCT8(out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7]);
}

void vpx_round_store8x8_vsx(int16x8_t *in, uint8_t *dest, int stride) {
  uint8x16_t zerov = vec_splat_u8(0);
  uint8x16_t dest0 = vec_vsx_ld(0, dest);
  uint8x16_t dest1 = vec_vsx_ld(stride, dest);
  uint8x16_t dest2 = vec_vsx_ld(2 * stride, dest);
  uint8x16_t dest3 = vec_vsx_ld(3 * stride, dest);
  uint8x16_t dest4 = vec_vsx_ld(4 * stride, dest);
  uint8x16_t dest5 = vec_vsx_ld(5 * stride, dest);
  uint8x16_t dest6 = vec_vsx_ld(6 * stride, dest);
  uint8x16_t dest7 = vec_vsx_ld(7 * stride, dest);
  int16x8_t d_u0 = (int16x8_t)vec_mergeh(dest0, zerov);
  int16x8_t d_u1 = (int16x8_t)vec_mergeh(dest1, zerov);
  int16x8_t d_u2 = (int16x8_t)vec_mergeh(dest2, zerov);
  int16x8_t d_u3 = (int16x8_t)vec_mergeh(dest3, zerov);
  int16x8_t d_u4 = (int16x8_t)vec_mergeh(dest4, zerov);
  int16x8_t d_u5 = (int16x8_t)vec_mergeh(dest5, zerov);
  int16x8_t d_u6 = (int16x8_t)vec_mergeh(dest6, zerov);
  int16x8_t d_u7 = (int16x8_t)vec_mergeh(dest7, zerov);
  int16x8_t add = vec_sl(vec_splat_s16(8), vec_splat_u16(1));
  uint16x8_t shift5 = vec_splat_u16(5);
  uint8x16_t output0, output1, output2, output3;
  PIXEL_ADD(in[0], d_u0, add, shift5);
  PIXEL_ADD(in[1], d_u1, add, shift5);
  PIXEL_ADD(in[2], d_u2, add, shift5);
  PIXEL_ADD(in[3], d_u3, add, shift5);
  PIXEL_ADD(in[4], d_u4, add, shift5);
  PIXEL_ADD(in[5], d_u5, add, shift5);
  PIXEL_ADD(in[6], d_u6, add, shift5);
  PIXEL_ADD(in[7], d_u7, add, shift5);
  output0 = vec_packsu(d_u0, d_u1);
  output1 = vec_packsu(d_u2, d_u3);
  output2 = vec_packsu(d_u4, d_u5);
  output3 = vec_packsu(d_u6, d_u7);
  vec_vsx_st(xxpermdi(output0, dest0, 1), 0, dest);
  vec_vsx_st(xxpermdi(output0, dest1, 3), stride, dest);
  vec_vsx_st(xxpermdi(output1, dest2, 1), 2 * stride, dest);
  vec_vsx_st(xxpermdi(output1, dest3, 3), 3 * stride, dest);
  vec_vsx_st(xxpermdi(output2, dest4, 1), 4 * stride, dest);
  vec_vsx_st(xxpermdi(output2, dest5, 3), 5 * stride, dest);
  vec_vsx_st(xxpermdi(output3, dest6, 1), 6 * stride, dest);
  vec_vsx_st(xxpermdi(output3, dest7, 3), 7 * stride, dest);
}

void vpx_idct8x8_64_add_vsx(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  int16x8_t src[8], tmp[8];
  src[0] = load_tran_low(0, input);
  src[1] = load_tran_low(8 * sizeof(*input), input);
  src[2] = load_tran_low(16 * sizeof(*input), input);
  src[3] = load_tran_low(24 * sizeof(*input), input);
  src[4] = load_tran_low(32 * sizeof(*input), input);
  src[5] = load_tran_low(40 * sizeof(*input), input);
  src[6] = load_tran_low(48 * sizeof(*input), input);
  src[7] = load_tran_low(56 * sizeof(*input), input);
  vpx_idct8_vsx(src, tmp);
  vpx_idct8_vsx(tmp, src);
  vpx_round_store8x8_vsx(src, dest, stride);
}
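
/* Same butterfly as STEP8_1, but the add/sub is performed after the widening
 * multiply, so the 16-point stages keep full 32-bit precision before the
 * round shift. */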
#define STEP16_1(inpt0, inpt1, outpt0, outpt1, cospi) \
  tmp16_0 = vec_mergeh(inpt0, inpt1); \
  tmp16_1 = vec_mergel(inpt0, inpt1); \
  temp10 = vec_mule(tmp16_0, cospi); \
  temp11 = vec_mule(tmp16_1, cospi); \
  temp20 = vec_mulo(tmp16_0, cospi); \
  temp21 = vec_mulo(tmp16_1, cospi); \
  temp30 = vec_sub(temp10, temp20); \
  temp10 = vec_add(temp10, temp20); \
  temp20 = vec_sub(temp11, temp21); \
  temp21 = vec_add(temp11, temp21); \
  DCT_CONST_ROUND_SHIFT(temp30); \
  DCT_CONST_ROUND_SHIFT(temp20); \
  outpt0 = vec_packs(temp30, temp20); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp21); \
  outpt1 = vec_packs(temp10, temp21);
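
/* 16-point inverse DCT: the seven butterfly stages of the vpx idct16,
 * operating on sixteen int16x8_t row vectors at once. */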
#define IDCT16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, inA, inB, \
               inC, inD, inE, inF, out0, out1, out2, out3, out4, out5, out6, \
               out7, out8, out9, outA, outB, outC, outD, outE, outF) \
  /* stage 1 */ \
  /* out0 = in0; */ \
  out1 = in8; \
  out2 = in4; \
  out3 = inC; \
  out4 = in2; \
  out5 = inA; \
  out6 = in6; \
  out7 = inE; \
  out8 = in1; \
  out9 = in9; \
  outA = in5; \
  outB = inD; \
  outC = in3; \
  outD = inB; \
  outE = in7; \
  outF = inF; \
  \
  /* stage 2 */ \
  /* in0 = out0; */ \
  in1 = out1; \
  in2 = out2; \
  in3 = out3; \
  in4 = out4; \
  in5 = out5; \
  in6 = out6; \
  in7 = out7; \
  \
  STEP8_0(out8, outF, in8, inF, cospi30_v, cospi2_v); \
  STEP8_0(out9, outE, in9, inE, cospi14_v, cospi18_v); \
  STEP8_0(outA, outD, inA, inD, cospi22_v, cospi10_v); \
  STEP8_0(outB, outC, inB, inC, cospi6_v, cospi26_v); \
  \
  /* stage 3 */ \
  out0 = in0; \
  out1 = in1; \
  out2 = in2; \
  out3 = in3; \
  \
  STEP8_0(in4, in7, out4, out7, cospi28_v, cospi4_v); \
  STEP8_0(in5, in6, out5, out6, cospi12_v, cospi20_v); \
  \
  out8 = vec_add(in8, in9); \
  out9 = vec_sub(in8, in9); \
  outA = vec_sub(inB, inA); \
  outB = vec_add(inA, inB); \
  outC = vec_add(inC, inD); \
  outD = vec_sub(inC, inD); \
  outE = vec_sub(inF, inE); \
  outF = vec_add(inE, inF); \
  \
  /* stage 4 */ \
  STEP16_1(out0, out1, in1, in0, cospi16_v); \
  STEP8_0(out2, out3, in2, in3, cospi24_v, cospi8_v); \
  in4 = vec_add(out4, out5); \
  in5 = vec_sub(out4, out5); \
  in6 = vec_sub(out7, out6); \
  in7 = vec_add(out6, out7); \
  \
  in8 = out8; \
  inF = outF; \
  tmp16_0 = vec_mergeh(out9, outE); \
  tmp16_1 = vec_mergel(out9, outE); \
  temp10 = vec_sub(vec_mulo(tmp16_0, cospi24_v), vec_mule(tmp16_0, cospi8_v)); \
  temp11 = vec_sub(vec_mulo(tmp16_1, cospi24_v), vec_mule(tmp16_1, cospi8_v)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  in9 = vec_packs(temp10, temp11); \
  temp10 = vec_add(vec_mule(tmp16_0, cospi24_v), vec_mulo(tmp16_0, cospi8_v)); \
  temp11 = vec_add(vec_mule(tmp16_1, cospi24_v), vec_mulo(tmp16_1, cospi8_v)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  inE = vec_packs(temp10, temp11); \
  \
  tmp16_0 = vec_mergeh(outA, outD); \
  tmp16_1 = vec_mergel(outA, outD); \
  temp10 = \
      vec_sub(vec_mule(tmp16_0, cospi24m_v), vec_mulo(tmp16_0, cospi8_v)); \
  temp11 = \
      vec_sub(vec_mule(tmp16_1, cospi24m_v), vec_mulo(tmp16_1, cospi8_v)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  inA = vec_packs(temp10, temp11); \
  temp10 = vec_sub(vec_mulo(tmp16_0, cospi24_v), vec_mule(tmp16_0, cospi8_v)); \
  temp11 = vec_sub(vec_mulo(tmp16_1, cospi24_v), vec_mule(tmp16_1, cospi8_v)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  inD = vec_packs(temp10, temp11); \
  \
  inB = outB; \
  inC = outC; \
  \
  /* stage 5 */ \
  out0 = vec_add(in0, in3); \
  out1 = vec_add(in1, in2); \
  out2 = vec_sub(in1, in2); \
  out3 = vec_sub(in0, in3); \
  out4 = in4; \
  STEP16_1(in6, in5, out5, out6, cospi16_v); \
  out7 = in7; \
  \
  out8 = vec_add(in8, inB); \
  out9 = vec_add(in9, inA); \
  outA = vec_sub(in9, inA); \
  outB = vec_sub(in8, inB); \
  outC = vec_sub(inF, inC); \
  outD = vec_sub(inE, inD); \
  outE = vec_add(inD, inE); \
  outF = vec_add(inC, inF); \
  \
  /* stage 6 */ \
  in0 = vec_add(out0, out7); \
  in1 = vec_add(out1, out6); \
  in2 = vec_add(out2, out5); \
  in3 = vec_add(out3, out4); \
  in4 = vec_sub(out3, out4); \
  in5 = vec_sub(out2, out5); \
  in6 = vec_sub(out1, out6); \
  in7 = vec_sub(out0, out7); \
  in8 = out8; \
  in9 = out9; \
  STEP16_1(outD, outA, inA, inD, cospi16_v); \
  STEP16_1(outC, outB, inB, inC, cospi16_v); \
  inE = outE; \
  inF = outF; \
  \
  /* stage 7 */ \
  out0 = vec_add(in0, inF); \
  out1 = vec_add(in1, inE); \
  out2 = vec_add(in2, inD); \
  out3 = vec_add(in3, inC); \
  out4 = vec_add(in4, inB); \
  out5 = vec_add(in5, inA); \
  out6 = vec_add(in6, in9); \
  out7 = vec_add(in7, in8); \
  out8 = vec_sub(in7, in8); \
  out9 = vec_sub(in6, in9); \
  outA = vec_sub(in5, inA); \
  outB = vec_sub(in4, inB); \
  outC = vec_sub(in3, inC); \
  outD = vec_sub(in2, inD); \
  outE = vec_sub(in1, inE); \
  outF = vec_sub(in0, inF);
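
/* Reconstruct one 16-pixel row: unpack the destination bytes to shorts, add
 * the (x + 32) >> 6 rounded residual, saturate-pack and store. */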
#define PIXEL_ADD_STORE16(in0, in1, dst, offset) \
  d_uh = (int16x8_t)vec_mergeh(dst, zerov); \
  d_ul = (int16x8_t)vec_mergel(dst, zerov); \
  PIXEL_ADD(in0, d_uh, add, shift6); \
  PIXEL_ADD(in1, d_ul, add, shift6); \
  vec_vsx_st(vec_packsu(d_uh, d_ul), offset, dest);
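
/* Row transform for one 16x8 half of the coefficient block: transpose the
 * two 8x8 sub-blocks, then run IDCT16 on the resulting rows in place. */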
static void half_idct16x8_vsx(int16x8_t *src) {
  int16x8_t tmp0[8], tmp1[8];
  int32x4_t temp10, temp11, temp20, temp21, temp30;
  int16x8_t tmp16_0, tmp16_1;
  ROUND_SHIFT_INIT;
  TRANSPOSE8x8(src[0], src[2], src[4], src[6], src[8], src[10], src[12],
               src[14], tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5],
               tmp0[6], tmp0[7]);
  TRANSPOSE8x8(src[1], src[3], src[5], src[7], src[9], src[11], src[13],
               src[15], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],
               tmp1[6], tmp1[7]);
  IDCT16(tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5], tmp0[6], tmp0[7],
         tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5], tmp1[6], tmp1[7],
         src[0], src[2], src[4], src[6], src[8], src[10], src[12], src[14],
         src[1], src[3], src[5], src[7], src[9], src[11], src[13], src[15]);
}

void vpx_idct16_vsx(int16x8_t *src0, int16x8_t *src1) {
  int16x8_t tmp0[8], tmp1[8], tmp2[8], tmp3[8];
  int32x4_t temp10, temp11, temp20, temp21, temp30;
  int16x8_t tmp16_0, tmp16_1;
  ROUND_SHIFT_INIT;
  TRANSPOSE8x8(src0[0], src0[2], src0[4], src0[6], src0[8], src0[10], src0[12],
               src0[14], tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5],
               tmp0[6], tmp0[7]);
  TRANSPOSE8x8(src0[1], src0[3], src0[5], src0[7], src0[9], src0[11], src0[13],
               src0[15], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],
               tmp1[6], tmp1[7]);
  TRANSPOSE8x8(src1[0], src1[2], src1[4], src1[6], src1[8], src1[10], src1[12],
               src1[14], tmp2[0], tmp2[1], tmp2[2], tmp2[3], tmp2[4], tmp2[5],
               tmp2[6], tmp2[7]);
  TRANSPOSE8x8(src1[1], src1[3], src1[5], src1[7], src1[9], src1[11], src1[13],
               src1[15], tmp3[0], tmp3[1], tmp3[2], tmp3[3], tmp3[4], tmp3[5],
               tmp3[6], tmp3[7]);
  IDCT16(tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5], tmp0[6], tmp0[7],
         tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5], tmp1[6], tmp1[7],
         src0[0], src0[2], src0[4], src0[6], src0[8], src0[10], src0[12],
         src0[14], src1[0], src1[2], src1[4], src1[6], src1[8], src1[10],
         src1[12], src1[14]);
  IDCT16(tmp2[0], tmp2[1], tmp2[2], tmp2[3], tmp2[4], tmp2[5], tmp2[6], tmp2[7],
         tmp3[0], tmp3[1], tmp3[2], tmp3[3], tmp3[4], tmp3[5], tmp3[6], tmp3[7],
         src0[1], src0[3], src0[5], src0[7], src0[9], src0[11], src0[13],
         src0[15], src1[1], src1[3], src1[5], src1[7], src1[9], src1[11],
         src1[13], src1[15]);
}

void vpx_round_store16x16_vsx(int16x8_t *src0, int16x8_t *src1, uint8_t *dest,
                              int stride) {
  uint8x16_t destv[16];
  int16x8_t d_uh, d_ul;
  uint8x16_t zerov = vec_splat_u8(0);
  uint16x8_t shift6 = vec_splat_u16(6);
  int16x8_t add = vec_sl(vec_splat_s16(8), vec_splat_u16(2));
  // load dest
  LOAD_INPUT16(vec_vsx_ld, dest, 0, stride, destv);
  PIXEL_ADD_STORE16(src0[0], src0[1], destv[0], 0);
  PIXEL_ADD_STORE16(src0[2], src0[3], destv[1], stride);
  PIXEL_ADD_STORE16(src0[4], src0[5], destv[2], 2 * stride);
  PIXEL_ADD_STORE16(src0[6], src0[7], destv[3], 3 * stride);
  PIXEL_ADD_STORE16(src0[8], src0[9], destv[4], 4 * stride);
  PIXEL_ADD_STORE16(src0[10], src0[11], destv[5], 5 * stride);
  PIXEL_ADD_STORE16(src0[12], src0[13], destv[6], 6 * stride);
  PIXEL_ADD_STORE16(src0[14], src0[15], destv[7], 7 * stride);
  PIXEL_ADD_STORE16(src1[0], src1[1], destv[8], 8 * stride);
  PIXEL_ADD_STORE16(src1[2], src1[3], destv[9], 9 * stride);
  PIXEL_ADD_STORE16(src1[4], src1[5], destv[10], 10 * stride);
  PIXEL_ADD_STORE16(src1[6], src1[7], destv[11], 11 * stride);
  PIXEL_ADD_STORE16(src1[8], src1[9], destv[12], 12 * stride);
  PIXEL_ADD_STORE16(src1[10], src1[11], destv[13], 13 * stride);
  PIXEL_ADD_STORE16(src1[12], src1[13], destv[14], 14 * stride);
  PIXEL_ADD_STORE16(src1[14], src1[15], destv[15], 15 * stride);
}

void vpx_idct16x16_256_add_vsx(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  int16x8_t src0[16], src1[16];
  int16x8_t tmp0[8], tmp1[8], tmp2[8], tmp3[8];
  int32x4_t temp10, temp11, temp20, temp21, temp30;
  int16x8_t tmp16_0, tmp16_1;
  ROUND_SHIFT_INIT;
  LOAD_INPUT16(load_tran_low, input, 0, 8 * sizeof(*input), src0);
  LOAD_INPUT16(load_tran_low, input, 8 * 8 * 2 * sizeof(*input),
               8 * sizeof(*input), src1);
  // transform rows
  // transform the upper half of 16x16 matrix
  half_idct16x8_vsx(src0);
  TRANSPOSE8x8(src0[0], src0[2], src0[4], src0[6], src0[8], src0[10], src0[12],
               src0[14], tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5],
               tmp0[6], tmp0[7]);
  TRANSPOSE8x8(src0[1], src0[3], src0[5], src0[7], src0[9], src0[11], src0[13],
               src0[15], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],
               tmp1[6], tmp1[7]);
  // transform the lower half of 16x16 matrix
  half_idct16x8_vsx(src1);
  TRANSPOSE8x8(src1[0], src1[2], src1[4], src1[6], src1[8], src1[10], src1[12],
               src1[14], tmp2[0], tmp2[1], tmp2[2], tmp2[3], tmp2[4], tmp2[5],
               tmp2[6], tmp2[7]);
  TRANSPOSE8x8(src1[1], src1[3], src1[5], src1[7], src1[9], src1[11], src1[13],
               src1[15], tmp3[0], tmp3[1], tmp3[2], tmp3[3], tmp3[4], tmp3[5],
               tmp3[6], tmp3[7]);
  // transform columns
  // left half first
  IDCT16(tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5], tmp0[6], tmp0[7],
         tmp2[0], tmp2[1], tmp2[2], tmp2[3], tmp2[4], tmp2[5], tmp2[6], tmp2[7],
         src0[0], src0[2], src0[4], src0[6], src0[8], src0[10], src0[12],
         src0[14], src1[0], src1[2], src1[4], src1[6], src1[8], src1[10],
         src1[12], src1[14]);
  // right half
  IDCT16(tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5], tmp1[6], tmp1[7],
         tmp3[0], tmp3[1], tmp3[2], tmp3[3], tmp3[4], tmp3[5], tmp3[6], tmp3[7],
         src0[1], src0[3], src0[5], src0[7], src0[9], src0[11], src0[13],
         src0[15], src1[1], src1[3], src1[5], src1[7], src1[9], src1[11],
         src1[13], src1[15]);
  vpx_round_store16x16_vsx(src0, src1, dest, stride);
}
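
/* Load an 8x32 block of coefficients into 32 vectors (eight rows of 32,
 * four vectors per row), starting at the given offset. */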
#define LOAD_8x32(load, in00, in01, in02, in03, in10, in11, in12, in13, in20, \
                  in21, in22, in23, in30, in31, in32, in33, in40, in41, in42, \
                  in43, in50, in51, in52, in53, in60, in61, in62, in63, in70, \
                  in71, in72, in73, offset) \
  /* load the first row from the 8x32 block*/ \
  in00 = load(offset, input); \
  in01 = load(offset + 16, input); \
  in02 = load(offset + 2 * 16, input); \
  in03 = load(offset + 3 * 16, input); \
  \
  in10 = load(offset + 4 * 16, input); \
  in11 = load(offset + 5 * 16, input); \
  in12 = load(offset + 6 * 16, input); \
  in13 = load(offset + 7 * 16, input); \
  \
  in20 = load(offset + 8 * 16, input); \
  in21 = load(offset + 9 * 16, input); \
  in22 = load(offset + 10 * 16, input); \
  in23 = load(offset + 11 * 16, input); \
  \
  in30 = load(offset + 12 * 16, input); \
  in31 = load(offset + 13 * 16, input); \
  in32 = load(offset + 14 * 16, input); \
  in33 = load(offset + 15 * 16, input); \
  \
  in40 = load(offset + 16 * 16, input); \
  in41 = load(offset + 17 * 16, input); \
  in42 = load(offset + 18 * 16, input); \
  in43 = load(offset + 19 * 16, input); \
  \
  in50 = load(offset + 20 * 16, input); \
  in51 = load(offset + 21 * 16, input); \
  in52 = load(offset + 22 * 16, input); \
  in53 = load(offset + 23 * 16, input); \
  \
  in60 = load(offset + 24 * 16, input); \
  in61 = load(offset + 25 * 16, input); \
  in62 = load(offset + 26 * 16, input); \
  in63 = load(offset + 27 * 16, input); \
  \
  /* load the last row from the 8x32 block*/ \
  in70 = load(offset + 28 * 16, input); \
  in71 = load(offset + 29 * 16, input); \
  in72 = load(offset + 30 * 16, input); \
  in73 = load(offset + 31 * 16, input);

/* for the: temp1 = -step[x] * cospi_q + step[y] * cospi_z
 *          temp2 = step[x] * cospi_z + step[y] * cospi_q */
#define STEP32(inpt0, inpt1, outpt0, outpt1, cospi0, cospi1) \
  tmp16_0 = vec_mergeh(inpt0, inpt1); \
  tmp16_1 = vec_mergel(inpt0, inpt1); \
  temp10 = vec_sub(vec_mulo(tmp16_0, cospi1), vec_mule(tmp16_0, cospi0)); \
  temp11 = vec_sub(vec_mulo(tmp16_1, cospi1), vec_mule(tmp16_1, cospi0)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt0 = vec_packs(temp10, temp11); \
  temp10 = vec_add(vec_mule(tmp16_0, cospi1), vec_mulo(tmp16_0, cospi0)); \
  temp11 = vec_add(vec_mule(tmp16_1, cospi1), vec_mulo(tmp16_1, cospi0)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt1 = vec_packs(temp10, temp11);

/* for the: temp1 = -step[x] * cospi_q - step[y] * cospi_z
 *          temp2 = -step[x] * cospi_z + step[y] * cospi_q */
#define STEP32_1(inpt0, inpt1, outpt0, outpt1, cospi0, cospi1, cospi1m) \
  tmp16_0 = vec_mergeh(inpt0, inpt1); \
  tmp16_1 = vec_mergel(inpt0, inpt1); \
  temp10 = vec_sub(vec_mulo(tmp16_0, cospi1m), vec_mule(tmp16_0, cospi0)); \
  temp11 = vec_sub(vec_mulo(tmp16_1, cospi1m), vec_mule(tmp16_1, cospi0)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt0 = vec_packs(temp10, temp11); \
  temp10 = vec_sub(vec_mulo(tmp16_0, cospi0), vec_mule(tmp16_0, cospi1)); \
  temp11 = vec_sub(vec_mulo(tmp16_1, cospi0), vec_mule(tmp16_1, cospi1)); \
  DCT_CONST_ROUND_SHIFT(temp10); \
  DCT_CONST_ROUND_SHIFT(temp11); \
  outpt1 = vec_packs(temp10, temp11);
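
/* 32-point inverse DCT over a 32x8 slice held as four groups of eight row
 * vectors (in0..in3); out is the ping-pong scratch for the intermediate
 * stages, and the final results land back in in0..in3. */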
#define IDCT32(in0, in1, in2, in3, out) \
  \
  /* stage 1 */ \
  /* out[0][0] = in[0][0]; */ \
  out[0][1] = in2[0]; \
  out[0][2] = in1[0]; \
  out[0][3] = in3[0]; \
  out[0][4] = in0[4]; \
  out[0][5] = in2[4]; \
  out[0][6] = in1[4]; \
  out[0][7] = in3[4]; \
  out[1][0] = in0[2]; \
  out[1][1] = in2[2]; \
  out[1][2] = in1[2]; \
  out[1][3] = in3[2]; \
  out[1][4] = in0[6]; \
  out[1][5] = in2[6]; \
  out[1][6] = in1[6]; \
  out[1][7] = in3[6]; \
  \
  STEP8_0(in0[1], in3[7], out[2][0], out[3][7], cospi31_v, cospi1_v); \
  STEP8_0(in2[1], in1[7], out[2][1], out[3][6], cospi15_v, cospi17_v); \
  STEP8_0(in1[1], in2[7], out[2][2], out[3][5], cospi23_v, cospi9_v); \
  STEP8_0(in3[1], in0[7], out[2][3], out[3][4], cospi7_v, cospi25_v); \
  STEP8_0(in0[5], in3[3], out[2][4], out[3][3], cospi27_v, cospi5_v); \
  STEP8_0(in2[5], in1[3], out[2][5], out[3][2], cospi11_v, cospi21_v); \
  STEP8_0(in1[5], in2[3], out[2][6], out[3][1], cospi19_v, cospi13_v); \
  STEP8_0(in3[5], in0[3], out[2][7], out[3][0], cospi3_v, cospi29_v); \
  \
  /* stage 2 */ \
  /* in0[0] = out[0][0]; */ \
  in0[1] = out[0][1]; \
  in0[2] = out[0][2]; \
  in0[3] = out[0][3]; \
  in0[4] = out[0][4]; \
  in0[5] = out[0][5]; \
  in0[6] = out[0][6]; \
  in0[7] = out[0][7]; \
  \
  STEP8_0(out[1][0], out[1][7], in1[0], in1[7], cospi30_v, cospi2_v); \
  STEP8_0(out[1][1], out[1][6], in1[1], in1[6], cospi14_v, cospi18_v); \
  STEP8_0(out[1][2], out[1][5], in1[2], in1[5], cospi22_v, cospi10_v); \
  STEP8_0(out[1][3], out[1][4], in1[3], in1[4], cospi6_v, cospi26_v); \
  \
  in2[0] = vec_add(out[2][0], out[2][1]); \
  in2[1] = vec_sub(out[2][0], out[2][1]); \
  in2[2] = vec_sub(out[2][3], out[2][2]); \
  in2[3] = vec_add(out[2][3], out[2][2]); \
  in2[4] = vec_add(out[2][4], out[2][5]); \
  in2[5] = vec_sub(out[2][4], out[2][5]); \
  in2[6] = vec_sub(out[2][7], out[2][6]); \
  in2[7] = vec_add(out[2][7], out[2][6]); \
  in3[0] = vec_add(out[3][0], out[3][1]); \
  in3[1] = vec_sub(out[3][0], out[3][1]); \
  in3[2] = vec_sub(out[3][3], out[3][2]); \
  in3[3] = vec_add(out[3][3], out[3][2]); \
  in3[4] = vec_add(out[3][4], out[3][5]); \
  in3[5] = vec_sub(out[3][4], out[3][5]); \
  in3[6] = vec_sub(out[3][7], out[3][6]); \
  in3[7] = vec_add(out[3][6], out[3][7]); \
  \
  /* stage 3 */ \
  out[0][0] = in0[0]; \
  out[0][1] = in0[1]; \
  out[0][2] = in0[2]; \
  out[0][3] = in0[3]; \
  \
  STEP8_0(in0[4], in0[7], out[0][4], out[0][7], cospi28_v, cospi4_v); \
  STEP8_0(in0[5], in0[6], out[0][5], out[0][6], cospi12_v, cospi20_v); \
  \
  out[1][0] = vec_add(in1[0], in1[1]); \
  out[1][1] = vec_sub(in1[0], in1[1]); \
  out[1][2] = vec_sub(in1[3], in1[2]); \
  out[1][3] = vec_add(in1[2], in1[3]); \
  out[1][4] = vec_add(in1[4], in1[5]); \
  out[1][5] = vec_sub(in1[4], in1[5]); \
  out[1][6] = vec_sub(in1[7], in1[6]); \
  out[1][7] = vec_add(in1[6], in1[7]); \
  \
  out[2][0] = in2[0]; \
  out[3][7] = in3[7]; \
  STEP32(in2[1], in3[6], out[2][1], out[3][6], cospi4_v, cospi28_v); \
  STEP32_1(in2[2], in3[5], out[2][2], out[3][5], cospi28_v, cospi4_v, \
           cospi4m_v); \
  out[2][3] = in2[3]; \
  out[2][4] = in2[4]; \
  STEP32(in2[5], in3[2], out[2][5], out[3][2], cospi20_v, cospi12_v); \
  STEP32_1(in2[6], in3[1], out[2][6], out[3][1], cospi12_v, cospi20_v, \
           cospi20m_v); \
  out[2][7] = in2[7]; \
  out[3][0] = in3[0]; \
  out[3][3] = in3[3]; \
  out[3][4] = in3[4]; \
  \
  /* stage 4 */ \
  STEP16_1(out[0][0], out[0][1], in0[1], in0[0], cospi16_v); \
  STEP8_0(out[0][2], out[0][3], in0[2], in0[3], cospi24_v, cospi8_v); \
  in0[4] = vec_add(out[0][4], out[0][5]); \
  in0[5] = vec_sub(out[0][4], out[0][5]); \
  in0[6] = vec_sub(out[0][7], out[0][6]); \
  in0[7] = vec_add(out[0][7], out[0][6]); \
  \
  in1[0] = out[1][0]; \
  in1[7] = out[1][7]; \
  STEP32(out[1][1], out[1][6], in1[1], in1[6], cospi8_v, cospi24_v); \
  STEP32_1(out[1][2], out[1][5], in1[2], in1[5], cospi24_v, cospi8_v, \
           cospi8m_v); \
  in1[3] = out[1][3]; \
  in1[4] = out[1][4]; \
  \
  in2[0] = vec_add(out[2][0], out[2][3]); \
  in2[1] = vec_add(out[2][1], out[2][2]); \
  in2[2] = vec_sub(out[2][1], out[2][2]); \
  in2[3] = vec_sub(out[2][0], out[2][3]); \
  in2[4] = vec_sub(out[2][7], out[2][4]); \
  in2[5] = vec_sub(out[2][6], out[2][5]); \
  in2[6] = vec_add(out[2][5], out[2][6]); \
  in2[7] = vec_add(out[2][4], out[2][7]); \
  \
  in3[0] = vec_add(out[3][0], out[3][3]); \
  in3[1] = vec_add(out[3][1], out[3][2]); \
  in3[2] = vec_sub(out[3][1], out[3][2]); \
  in3[3] = vec_sub(out[3][0], out[3][3]); \
  in3[4] = vec_sub(out[3][7], out[3][4]); \
  in3[5] = vec_sub(out[3][6], out[3][5]); \
  in3[6] = vec_add(out[3][5], out[3][6]); \
  in3[7] = vec_add(out[3][4], out[3][7]); \
  \
  /* stage 5 */ \
  out[0][0] = vec_add(in0[0], in0[3]); \
  out[0][1] = vec_add(in0[1], in0[2]); \
  out[0][2] = vec_sub(in0[1], in0[2]); \
  out[0][3] = vec_sub(in0[0], in0[3]); \
  out[0][4] = in0[4]; \
  STEP16_1(in0[6], in0[5], out[0][5], out[0][6], cospi16_v); \
  out[0][7] = in0[7]; \
  \
  out[1][0] = vec_add(in1[0], in1[3]); \
  out[1][1] = vec_add(in1[1], in1[2]); \
  out[1][2] = vec_sub(in1[1], in1[2]); \
  out[1][3] = vec_sub(in1[0], in1[3]); \
  out[1][4] = vec_sub(in1[7], in1[4]); \
  out[1][5] = vec_sub(in1[6], in1[5]); \
  out[1][6] = vec_add(in1[5], in1[6]); \
  out[1][7] = vec_add(in1[4], in1[7]); \
  \
  out[2][0] = in2[0]; \
  out[2][1] = in2[1]; \
  STEP32(in2[2], in3[5], out[2][2], out[3][5], cospi8_v, cospi24_v); \
  STEP32(in2[3], in3[4], out[2][3], out[3][4], cospi8_v, cospi24_v); \
  STEP32_1(in2[4], in3[3], out[2][4], out[3][3], cospi24_v, cospi8_v, \
           cospi8m_v); \
  STEP32_1(in2[5], in3[2], out[2][5], out[3][2], cospi24_v, cospi8_v, \
           cospi8m_v); \
  out[2][6] = in2[6]; \
  out[2][7] = in2[7]; \
  out[3][0] = in3[0]; \
  out[3][1] = in3[1]; \
  out[3][6] = in3[6]; \
  out[3][7] = in3[7]; \
  \
  /* stage 6 */ \
  in0[0] = vec_add(out[0][0], out[0][7]); \
  in0[1] = vec_add(out[0][1], out[0][6]); \
  in0[2] = vec_add(out[0][2], out[0][5]); \
  in0[3] = vec_add(out[0][3], out[0][4]); \
  in0[4] = vec_sub(out[0][3], out[0][4]); \
  in0[5] = vec_sub(out[0][2], out[0][5]); \
  in0[6] = vec_sub(out[0][1], out[0][6]); \
  in0[7] = vec_sub(out[0][0], out[0][7]); \
  in1[0] = out[1][0]; \
  in1[1] = out[1][1]; \
  STEP16_1(out[1][5], out[1][2], in1[2], in1[5], cospi16_v); \
  STEP16_1(out[1][4], out[1][3], in1[3], in1[4], cospi16_v); \
  in1[6] = out[1][6]; \
  in1[7] = out[1][7]; \
  \
  in2[0] = vec_add(out[2][0], out[2][7]); \
  in2[1] = vec_add(out[2][1], out[2][6]); \
  in2[2] = vec_add(out[2][2], out[2][5]); \
  in2[3] = vec_add(out[2][3], out[2][4]); \
  in2[4] = vec_sub(out[2][3], out[2][4]); \
  in2[5] = vec_sub(out[2][2], out[2][5]); \
  in2[6] = vec_sub(out[2][1], out[2][6]); \
  in2[7] = vec_sub(out[2][0], out[2][7]); \
  \
  in3[0] = vec_sub(out[3][7], out[3][0]); \
  in3[1] = vec_sub(out[3][6], out[3][1]); \
  in3[2] = vec_sub(out[3][5], out[3][2]); \
  in3[3] = vec_sub(out[3][4], out[3][3]); \
  in3[4] = vec_add(out[3][4], out[3][3]); \
  in3[5] = vec_add(out[3][5], out[3][2]); \
  in3[6] = vec_add(out[3][6], out[3][1]); \
  in3[7] = vec_add(out[3][7], out[3][0]); \
  \
  /* stage 7 */ \
  out[0][0] = vec_add(in0[0], in1[7]); \
  out[0][1] = vec_add(in0[1], in1[6]); \
  out[0][2] = vec_add(in0[2], in1[5]); \
  out[0][3] = vec_add(in0[3], in1[4]); \
  out[0][4] = vec_add(in0[4], in1[3]); \
  out[0][5] = vec_add(in0[5], in1[2]); \
  out[0][6] = vec_add(in0[6], in1[1]); \
  out[0][7] = vec_add(in0[7], in1[0]); \
  out[1][0] = vec_sub(in0[7], in1[0]); \
  out[1][1] = vec_sub(in0[6], in1[1]); \
  out[1][2] = vec_sub(in0[5], in1[2]); \
  out[1][3] = vec_sub(in0[4], in1[3]); \
  out[1][4] = vec_sub(in0[3], in1[4]); \
  out[1][5] = vec_sub(in0[2], in1[5]); \
  out[1][6] = vec_sub(in0[1], in1[6]); \
  out[1][7] = vec_sub(in0[0], in1[7]); \
  \
  out[2][0] = in2[0]; \
  out[2][1] = in2[1]; \
  out[2][2] = in2[2]; \
  out[2][3] = in2[3]; \
  STEP16_1(in3[3], in2[4], out[2][4], out[3][3], cospi16_v); \
  STEP16_1(in3[2], in2[5], out[2][5], out[3][2], cospi16_v); \
  STEP16_1(in3[1], in2[6], out[2][6], out[3][1], cospi16_v); \
  STEP16_1(in3[0], in2[7], out[2][7], out[3][0], cospi16_v); \
  out[3][4] = in3[4]; \
  out[3][5] = in3[5]; \
  out[3][6] = in3[6]; \
  out[3][7] = in3[7]; \
  \
  /* final */ \
  in0[0] = vec_add(out[0][0], out[3][7]); \
  in0[1] = vec_add(out[0][1], out[3][6]); \
  in0[2] = vec_add(out[0][2], out[3][5]); \
  in0[3] = vec_add(out[0][3], out[3][4]); \
  in0[4] = vec_add(out[0][4], out[3][3]); \
  in0[5] = vec_add(out[0][5], out[3][2]); \
  in0[6] = vec_add(out[0][6], out[3][1]); \
  in0[7] = vec_add(out[0][7], out[3][0]); \
  in1[0] = vec_add(out[1][0], out[2][7]); \
  in1[1] = vec_add(out[1][1], out[2][6]); \
  in1[2] = vec_add(out[1][2], out[2][5]); \
  in1[3] = vec_add(out[1][3], out[2][4]); \
  in1[4] = vec_add(out[1][4], out[2][3]); \
  in1[5] = vec_add(out[1][5], out[2][2]); \
  in1[6] = vec_add(out[1][6], out[2][1]); \
  in1[7] = vec_add(out[1][7], out[2][0]); \
  in2[0] = vec_sub(out[1][7], out[2][0]); \
  in2[1] = vec_sub(out[1][6], out[2][1]); \
  in2[2] = vec_sub(out[1][5], out[2][2]); \
  in2[3] = vec_sub(out[1][4], out[2][3]); \
  in2[4] = vec_sub(out[1][3], out[2][4]); \
  in2[5] = vec_sub(out[1][2], out[2][5]); \
  in2[6] = vec_sub(out[1][1], out[2][6]); \
  in2[7] = vec_sub(out[1][0], out[2][7]); \
  in3[0] = vec_sub(out[0][7], out[3][0]); \
  in3[1] = vec_sub(out[0][6], out[3][1]); \
  in3[2] = vec_sub(out[0][5], out[3][2]); \
  in3[3] = vec_sub(out[0][4], out[3][3]); \
  in3[4] = vec_sub(out[0][3], out[3][4]); \
  in3[5] = vec_sub(out[0][2], out[3][5]); \
  in3[6] = vec_sub(out[0][1], out[3][6]); \
  in3[7] = vec_sub(out[0][0], out[3][7]);

// NOT A FULL TRANSPOSE! Transposes just each 8x8 block in each row,
// does not transpose rows
#define TRANSPOSE_8x32(in, out) \
  /* transpose 4 of 8x8 blocks */ \
  TRANSPOSE8x8(in[0][0], in[0][1], in[0][2], in[0][3], in[0][4], in[0][5], \
               in[0][6], in[0][7], out[0][0], out[0][1], out[0][2], out[0][3], \
               out[0][4], out[0][5], out[0][6], out[0][7]); \
  TRANSPOSE8x8(in[1][0], in[1][1], in[1][2], in[1][3], in[1][4], in[1][5], \
               in[1][6], in[1][7], out[1][0], out[1][1], out[1][2], out[1][3], \
               out[1][4], out[1][5], out[1][6], out[1][7]); \
  TRANSPOSE8x8(in[2][0], in[2][1], in[2][2], in[2][3], in[2][4], in[2][5], \
               in[2][6], in[2][7], out[2][0], out[2][1], out[2][2], out[2][3], \
               out[2][4], out[2][5], out[2][6], out[2][7]); \
  TRANSPOSE8x8(in[3][0], in[3][1], in[3][2], in[3][3], in[3][4], in[3][5], \
               in[3][6], in[3][7], out[3][0], out[3][1], out[3][2], out[3][3], \
               out[3][4], out[3][5], out[3][6], out[3][7]);
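
/* Reconstruct one 32-pixel row as two 16-byte halves: load dest, add the
 * (x + 32) >> 6 rounded residual, saturate-pack and store each half.
 * ADD_STORE_BLOCK applies this to the eight rows of one 8x32 slice. */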
#define PIXEL_ADD_STORE32(in0, in1, in2, in3, step) \
  dst = vec_vsx_ld((step)*stride, dest); \
  d_uh = (int16x8_t)vec_mergeh(dst, zerov); \
  d_ul = (int16x8_t)vec_mergel(dst, zerov); \
  PIXEL_ADD(in0, d_uh, add, shift6); \
  PIXEL_ADD(in1, d_ul, add, shift6); \
  vec_vsx_st(vec_packsu(d_uh, d_ul), (step)*stride, dest); \
  dst = vec_vsx_ld((step)*stride + 16, dest); \
  d_uh = (int16x8_t)vec_mergeh(dst, zerov); \
  d_ul = (int16x8_t)vec_mergel(dst, zerov); \
  PIXEL_ADD(in2, d_uh, add, shift6); \
  PIXEL_ADD(in3, d_ul, add, shift6); \
  vec_vsx_st(vec_packsu(d_uh, d_ul), (step)*stride + 16, dest);

#define ADD_STORE_BLOCK(in, offset) \
  PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], (offset) + 0); \
  PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], (offset) + 1); \
  PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], (offset) + 2); \
  PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], (offset) + 3); \
  PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], (offset) + 4); \
  PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], (offset) + 5); \
  PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], (offset) + 6); \
  PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], (offset) + 7);

void vpx_idct32x32_1024_add_vsx(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  int16x8_t src0[4][8], src1[4][8], src2[4][8], src3[4][8], tmp[4][8];
  int16x8_t tmp16_0, tmp16_1;
  int32x4_t temp10, temp11, temp20, temp21, temp30;
  uint8x16_t dst;
  int16x8_t d_uh, d_ul;
  int16x8_t add = vec_sl(vec_splat_s16(8), vec_splat_u16(2));
  uint16x8_t shift6 = vec_splat_u16(6);
  uint8x16_t zerov = vec_splat_u8(0);
  ROUND_SHIFT_INIT;
  LOAD_8x32(load_tran_low, src0[0][0], src0[1][0], src0[2][0], src0[3][0],
            src0[0][1], src0[1][1], src0[2][1], src0[3][1], src0[0][2],
            src0[1][2], src0[2][2], src0[3][2], src0[0][3], src0[1][3],
            src0[2][3], src0[3][3], src0[0][4], src0[1][4], src0[2][4],
            src0[3][4], src0[0][5], src0[1][5], src0[2][5], src0[3][5],
            src0[0][6], src0[1][6], src0[2][6], src0[3][6], src0[0][7],
            src0[1][7], src0[2][7], src0[3][7], 0);
  // Rows
  // transpose the first row of 8x8 blocks
  TRANSPOSE_8x32(src0, tmp);
  // transform the 32x8 column
  IDCT32(tmp[0], tmp[1], tmp[2], tmp[3], src0);
  TRANSPOSE_8x32(tmp, src0);
  LOAD_8x32(load_tran_low, src1[0][0], src1[1][0], src1[2][0], src1[3][0],
            src1[0][1], src1[1][1], src1[2][1], src1[3][1], src1[0][2],
            src1[1][2], src1[2][2], src1[3][2], src1[0][3], src1[1][3],
            src1[2][3], src1[3][3], src1[0][4], src1[1][4], src1[2][4],
            src1[3][4], src1[0][5], src1[1][5], src1[2][5], src1[3][5],
            src1[0][6], src1[1][6], src1[2][6], src1[3][6], src1[0][7],
            src1[1][7], src1[2][7], src1[3][7], 512);
  TRANSPOSE_8x32(src1, tmp);
  IDCT32(tmp[0], tmp[1], tmp[2], tmp[3], src1);
  TRANSPOSE_8x32(tmp, src1);
  LOAD_8x32(load_tran_low, src2[0][0], src2[1][0], src2[2][0], src2[3][0],
            src2[0][1], src2[1][1], src2[2][1], src2[3][1], src2[0][2],
            src2[1][2], src2[2][2], src2[3][2], src2[0][3], src2[1][3],
            src2[2][3], src2[3][3], src2[0][4], src2[1][4], src2[2][4],
            src2[3][4], src2[0][5], src2[1][5], src2[2][5], src2[3][5],
            src2[0][6], src2[1][6], src2[2][6], src2[3][6], src2[0][7],
            src2[1][7], src2[2][7], src2[3][7], 1024);
  TRANSPOSE_8x32(src2, tmp);
  IDCT32(tmp[0], tmp[1], tmp[2], tmp[3], src2);
  TRANSPOSE_8x32(tmp, src2);
  LOAD_8x32(load_tran_low, src3[0][0], src3[1][0], src3[2][0], src3[3][0],
            src3[0][1], src3[1][1], src3[2][1], src3[3][1], src3[0][2],
            src3[1][2], src3[2][2], src3[3][2], src3[0][3], src3[1][3],
            src3[2][3], src3[3][3], src3[0][4], src3[1][4], src3[2][4],
            src3[3][4], src3[0][5], src3[1][5], src3[2][5], src3[3][5],
            src3[0][6], src3[1][6], src3[2][6], src3[3][6], src3[0][7],
            src3[1][7], src3[2][7], src3[3][7], 1536);
  TRANSPOSE_8x32(src3, tmp);
  IDCT32(tmp[0], tmp[1], tmp[2], tmp[3], src3);
  TRANSPOSE_8x32(tmp, src3);
  // Columns
  IDCT32(src0[0], src1[0], src2[0], src3[0], tmp);
  IDCT32(src0[1], src1[1], src2[1], src3[1], tmp);
  IDCT32(src0[2], src1[2], src2[2], src3[2], tmp);
  IDCT32(src0[3], src1[3], src2[3], src3[3], tmp);
  ADD_STORE_BLOCK(src0, 0);
  ADD_STORE_BLOCK(src1, 8);
  ADD_STORE_BLOCK(src2, 16);
  ADD_STORE_BLOCK(src3, 24);
}
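
/* Inverse 4x4 Walsh-Hadamard helpers: TRANSFORM_COLS runs one pass of the
 * iwht butterfly in 32 bits and packs back to shorts, TRANSPOSE_WHT swaps
 * rows and columns of the 4x4 block held in two vectors. */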
#define TRANSFORM_COLS \
  v32_a = vec_add(v32_a, v32_c); \
  v32_d = vec_sub(v32_d, v32_b); \
  v32_e = vec_sub(v32_a, v32_d); \
  v32_e = vec_sra(v32_e, one); \
  v32_b = vec_sub(v32_e, v32_b); \
  v32_c = vec_sub(v32_e, v32_c); \
  v32_a = vec_sub(v32_a, v32_b); \
  v32_d = vec_add(v32_d, v32_c); \
  v_a = vec_packs(v32_a, v32_b); \
  v_c = vec_packs(v32_c, v32_d);
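
// Transpose the 4x4 block held in v_a (rows 0-1) and v_c (rows 2-3) by
// interleaving 16-bit elements twice.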
#define TRANSPOSE_WHT \
  tmp_a = vec_mergeh(v_a, v_c); \
  tmp_c = vec_mergel(v_a, v_c); \
  v_a = vec_mergeh(tmp_a, tmp_c); \
  v_c = vec_mergel(tmp_a, tmp_c);
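
// 4x4 inverse Walsh-Hadamard transform for the full 16-coefficient case with
// add to dest: shift the input right by 2, then run TRANSPOSE_WHT and
// TRANSFORM_COLS twice (once per dimension) and add the result to the 4x4
// destination block via PACK_STORE.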
void vpx_iwht4x4_16_add_vsx(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  int16x8_t v_a = load_tran_low(0, input);
  int16x8_t v_c = load_tran_low(8 * sizeof(*input), input);
  int16x8_t tmp_a, tmp_c;
  uint16x8_t two = vec_splat_u16(2);
  uint32x4_t one = vec_splat_u32(1);
  int16x8_t tmp16_0, tmp16_1;
  int32x4_t v32_a, v32_c, v32_d, v32_b, v32_e;
  uint8x16_t dest0 = vec_vsx_ld(0, dest);
  uint8x16_t dest1 = vec_vsx_ld(stride, dest);
  uint8x16_t dest2 = vec_vsx_ld(2 * stride, dest);
  uint8x16_t dest3 = vec_vsx_ld(3 * stride, dest);
  int16x8_t d_u0 = (int16x8_t)unpack_to_u16_h(dest0);
  int16x8_t d_u1 = (int16x8_t)unpack_to_u16_h(dest1);
  int16x8_t d_u2 = (int16x8_t)unpack_to_u16_h(dest2);
  int16x8_t d_u3 = (int16x8_t)unpack_to_u16_h(dest3);
  uint8x16_t output_v;
  uint8_t tmp_dest[16];
  int i, j;

  v_a = vec_sra(v_a, two);
  v_c = vec_sra(v_c, two);

  TRANSPOSE_WHT;

  v32_a = vec_unpackh(v_a);
  v32_c = vec_unpackl(v_a);
  v32_d = vec_unpackh(v_c);
  v32_b = vec_unpackl(v_c);

  TRANSFORM_COLS;

  TRANSPOSE_WHT;

  v32_a = vec_unpackh(v_a);
  v32_c = vec_unpackl(v_a);
  v32_d = vec_unpackh(v_c);
  v32_b = vec_unpackl(v_c);

  TRANSFORM_COLS;

  PACK_STORE(v_a, v_c);
}
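
// 4-point inverse ADST on a 4x4 block held in in[0]/in[1]. The sinpi
// constants are interleaved with vec_mergel so that each vec_msum computes
// x * sinpi_a + y * sinpi_b per 32-bit lane; the sums are then rounded with
// DCT_CONST_ROUND_SHIFT and packed back to 16 bits with saturation.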
void vp9_iadst4_vsx(int16x8_t *in, int16x8_t *out) {
  int16x8_t sinpi_1_3_v, sinpi_4_2_v, sinpi_2_3_v, sinpi_1_4_v, sinpi_12_n3_v;
  int32x4_t v_v[5], u_v[4];
  int32x4_t zerov = vec_splat_s32(0);
  int16x8_t tmp0, tmp1;
  int16x8_t zero16v = vec_splat_s16(0);
  uint32x4_t shift16 = vec_sl(vec_splat_u32(8), vec_splat_u32(1));
  ROUND_SHIFT_INIT;

  sinpi_1_3_v = vec_mergel(sinpi_1_9_v, sinpi_3_9_v);
  sinpi_4_2_v = vec_mergel(sinpi_4_9_v, sinpi_2_9_v);
  sinpi_2_3_v = vec_mergel(sinpi_2_9_v, sinpi_3_9_v);
  sinpi_1_4_v = vec_mergel(sinpi_1_9_v, sinpi_4_9_v);
  sinpi_12_n3_v = vec_mergel(vec_add(sinpi_1_9_v, sinpi_2_9_v),
                             vec_sub(zero16v, sinpi_3_9_v));

  tmp0 = (int16x8_t)vec_mergeh((int32x4_t)in[0], (int32x4_t)in[1]);
  tmp1 = (int16x8_t)vec_mergel((int32x4_t)in[0], (int32x4_t)in[1]);
  in[0] = (int16x8_t)vec_mergeh((int32x4_t)tmp0, (int32x4_t)tmp1);
  in[1] = (int16x8_t)vec_mergel((int32x4_t)tmp0, (int32x4_t)tmp1);

  v_v[0] = vec_msum(in[0], sinpi_1_3_v, zerov);
  v_v[1] = vec_msum(in[1], sinpi_4_2_v, zerov);
  v_v[2] = vec_msum(in[0], sinpi_2_3_v, zerov);
  v_v[3] = vec_msum(in[1], sinpi_1_4_v, zerov);
  v_v[4] = vec_msum(in[0], sinpi_12_n3_v, zerov);

  in[0] = vec_sub(in[0], in[1]);
  in[1] = (int16x8_t)vec_sra((int32x4_t)in[1], shift16);
  in[0] = vec_add(in[0], in[1]);
  in[0] = (int16x8_t)vec_sl((int32x4_t)in[0], shift16);

  u_v[0] = vec_add(v_v[0], v_v[1]);
  u_v[1] = vec_sub(v_v[2], v_v[3]);
  u_v[2] = vec_msum(in[0], sinpi_1_3_v, zerov);
  u_v[3] = vec_sub(v_v[1], v_v[3]);
  u_v[3] = vec_add(u_v[3], v_v[4]);

  DCT_CONST_ROUND_SHIFT(u_v[0]);
  DCT_CONST_ROUND_SHIFT(u_v[1]);
  DCT_CONST_ROUND_SHIFT(u_v[2]);
  DCT_CONST_ROUND_SHIFT(u_v[3]);

  out[0] = vec_packs(u_v[0], u_v[1]);
  out[1] = vec_packs(u_v[2], u_v[3]);
}
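
// MSUM_ROUND_SHIFT multiply-sums interleaved 16-bit inputs against a cospi
// pair and rounds the 32-bit result with DCT_CONST_ROUND_SHIFT;
// IADST_WRAPLOW applies it to two input vectors and packs the results back
// into one saturated 16-bit vector.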
#define MSUM_ROUND_SHIFT(a, b, cospi) \
  b = vec_msums(a, cospi, zerov); \
  DCT_CONST_ROUND_SHIFT(b);

#define IADST_WRAPLOW(in0, in1, tmp0, tmp1, out, cospi) \
  MSUM_ROUND_SHIFT(in0, tmp0, cospi); \
  MSUM_ROUND_SHIFT(in1, tmp1, cospi); \
  out = vec_packs(tmp0, tmp1);
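
// 8-point inverse ADST on an 8x8 block. The input is transposed, then each
// stage interleaves operand pairs, multiply-sums them against merged cospi
// constants, rounds and packs back to 16 bits; the final outputs are written
// with the sign flips the ADST output ordering requires.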
void vp9_iadst8_vsx(int16x8_t *in, int16x8_t *out) {
  int32x4_t tmp0[16], tmp1[16];
  int32x4_t zerov = vec_splat_s32(0);
  int16x8_t zero16v = vec_splat_s16(0);
  int16x8_t cospi_p02_p30_v = vec_mergel(cospi2_v, cospi30_v);
  int16x8_t cospi_p30_m02_v = vec_mergel(cospi30_v, cospi2m_v);
  int16x8_t cospi_p10_p22_v = vec_mergel(cospi10_v, cospi22_v);
  int16x8_t cospi_p22_m10_v = vec_mergel(cospi22_v, cospi10m_v);
  int16x8_t cospi_p18_p14_v = vec_mergel(cospi18_v, cospi14_v);
  int16x8_t cospi_p14_m18_v = vec_mergel(cospi14_v, cospi18m_v);
  int16x8_t cospi_p26_p06_v = vec_mergel(cospi26_v, cospi6_v);
  int16x8_t cospi_p06_m26_v = vec_mergel(cospi6_v, cospi26m_v);
  int16x8_t cospi_p08_p24_v = vec_mergel(cospi8_v, cospi24_v);
  int16x8_t cospi_p24_m08_v = vec_mergel(cospi24_v, cospi8m_v);
  int16x8_t cospi_m24_p08_v = vec_mergel(cospi24m_v, cospi8_v);
  int16x8_t cospi_p16_m16_v = vec_mergel(cospi16_v, cospi16m_v);
  ROUND_SHIFT_INIT;

  TRANSPOSE8x8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], out[0],
               out[1], out[2], out[3], out[4], out[5], out[6], out[7]);

  // stage 1
  // interleave and multiply/add into 32-bit integer
  in[0] = vec_mergeh(out[7], out[0]);
  in[1] = vec_mergel(out[7], out[0]);
  in[2] = vec_mergeh(out[5], out[2]);
  in[3] = vec_mergel(out[5], out[2]);
  in[4] = vec_mergeh(out[3], out[4]);
  in[5] = vec_mergel(out[3], out[4]);
  in[6] = vec_mergeh(out[1], out[6]);
  in[7] = vec_mergel(out[1], out[6]);

  tmp1[0] = vec_msum(in[0], cospi_p02_p30_v, zerov);
  tmp1[1] = vec_msum(in[1], cospi_p02_p30_v, zerov);
  tmp1[2] = vec_msum(in[0], cospi_p30_m02_v, zerov);
  tmp1[3] = vec_msum(in[1], cospi_p30_m02_v, zerov);
  tmp1[4] = vec_msum(in[2], cospi_p10_p22_v, zerov);
  tmp1[5] = vec_msum(in[3], cospi_p10_p22_v, zerov);
  tmp1[6] = vec_msum(in[2], cospi_p22_m10_v, zerov);
  tmp1[7] = vec_msum(in[3], cospi_p22_m10_v, zerov);
  tmp1[8] = vec_msum(in[4], cospi_p18_p14_v, zerov);
  tmp1[9] = vec_msum(in[5], cospi_p18_p14_v, zerov);
  tmp1[10] = vec_msum(in[4], cospi_p14_m18_v, zerov);
  tmp1[11] = vec_msum(in[5], cospi_p14_m18_v, zerov);
  tmp1[12] = vec_msum(in[6], cospi_p26_p06_v, zerov);
  tmp1[13] = vec_msum(in[7], cospi_p26_p06_v, zerov);
  tmp1[14] = vec_msum(in[6], cospi_p06_m26_v, zerov);
  tmp1[15] = vec_msum(in[7], cospi_p06_m26_v, zerov);

  tmp0[0] = vec_add(tmp1[0], tmp1[8]);
  tmp0[1] = vec_add(tmp1[1], tmp1[9]);
  tmp0[2] = vec_add(tmp1[2], tmp1[10]);
  tmp0[3] = vec_add(tmp1[3], tmp1[11]);
  tmp0[4] = vec_add(tmp1[4], tmp1[12]);
  tmp0[5] = vec_add(tmp1[5], tmp1[13]);
  tmp0[6] = vec_add(tmp1[6], tmp1[14]);
  tmp0[7] = vec_add(tmp1[7], tmp1[15]);
  tmp0[8] = vec_sub(tmp1[0], tmp1[8]);
  tmp0[9] = vec_sub(tmp1[1], tmp1[9]);
  tmp0[10] = vec_sub(tmp1[2], tmp1[10]);
  tmp0[11] = vec_sub(tmp1[3], tmp1[11]);
  tmp0[12] = vec_sub(tmp1[4], tmp1[12]);
  tmp0[13] = vec_sub(tmp1[5], tmp1[13]);
  tmp0[14] = vec_sub(tmp1[6], tmp1[14]);
  tmp0[15] = vec_sub(tmp1[7], tmp1[15]);

  // shift and rounding
  DCT_CONST_ROUND_SHIFT(tmp0[0]);
  DCT_CONST_ROUND_SHIFT(tmp0[1]);
  DCT_CONST_ROUND_SHIFT(tmp0[2]);
  DCT_CONST_ROUND_SHIFT(tmp0[3]);
  DCT_CONST_ROUND_SHIFT(tmp0[4]);
  DCT_CONST_ROUND_SHIFT(tmp0[5]);
  DCT_CONST_ROUND_SHIFT(tmp0[6]);
  DCT_CONST_ROUND_SHIFT(tmp0[7]);
  DCT_CONST_ROUND_SHIFT(tmp0[8]);
  DCT_CONST_ROUND_SHIFT(tmp0[9]);
  DCT_CONST_ROUND_SHIFT(tmp0[10]);
  DCT_CONST_ROUND_SHIFT(tmp0[11]);
  DCT_CONST_ROUND_SHIFT(tmp0[12]);
  DCT_CONST_ROUND_SHIFT(tmp0[13]);
  DCT_CONST_ROUND_SHIFT(tmp0[14]);
  DCT_CONST_ROUND_SHIFT(tmp0[15]);

  // back to 16-bit
  out[0] = vec_packs(tmp0[0], tmp0[1]);
  out[1] = vec_packs(tmp0[2], tmp0[3]);
  out[2] = vec_packs(tmp0[4], tmp0[5]);
  out[3] = vec_packs(tmp0[6], tmp0[7]);
  out[4] = vec_packs(tmp0[8], tmp0[9]);
  out[5] = vec_packs(tmp0[10], tmp0[11]);
  out[6] = vec_packs(tmp0[12], tmp0[13]);
  out[7] = vec_packs(tmp0[14], tmp0[15]);

  // stage 2
  in[0] = vec_add(out[0], out[2]);
  in[1] = vec_add(out[1], out[3]);
  in[2] = vec_sub(out[0], out[2]);
  in[3] = vec_sub(out[1], out[3]);
  in[4] = vec_mergeh(out[4], out[5]);
  in[5] = vec_mergel(out[4], out[5]);
  in[6] = vec_mergeh(out[6], out[7]);
  in[7] = vec_mergel(out[6], out[7]);

  tmp1[0] = vec_msum(in[4], cospi_p08_p24_v, zerov);
  tmp1[1] = vec_msum(in[5], cospi_p08_p24_v, zerov);
  tmp1[2] = vec_msum(in[4], cospi_p24_m08_v, zerov);
  tmp1[3] = vec_msum(in[5], cospi_p24_m08_v, zerov);
  tmp1[4] = vec_msum(in[6], cospi_m24_p08_v, zerov);
  tmp1[5] = vec_msum(in[7], cospi_m24_p08_v, zerov);
  tmp1[6] = vec_msum(in[6], cospi_p08_p24_v, zerov);
  tmp1[7] = vec_msum(in[7], cospi_p08_p24_v, zerov);

  tmp0[0] = vec_add(tmp1[0], tmp1[4]);
  tmp0[1] = vec_add(tmp1[1], tmp1[5]);
  tmp0[2] = vec_add(tmp1[2], tmp1[6]);
  tmp0[3] = vec_add(tmp1[3], tmp1[7]);
  tmp0[4] = vec_sub(tmp1[0], tmp1[4]);
  tmp0[5] = vec_sub(tmp1[1], tmp1[5]);
  tmp0[6] = vec_sub(tmp1[2], tmp1[6]);
  tmp0[7] = vec_sub(tmp1[3], tmp1[7]);

  DCT_CONST_ROUND_SHIFT(tmp0[0]);
  DCT_CONST_ROUND_SHIFT(tmp0[1]);
  DCT_CONST_ROUND_SHIFT(tmp0[2]);
  DCT_CONST_ROUND_SHIFT(tmp0[3]);
  DCT_CONST_ROUND_SHIFT(tmp0[4]);
  DCT_CONST_ROUND_SHIFT(tmp0[5]);
  DCT_CONST_ROUND_SHIFT(tmp0[6]);
  DCT_CONST_ROUND_SHIFT(tmp0[7]);

  in[4] = vec_packs(tmp0[0], tmp0[1]);
  in[5] = vec_packs(tmp0[2], tmp0[3]);
  in[6] = vec_packs(tmp0[4], tmp0[5]);
  in[7] = vec_packs(tmp0[6], tmp0[7]);

  // stage 3
  out[0] = vec_mergeh(in[2], in[3]);
  out[1] = vec_mergel(in[2], in[3]);
  out[2] = vec_mergeh(in[6], in[7]);
  out[3] = vec_mergel(in[6], in[7]);

  IADST_WRAPLOW(out[0], out[1], tmp0[0], tmp0[1], in[2], cospi16_v);
  IADST_WRAPLOW(out[0], out[1], tmp0[0], tmp0[1], in[3], cospi_p16_m16_v);
  IADST_WRAPLOW(out[2], out[3], tmp0[0], tmp0[1], in[6], cospi16_v);
  IADST_WRAPLOW(out[2], out[3], tmp0[0], tmp0[1], in[7], cospi_p16_m16_v);

  out[0] = in[0];
  out[2] = in[6];
  out[4] = in[3];
  out[6] = in[5];
  out[1] = vec_sub(zero16v, in[4]);
  out[3] = vec_sub(zero16v, in[2]);
  out[5] = vec_sub(zero16v, in[7]);
  out[7] = vec_sub(zero16v, in[1]);
}
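
// First three stages of the 16-point inverse ADST on one 16x8 half. The
// final cospi16 stage is left to the caller: out[0..7] receives the
// interleaved operand pairs that vpx_iadst16_vsx feeds through
// IADST_WRAPLOW, while the pass-through outputs remain in in[].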
static void iadst16x8_vsx(int16x8_t *in, int16x8_t *out) {
  int32x4_t tmp0[32], tmp1[32];
  int16x8_t tmp16_0[16];
  int16x8_t cospi_p01_p31 = vec_mergel(cospi1_v, cospi31_v);
  int16x8_t cospi_p31_m01 = vec_mergel(cospi31_v, cospi1m_v);
  int16x8_t cospi_p05_p27 = vec_mergel(cospi5_v, cospi27_v);
  int16x8_t cospi_p27_m05 = vec_mergel(cospi27_v, cospi5m_v);
  int16x8_t cospi_p09_p23 = vec_mergel(cospi9_v, cospi23_v);
  int16x8_t cospi_p23_m09 = vec_mergel(cospi23_v, cospi9m_v);
  int16x8_t cospi_p13_p19 = vec_mergel(cospi13_v, cospi19_v);
  int16x8_t cospi_p19_m13 = vec_mergel(cospi19_v, cospi13m_v);
  int16x8_t cospi_p17_p15 = vec_mergel(cospi17_v, cospi15_v);
  int16x8_t cospi_p15_m17 = vec_mergel(cospi15_v, cospi17m_v);
  int16x8_t cospi_p21_p11 = vec_mergel(cospi21_v, cospi11_v);
  int16x8_t cospi_p11_m21 = vec_mergel(cospi11_v, cospi21m_v);
  int16x8_t cospi_p25_p07 = vec_mergel(cospi25_v, cospi7_v);
  int16x8_t cospi_p07_m25 = vec_mergel(cospi7_v, cospi25m_v);
  int16x8_t cospi_p29_p03 = vec_mergel(cospi29_v, cospi3_v);
  int16x8_t cospi_p03_m29 = vec_mergel(cospi3_v, cospi29m_v);
  int16x8_t cospi_p04_p28 = vec_mergel(cospi4_v, cospi28_v);
  int16x8_t cospi_p28_m04 = vec_mergel(cospi28_v, cospi4m_v);
  int16x8_t cospi_p20_p12 = vec_mergel(cospi20_v, cospi12_v);
  int16x8_t cospi_p12_m20 = vec_mergel(cospi12_v, cospi20m_v);
  int16x8_t cospi_m28_p04 = vec_mergel(cospi28m_v, cospi4_v);
  int16x8_t cospi_m12_p20 = vec_mergel(cospi12m_v, cospi20_v);
  int16x8_t cospi_p08_p24 = vec_mergel(cospi8_v, cospi24_v);
  int16x8_t cospi_p24_m08 = vec_mergel(cospi24_v, cospi8m_v);
  int16x8_t cospi_m24_p08 = vec_mergel(cospi24m_v, cospi8_v);
  int32x4_t zerov = vec_splat_s32(0);
  ROUND_SHIFT_INIT;

  tmp16_0[0] = vec_mergeh(in[15], in[0]);
  tmp16_0[1] = vec_mergel(in[15], in[0]);
  tmp16_0[2] = vec_mergeh(in[13], in[2]);
  tmp16_0[3] = vec_mergel(in[13], in[2]);
  tmp16_0[4] = vec_mergeh(in[11], in[4]);
  tmp16_0[5] = vec_mergel(in[11], in[4]);
  tmp16_0[6] = vec_mergeh(in[9], in[6]);
  tmp16_0[7] = vec_mergel(in[9], in[6]);
  tmp16_0[8] = vec_mergeh(in[7], in[8]);
  tmp16_0[9] = vec_mergel(in[7], in[8]);
  tmp16_0[10] = vec_mergeh(in[5], in[10]);
  tmp16_0[11] = vec_mergel(in[5], in[10]);
  tmp16_0[12] = vec_mergeh(in[3], in[12]);
  tmp16_0[13] = vec_mergel(in[3], in[12]);
  tmp16_0[14] = vec_mergeh(in[1], in[14]);
  tmp16_0[15] = vec_mergel(in[1], in[14]);

  tmp0[0] = vec_msum(tmp16_0[0], cospi_p01_p31, zerov);
  tmp0[1] = vec_msum(tmp16_0[1], cospi_p01_p31, zerov);
  tmp0[2] = vec_msum(tmp16_0[0], cospi_p31_m01, zerov);
  tmp0[3] = vec_msum(tmp16_0[1], cospi_p31_m01, zerov);
  tmp0[4] = vec_msum(tmp16_0[2], cospi_p05_p27, zerov);
  tmp0[5] = vec_msum(tmp16_0[3], cospi_p05_p27, zerov);
  tmp0[6] = vec_msum(tmp16_0[2], cospi_p27_m05, zerov);
  tmp0[7] = vec_msum(tmp16_0[3], cospi_p27_m05, zerov);
  tmp0[8] = vec_msum(tmp16_0[4], cospi_p09_p23, zerov);
  tmp0[9] = vec_msum(tmp16_0[5], cospi_p09_p23, zerov);
  tmp0[10] = vec_msum(tmp16_0[4], cospi_p23_m09, zerov);
  tmp0[11] = vec_msum(tmp16_0[5], cospi_p23_m09, zerov);
  tmp0[12] = vec_msum(tmp16_0[6], cospi_p13_p19, zerov);
  tmp0[13] = vec_msum(tmp16_0[7], cospi_p13_p19, zerov);
  tmp0[14] = vec_msum(tmp16_0[6], cospi_p19_m13, zerov);
  tmp0[15] = vec_msum(tmp16_0[7], cospi_p19_m13, zerov);
  tmp0[16] = vec_msum(tmp16_0[8], cospi_p17_p15, zerov);
  tmp0[17] = vec_msum(tmp16_0[9], cospi_p17_p15, zerov);
  tmp0[18] = vec_msum(tmp16_0[8], cospi_p15_m17, zerov);
  tmp0[19] = vec_msum(tmp16_0[9], cospi_p15_m17, zerov);
  tmp0[20] = vec_msum(tmp16_0[10], cospi_p21_p11, zerov);
  tmp0[21] = vec_msum(tmp16_0[11], cospi_p21_p11, zerov);
  tmp0[22] = vec_msum(tmp16_0[10], cospi_p11_m21, zerov);
  tmp0[23] = vec_msum(tmp16_0[11], cospi_p11_m21, zerov);
  tmp0[24] = vec_msum(tmp16_0[12], cospi_p25_p07, zerov);
  tmp0[25] = vec_msum(tmp16_0[13], cospi_p25_p07, zerov);
  tmp0[26] = vec_msum(tmp16_0[12], cospi_p07_m25, zerov);
  tmp0[27] = vec_msum(tmp16_0[13], cospi_p07_m25, zerov);
  tmp0[28] = vec_msum(tmp16_0[14], cospi_p29_p03, zerov);
  tmp0[29] = vec_msum(tmp16_0[15], cospi_p29_p03, zerov);
  tmp0[30] = vec_msum(tmp16_0[14], cospi_p03_m29, zerov);
  tmp0[31] = vec_msum(tmp16_0[15], cospi_p03_m29, zerov);

  tmp1[0] = vec_add(tmp0[0], tmp0[16]);
  tmp1[1] = vec_add(tmp0[1], tmp0[17]);
  tmp1[2] = vec_add(tmp0[2], tmp0[18]);
  tmp1[3] = vec_add(tmp0[3], tmp0[19]);
  tmp1[4] = vec_add(tmp0[4], tmp0[20]);
  tmp1[5] = vec_add(tmp0[5], tmp0[21]);
  tmp1[6] = vec_add(tmp0[6], tmp0[22]);
  tmp1[7] = vec_add(tmp0[7], tmp0[23]);
  tmp1[8] = vec_add(tmp0[8], tmp0[24]);
  tmp1[9] = vec_add(tmp0[9], tmp0[25]);
  tmp1[10] = vec_add(tmp0[10], tmp0[26]);
  tmp1[11] = vec_add(tmp0[11], tmp0[27]);
  tmp1[12] = vec_add(tmp0[12], tmp0[28]);
  tmp1[13] = vec_add(tmp0[13], tmp0[29]);
  tmp1[14] = vec_add(tmp0[14], tmp0[30]);
  tmp1[15] = vec_add(tmp0[15], tmp0[31]);
  tmp1[16] = vec_sub(tmp0[0], tmp0[16]);
  tmp1[17] = vec_sub(tmp0[1], tmp0[17]);
  tmp1[18] = vec_sub(tmp0[2], tmp0[18]);
  tmp1[19] = vec_sub(tmp0[3], tmp0[19]);
  tmp1[20] = vec_sub(tmp0[4], tmp0[20]);
  tmp1[21] = vec_sub(tmp0[5], tmp0[21]);
  tmp1[22] = vec_sub(tmp0[6], tmp0[22]);
  tmp1[23] = vec_sub(tmp0[7], tmp0[23]);
  tmp1[24] = vec_sub(tmp0[8], tmp0[24]);
  tmp1[25] = vec_sub(tmp0[9], tmp0[25]);
  tmp1[26] = vec_sub(tmp0[10], tmp0[26]);
  tmp1[27] = vec_sub(tmp0[11], tmp0[27]);
  tmp1[28] = vec_sub(tmp0[12], tmp0[28]);
  tmp1[29] = vec_sub(tmp0[13], tmp0[29]);
  tmp1[30] = vec_sub(tmp0[14], tmp0[30]);
  tmp1[31] = vec_sub(tmp0[15], tmp0[31]);

  DCT_CONST_ROUND_SHIFT(tmp1[0]);
  DCT_CONST_ROUND_SHIFT(tmp1[1]);
  DCT_CONST_ROUND_SHIFT(tmp1[2]);
  DCT_CONST_ROUND_SHIFT(tmp1[3]);
  DCT_CONST_ROUND_SHIFT(tmp1[4]);
  DCT_CONST_ROUND_SHIFT(tmp1[5]);
  DCT_CONST_ROUND_SHIFT(tmp1[6]);
  DCT_CONST_ROUND_SHIFT(tmp1[7]);
  DCT_CONST_ROUND_SHIFT(tmp1[8]);
  DCT_CONST_ROUND_SHIFT(tmp1[9]);
  DCT_CONST_ROUND_SHIFT(tmp1[10]);
  DCT_CONST_ROUND_SHIFT(tmp1[11]);
  DCT_CONST_ROUND_SHIFT(tmp1[12]);
  DCT_CONST_ROUND_SHIFT(tmp1[13]);
  DCT_CONST_ROUND_SHIFT(tmp1[14]);
  DCT_CONST_ROUND_SHIFT(tmp1[15]);
  DCT_CONST_ROUND_SHIFT(tmp1[16]);
  DCT_CONST_ROUND_SHIFT(tmp1[17]);
  DCT_CONST_ROUND_SHIFT(tmp1[18]);
  DCT_CONST_ROUND_SHIFT(tmp1[19]);
  DCT_CONST_ROUND_SHIFT(tmp1[20]);
  DCT_CONST_ROUND_SHIFT(tmp1[21]);
  DCT_CONST_ROUND_SHIFT(tmp1[22]);
  DCT_CONST_ROUND_SHIFT(tmp1[23]);
  DCT_CONST_ROUND_SHIFT(tmp1[24]);
  DCT_CONST_ROUND_SHIFT(tmp1[25]);
  DCT_CONST_ROUND_SHIFT(tmp1[26]);
  DCT_CONST_ROUND_SHIFT(tmp1[27]);
  DCT_CONST_ROUND_SHIFT(tmp1[28]);
  DCT_CONST_ROUND_SHIFT(tmp1[29]);
  DCT_CONST_ROUND_SHIFT(tmp1[30]);
  DCT_CONST_ROUND_SHIFT(tmp1[31]);

  in[0] = vec_packs(tmp1[0], tmp1[1]);
  in[1] = vec_packs(tmp1[2], tmp1[3]);
  in[2] = vec_packs(tmp1[4], tmp1[5]);
  in[3] = vec_packs(tmp1[6], tmp1[7]);
  in[4] = vec_packs(tmp1[8], tmp1[9]);
  in[5] = vec_packs(tmp1[10], tmp1[11]);
  in[6] = vec_packs(tmp1[12], tmp1[13]);
  in[7] = vec_packs(tmp1[14], tmp1[15]);
  in[8] = vec_packs(tmp1[16], tmp1[17]);
  in[9] = vec_packs(tmp1[18], tmp1[19]);
  in[10] = vec_packs(tmp1[20], tmp1[21]);
  in[11] = vec_packs(tmp1[22], tmp1[23]);
  in[12] = vec_packs(tmp1[24], tmp1[25]);
  in[13] = vec_packs(tmp1[26], tmp1[27]);
  in[14] = vec_packs(tmp1[28], tmp1[29]);
  in[15] = vec_packs(tmp1[30], tmp1[31]);

  // stage 2
  tmp16_0[0] = vec_mergeh(in[8], in[9]);
  tmp16_0[1] = vec_mergel(in[8], in[9]);
  tmp16_0[2] = vec_mergeh(in[10], in[11]);
  tmp16_0[3] = vec_mergel(in[10], in[11]);
  tmp16_0[4] = vec_mergeh(in[12], in[13]);
  tmp16_0[5] = vec_mergel(in[12], in[13]);
  tmp16_0[6] = vec_mergeh(in[14], in[15]);
  tmp16_0[7] = vec_mergel(in[14], in[15]);

  tmp0[0] = vec_msum(tmp16_0[0], cospi_p04_p28, zerov);
  tmp0[1] = vec_msum(tmp16_0[1], cospi_p04_p28, zerov);
  tmp0[2] = vec_msum(tmp16_0[0], cospi_p28_m04, zerov);
  tmp0[3] = vec_msum(tmp16_0[1], cospi_p28_m04, zerov);
  tmp0[4] = vec_msum(tmp16_0[2], cospi_p20_p12, zerov);
  tmp0[5] = vec_msum(tmp16_0[3], cospi_p20_p12, zerov);
  tmp0[6] = vec_msum(tmp16_0[2], cospi_p12_m20, zerov);
  tmp0[7] = vec_msum(tmp16_0[3], cospi_p12_m20, zerov);
  tmp0[8] = vec_msum(tmp16_0[4], cospi_m28_p04, zerov);
  tmp0[9] = vec_msum(tmp16_0[5], cospi_m28_p04, zerov);
  tmp0[10] = vec_msum(tmp16_0[4], cospi_p04_p28, zerov);
  tmp0[11] = vec_msum(tmp16_0[5], cospi_p04_p28, zerov);
  tmp0[12] = vec_msum(tmp16_0[6], cospi_m12_p20, zerov);
  tmp0[13] = vec_msum(tmp16_0[7], cospi_m12_p20, zerov);
  tmp0[14] = vec_msum(tmp16_0[6], cospi_p20_p12, zerov);
  tmp0[15] = vec_msum(tmp16_0[7], cospi_p20_p12, zerov);

  tmp1[0] = vec_add(tmp0[0], tmp0[8]);
  tmp1[1] = vec_add(tmp0[1], tmp0[9]);
  tmp1[2] = vec_add(tmp0[2], tmp0[10]);
  tmp1[3] = vec_add(tmp0[3], tmp0[11]);
  tmp1[4] = vec_add(tmp0[4], tmp0[12]);
  tmp1[5] = vec_add(tmp0[5], tmp0[13]);
  tmp1[6] = vec_add(tmp0[6], tmp0[14]);
  tmp1[7] = vec_add(tmp0[7], tmp0[15]);
  tmp1[8] = vec_sub(tmp0[0], tmp0[8]);
  tmp1[9] = vec_sub(tmp0[1], tmp0[9]);
  tmp1[10] = vec_sub(tmp0[2], tmp0[10]);
  tmp1[11] = vec_sub(tmp0[3], tmp0[11]);
  tmp1[12] = vec_sub(tmp0[4], tmp0[12]);
  tmp1[13] = vec_sub(tmp0[5], tmp0[13]);
  tmp1[14] = vec_sub(tmp0[6], tmp0[14]);
  tmp1[15] = vec_sub(tmp0[7], tmp0[15]);

  DCT_CONST_ROUND_SHIFT(tmp1[0]);
  DCT_CONST_ROUND_SHIFT(tmp1[1]);
  DCT_CONST_ROUND_SHIFT(tmp1[2]);
  DCT_CONST_ROUND_SHIFT(tmp1[3]);
  DCT_CONST_ROUND_SHIFT(tmp1[4]);
  DCT_CONST_ROUND_SHIFT(tmp1[5]);
  DCT_CONST_ROUND_SHIFT(tmp1[6]);
  DCT_CONST_ROUND_SHIFT(tmp1[7]);
  DCT_CONST_ROUND_SHIFT(tmp1[8]);
  DCT_CONST_ROUND_SHIFT(tmp1[9]);
  DCT_CONST_ROUND_SHIFT(tmp1[10]);
  DCT_CONST_ROUND_SHIFT(tmp1[11]);
  DCT_CONST_ROUND_SHIFT(tmp1[12]);
  DCT_CONST_ROUND_SHIFT(tmp1[13]);
  DCT_CONST_ROUND_SHIFT(tmp1[14]);
  DCT_CONST_ROUND_SHIFT(tmp1[15]);

  tmp16_0[0] = vec_add(in[0], in[4]);
  tmp16_0[1] = vec_add(in[1], in[5]);
  tmp16_0[2] = vec_add(in[2], in[6]);
  tmp16_0[3] = vec_add(in[3], in[7]);
  tmp16_0[4] = vec_sub(in[0], in[4]);
  tmp16_0[5] = vec_sub(in[1], in[5]);
  tmp16_0[6] = vec_sub(in[2], in[6]);
  tmp16_0[7] = vec_sub(in[3], in[7]);
  tmp16_0[8] = vec_packs(tmp1[0], tmp1[1]);
  tmp16_0[9] = vec_packs(tmp1[2], tmp1[3]);
  tmp16_0[10] = vec_packs(tmp1[4], tmp1[5]);
  tmp16_0[11] = vec_packs(tmp1[6], tmp1[7]);
  tmp16_0[12] = vec_packs(tmp1[8], tmp1[9]);
  tmp16_0[13] = vec_packs(tmp1[10], tmp1[11]);
  tmp16_0[14] = vec_packs(tmp1[12], tmp1[13]);
  tmp16_0[15] = vec_packs(tmp1[14], tmp1[15]);

  // stage 3
  in[0] = vec_mergeh(tmp16_0[4], tmp16_0[5]);
  in[1] = vec_mergel(tmp16_0[4], tmp16_0[5]);
  in[2] = vec_mergeh(tmp16_0[6], tmp16_0[7]);
  in[3] = vec_mergel(tmp16_0[6], tmp16_0[7]);
  in[4] = vec_mergeh(tmp16_0[12], tmp16_0[13]);
  in[5] = vec_mergel(tmp16_0[12], tmp16_0[13]);
  in[6] = vec_mergeh(tmp16_0[14], tmp16_0[15]);
  in[7] = vec_mergel(tmp16_0[14], tmp16_0[15]);

  tmp0[0] = vec_msum(in[0], cospi_p08_p24, zerov);
  tmp0[1] = vec_msum(in[1], cospi_p08_p24, zerov);
  tmp0[2] = vec_msum(in[0], cospi_p24_m08, zerov);
  tmp0[3] = vec_msum(in[1], cospi_p24_m08, zerov);
  tmp0[4] = vec_msum(in[2], cospi_m24_p08, zerov);
  tmp0[5] = vec_msum(in[3], cospi_m24_p08, zerov);
  tmp0[6] = vec_msum(in[2], cospi_p08_p24, zerov);
  tmp0[7] = vec_msum(in[3], cospi_p08_p24, zerov);
  tmp0[8] = vec_msum(in[4], cospi_p08_p24, zerov);
  tmp0[9] = vec_msum(in[5], cospi_p08_p24, zerov);
  tmp0[10] = vec_msum(in[4], cospi_p24_m08, zerov);
  tmp0[11] = vec_msum(in[5], cospi_p24_m08, zerov);
  tmp0[12] = vec_msum(in[6], cospi_m24_p08, zerov);
  tmp0[13] = vec_msum(in[7], cospi_m24_p08, zerov);
  tmp0[14] = vec_msum(in[6], cospi_p08_p24, zerov);
  tmp0[15] = vec_msum(in[7], cospi_p08_p24, zerov);

  tmp1[0] = vec_add(tmp0[0], tmp0[4]);
  tmp1[1] = vec_add(tmp0[1], tmp0[5]);
  tmp1[2] = vec_add(tmp0[2], tmp0[6]);
  tmp1[3] = vec_add(tmp0[3], tmp0[7]);
  tmp1[4] = vec_sub(tmp0[0], tmp0[4]);
  tmp1[5] = vec_sub(tmp0[1], tmp0[5]);
  tmp1[6] = vec_sub(tmp0[2], tmp0[6]);
  tmp1[7] = vec_sub(tmp0[3], tmp0[7]);
  tmp1[8] = vec_add(tmp0[8], tmp0[12]);
  tmp1[9] = vec_add(tmp0[9], tmp0[13]);
  tmp1[10] = vec_add(tmp0[10], tmp0[14]);
  tmp1[11] = vec_add(tmp0[11], tmp0[15]);
  tmp1[12] = vec_sub(tmp0[8], tmp0[12]);
  tmp1[13] = vec_sub(tmp0[9], tmp0[13]);
  tmp1[14] = vec_sub(tmp0[10], tmp0[14]);
  tmp1[15] = vec_sub(tmp0[11], tmp0[15]);

  DCT_CONST_ROUND_SHIFT(tmp1[0]);
  DCT_CONST_ROUND_SHIFT(tmp1[1]);
  DCT_CONST_ROUND_SHIFT(tmp1[2]);
  DCT_CONST_ROUND_SHIFT(tmp1[3]);
  DCT_CONST_ROUND_SHIFT(tmp1[4]);
  DCT_CONST_ROUND_SHIFT(tmp1[5]);
  DCT_CONST_ROUND_SHIFT(tmp1[6]);
  DCT_CONST_ROUND_SHIFT(tmp1[7]);
  DCT_CONST_ROUND_SHIFT(tmp1[8]);
  DCT_CONST_ROUND_SHIFT(tmp1[9]);
  DCT_CONST_ROUND_SHIFT(tmp1[10]);
  DCT_CONST_ROUND_SHIFT(tmp1[11]);
  DCT_CONST_ROUND_SHIFT(tmp1[12]);
  DCT_CONST_ROUND_SHIFT(tmp1[13]);
  DCT_CONST_ROUND_SHIFT(tmp1[14]);
  DCT_CONST_ROUND_SHIFT(tmp1[15]);

  in[0] = vec_add(tmp16_0[0], tmp16_0[2]);
  in[1] = vec_add(tmp16_0[1], tmp16_0[3]);
  in[2] = vec_sub(tmp16_0[0], tmp16_0[2]);
  in[3] = vec_sub(tmp16_0[1], tmp16_0[3]);
  in[4] = vec_packs(tmp1[0], tmp1[1]);
  in[5] = vec_packs(tmp1[2], tmp1[3]);
  in[6] = vec_packs(tmp1[4], tmp1[5]);
  in[7] = vec_packs(tmp1[6], tmp1[7]);
  in[8] = vec_add(tmp16_0[8], tmp16_0[10]);
  in[9] = vec_add(tmp16_0[9], tmp16_0[11]);
  in[10] = vec_sub(tmp16_0[8], tmp16_0[10]);
  in[11] = vec_sub(tmp16_0[9], tmp16_0[11]);
  in[12] = vec_packs(tmp1[8], tmp1[9]);
  in[13] = vec_packs(tmp1[10], tmp1[11]);
  in[14] = vec_packs(tmp1[12], tmp1[13]);
  in[15] = vec_packs(tmp1[14], tmp1[15]);

  // stage 4
  out[0] = vec_mergeh(in[2], in[3]);
  out[1] = vec_mergel(in[2], in[3]);
  out[2] = vec_mergeh(in[6], in[7]);
  out[3] = vec_mergel(in[6], in[7]);
  out[4] = vec_mergeh(in[10], in[11]);
  out[5] = vec_mergel(in[10], in[11]);
  out[6] = vec_mergeh(in[14], in[15]);
  out[7] = vec_mergel(in[14], in[15]);
}
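
// Full 16-point inverse ADST over a 16x16 block stored as two arrays of 16
// vectors. Each half is transposed to 16x8 form, run through iadst16x8_vsx,
// and finished with the cospi16 IADST_WRAPLOW stage and the required sign
// flips on the negated outputs.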
void vpx_iadst16_vsx(int16x8_t *src0, int16x8_t *src1) {
  int16x8_t tmp0[16], tmp1[16], tmp2[8];
  int32x4_t tmp3, tmp4;
  int16x8_t zero16v = vec_splat_s16(0);
  int32x4_t zerov = vec_splat_s32(0);
  int16x8_t cospi_p16_m16 = vec_mergel(cospi16_v, cospi16m_v);
  int16x8_t cospi_m16_p16 = vec_mergel(cospi16m_v, cospi16_v);
  ROUND_SHIFT_INIT;

  TRANSPOSE8x8(src0[0], src0[2], src0[4], src0[6], src0[8], src0[10], src0[12],
               src0[14], tmp0[0], tmp0[1], tmp0[2], tmp0[3], tmp0[4], tmp0[5],
               tmp0[6], tmp0[7]);
  TRANSPOSE8x8(src1[0], src1[2], src1[4], src1[6], src1[8], src1[10], src1[12],
               src1[14], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],
               tmp1[6], tmp1[7]);
  TRANSPOSE8x8(src0[1], src0[3], src0[5], src0[7], src0[9], src0[11], src0[13],
               src0[15], tmp0[8], tmp0[9], tmp0[10], tmp0[11], tmp0[12],
               tmp0[13], tmp0[14], tmp0[15]);
  TRANSPOSE8x8(src1[1], src1[3], src1[5], src1[7], src1[9], src1[11], src1[13],
               src1[15], tmp1[8], tmp1[9], tmp1[10], tmp1[11], tmp1[12],
               tmp1[13], tmp1[14], tmp1[15]);

  iadst16x8_vsx(tmp0, tmp2);
  IADST_WRAPLOW(tmp2[0], tmp2[1], tmp3, tmp4, src0[14], cospi16m_v);
  IADST_WRAPLOW(tmp2[0], tmp2[1], tmp3, tmp4, src1[0], cospi_p16_m16);
  IADST_WRAPLOW(tmp2[2], tmp2[3], tmp3, tmp4, src0[8], cospi16_v);
  IADST_WRAPLOW(tmp2[2], tmp2[3], tmp3, tmp4, src1[6], cospi_m16_p16);
  IADST_WRAPLOW(tmp2[4], tmp2[5], tmp3, tmp4, src0[12], cospi16_v);
  IADST_WRAPLOW(tmp2[4], tmp2[5], tmp3, tmp4, src1[2], cospi_m16_p16);
  IADST_WRAPLOW(tmp2[6], tmp2[7], tmp3, tmp4, src0[10], cospi16m_v);
  IADST_WRAPLOW(tmp2[6], tmp2[7], tmp3, tmp4, src1[4], cospi_p16_m16);

  src0[0] = tmp0[0];
  src0[2] = vec_sub(zero16v, tmp0[8]);
  src0[4] = tmp0[12];
  src0[6] = vec_sub(zero16v, tmp0[4]);
  src1[8] = tmp0[5];
  src1[10] = vec_sub(zero16v, tmp0[13]);
  src1[12] = tmp0[9];
  src1[14] = vec_sub(zero16v, tmp0[1]);

  iadst16x8_vsx(tmp1, tmp2);
  IADST_WRAPLOW(tmp2[0], tmp2[1], tmp3, tmp4, src0[15], cospi16m_v);
  IADST_WRAPLOW(tmp2[0], tmp2[1], tmp3, tmp4, src1[1], cospi_p16_m16);
  IADST_WRAPLOW(tmp2[2], tmp2[3], tmp3, tmp4, src0[9], cospi16_v);
  IADST_WRAPLOW(tmp2[2], tmp2[3], tmp3, tmp4, src1[7], cospi_m16_p16);
  IADST_WRAPLOW(tmp2[4], tmp2[5], tmp3, tmp4, src0[13], cospi16_v);
  IADST_WRAPLOW(tmp2[4], tmp2[5], tmp3, tmp4, src1[3], cospi_m16_p16);
  IADST_WRAPLOW(tmp2[6], tmp2[7], tmp3, tmp4, src0[11], cospi16m_v);
  IADST_WRAPLOW(tmp2[6], tmp2[7], tmp3, tmp4, src1[5], cospi_p16_m16);

  src0[1] = tmp1[0];
  src0[3] = vec_sub(zero16v, tmp1[8]);
  src0[5] = tmp1[12];
  src0[7] = vec_sub(zero16v, tmp1[4]);
  src1[9] = tmp1[5];
  src1[11] = vec_sub(zero16v, tmp1[13]);
  src1[13] = tmp1[9];
  src1[15] = vec_sub(zero16v, tmp1[1]);
}