/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
  10. #include "./vpx_dsp_rtcd.h"
  11. #include "vpx_dsp/mips/fwd_txfm_msa.h"
  12. static void fdct8x32_1d_column_load_butterfly(const int16_t *input,
  13. int32_t src_stride,
  14. int16_t *temp_buff) {
  15. v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  16. v8i16 step0, step1, step2, step3;
  17. v8i16 in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;
  18. v8i16 step0_1, step1_1, step2_1, step3_1;
  19. /* 1st and 2nd set */
  20. LD_SH4(input, src_stride, in0, in1, in2, in3);
  21. LD_SH4(input + (28 * src_stride), src_stride, in4, in5, in6, in7);
  22. LD_SH4(input + (4 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);
  23. LD_SH4(input + (24 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);
  24. SLLI_4V(in0, in1, in2, in3, 2);
  25. SLLI_4V(in4, in5, in6, in7, 2);
  26. SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
  27. SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
  28. BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,
  29. step3, in4, in5, in6, in7);
  30. BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, step0_1,
  31. step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
  32. ST_SH4(step0, step1, step2, step3, temp_buff, 8);
  33. ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8);
  34. ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (4 * 8), 8);
  35. ST_SH4(in4_1, in5_1, in6_1, in7_1, temp_buff + (24 * 8), 8);
  36. /* 3rd and 4th set */
  37. LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3);
  38. LD_SH4(input + (20 * src_stride), src_stride, in4, in5, in6, in7);
  39. LD_SH4(input + (12 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);
  40. LD_SH4(input + (16 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);
  41. SLLI_4V(in0, in1, in2, in3, 2);
  42. SLLI_4V(in4, in5, in6, in7, 2);
  43. SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
  44. SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
  45. BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,
  46. step3, in4, in5, in6, in7);
  47. BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, step0_1,
  48. step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
  49. ST_SH4(step0, step1, step2, step3, temp_buff + (8 * 8), 8);
  50. ST_SH4(in4, in5, in6, in7, temp_buff + (20 * 8), 8);
  51. ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (12 * 8), 8);
  52. ST_SH4(in4_1, in5_1, in6_1, in7_1, temp_buff + (15 * 8) + 8, 8);
  53. }
/* Even half of the 32-point column transform.
 * Consumes the 32 butterflied rows produced by
 * fdct8x32_1d_column_load_butterfly (row stride 8 int16s in 'input') and
 * emits the even-indexed output coefficients.  Each 8-wide result row is
 * stored at temp + k, where the offsets used below (0, 64, 128, ...)
 * step in multiples of 64 int16 elements; FDCT32_POSTPROC_2V_POS_H applies
 * the column-pass rounding before each store.
 */
static void fdct8x32_1d_column_even_store(int16_t *input, int16_t *temp) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v8i16 temp0, temp1;

  /* fdct even: second butterfly stage over the even half */
  LD_SH4(input, 8, in0, in1, in2, in3);
  LD_SH4(input + 96, 8, in12, in13, in14, in15);
  BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15, vec0, vec1, vec2,
              vec3, in12, in13, in14, in15);
  LD_SH4(input + 32, 8, in4, in5, in6, in7);
  LD_SH4(input + 64, 8, in8, in9, in10, in11);
  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7,
              in8, in9, in10, in11);

  /* Stage 3 */
  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
  BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp);
  ST_SH(temp1, temp + 512);
  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 256);
  ST_SH(temp1, temp + 768);
  SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 128);
  ST_SH(temp1, temp + 896);
  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 640);
  ST_SH(temp1, temp + 384);
  /* Remaining even outputs built from the in8..in15 half */
  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  ADD2(in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 64);
  ST_SH(temp1, temp + 960);
  SUB2(in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 576);
  ST_SH(temp1, temp + 448);
  SUB2(in9, vec2, in14, vec5, vec2, vec5);
  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 320);
  ST_SH(temp1, temp + 704);
  ADD2(in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  ST_SH(temp0, temp + 192);
  ST_SH(temp1, temp + 832);
}
/* Odd half of the 32-point column transform.
 * Consumes the 16 odd-path rows (row stride 8 int16s in 'input') and emits
 * the odd-indexed output coefficients to 'temp_ptr' at offsets that are
 * multiples of 64 int16 elements.  NOTE: the first phase reuses 'input' as
 * scratch, overwriting it with intermediate differences that are reloaded
 * later in the function -- 'input' is clobbered on return.
 */
static void fdct8x32_1d_column_odd_store(int16_t *input, int16_t *temp_ptr) {
  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
  v8i16 in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;

  in20 = LD_SH(input + 32);
  in21 = LD_SH(input + 40);
  in26 = LD_SH(input + 80);
  in27 = LD_SH(input + 88);
  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
  in18 = LD_SH(input + 16);
  in19 = LD_SH(input + 24);
  in28 = LD_SH(input + 96);
  in29 = LD_SH(input + 104);
  /* Stash the difference terms back into 'input' for the second phase. */
  vec4 = in19 - in20;
  ST_SH(vec4, input + 32);
  vec4 = in18 - in21;
  ST_SH(vec4, input + 40);
  vec4 = in29 - in26;
  ST_SH(vec4, input + 80);
  vec4 = in28 - in27;
  ST_SH(vec4, input + 88);
  in21 = in18 + in21;
  in20 = in19 + in20;
  in27 = in28 + in27;
  in26 = in29 + in26;
  LD_SH4(input + 48, 8, in22, in23, in24, in25);
  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
  in16 = LD_SH(input);
  in17 = LD_SH(input + 8);
  in30 = LD_SH(input + 112);
  in31 = LD_SH(input + 120);
  vec4 = in17 - in22;
  ST_SH(vec4, input + 16);
  vec4 = in16 - in23;
  ST_SH(vec4, input + 24);
  vec4 = in31 - in24;
  ST_SH(vec4, input + 96);
  vec4 = in30 - in25;
  ST_SH(vec4, input + 104);
  /* First phase: outputs from the sum terms. */
  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
  ADD2(in27, in26, in25, in24, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec5, temp_ptr);
  ST_SH(vec4, temp_ptr + 960);
  SUB2(in27, in26, in25, in24, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec5, temp_ptr + 448);
  ST_SH(vec4, temp_ptr + 512);
  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
  SUB2(in26, in27, in24, in25, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec4, temp_ptr + 704);
  ST_SH(vec5, temp_ptr + 256);
  ADD2(in26, in27, in24, in25, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec4, temp_ptr + 192);
  ST_SH(vec5, temp_ptr + 768);
  /* Second phase: reload the stashed difference terms from 'input'. */
  LD_SH4(input + 16, 8, in22, in23, in20, in21);
  LD_SH4(input + 80, 8, in26, in27, in24, in25);
  in16 = in20;
  in17 = in21;
  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
  ADD2(in28, in29, in31, in30, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec5, temp_ptr + 832);
  ST_SH(vec4, temp_ptr + 128);
  SUB2(in28, in29, in31, in30, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec5, temp_ptr + 320);
  ST_SH(vec4, temp_ptr + 640);
  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
  SUB2(in29, in28, in30, in31, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec5, temp_ptr + 576);
  ST_SH(vec4, temp_ptr + 384);
  ADD2(in29, in28, in30, in31, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  ST_SH(vec5, temp_ptr + 64);
  ST_SH(vec4, temp_ptr + 896);
}
/* One 8-column slice of the column transform: load + stage-1 butterfly into
 * tmp_buf, then the even and odd halves into tmp_buf_big.  The odd half
 * reads the upper 16 rows of tmp_buf and writes interleaved with the even
 * outputs (odd results start at tmp_buf_big + 32).
 */
static void fdct8x32_1d_column(const int16_t *input, int32_t src_stride,
                               int16_t *tmp_buf, int16_t *tmp_buf_big) {
  fdct8x32_1d_column_load_butterfly(input, src_stride, tmp_buf);
  fdct8x32_1d_column_even_store(tmp_buf, tmp_buf_big);
  fdct8x32_1d_column_odd_store(tmp_buf + 128, (tmp_buf_big + 32));
}
/* Row-pass preamble: loads 8 rows of 32 coefficients (row stride 32 int16s
 * in 'temp_buff'), transposes each 8x8 tile so the row data becomes
 * column-major for the vector math, and applies the first butterfly stage
 * pairing element i with element 31 - i.  Results go to 'output' with a
 * row stride of 8 int16 elements.
 */
static void fdct8x32_1d_row_load_butterfly(int16_t *temp_buff,
                                           int16_t *output) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  v8i16 step0, step1, step2, step3, step4, step5, step6, step7;

  /* 1st set: columns 0-7 paired with columns 24-31 */
  LD_SH8(temp_buff, 32, in0, in1, in2, in3, in4, in5, in6, in7);
  LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
                     in10, in11, in12, in13, in14, in15);
  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
               in12, in13, in14, in15, step0, step1, step2, step3, step4, step5,
               step6, step7, in8, in9, in10, in11, in12, in13, in14, in15);
  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8);
  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8);

  /* 2nd set: columns 8-15 paired with columns 16-23 */
  LD_SH8(temp_buff + 8, 32, in0, in1, in2, in3, in4, in5, in6, in7);
  LD_SH8(temp_buff + 16, 32, in8, in9, in10, in11, in12, in13, in14, in15);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
                     in10, in11, in12, in13, in14, in15);
  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
               in12, in13, in14, in15, step0, step1, step2, step3, step4, step5,
               step6, step7, in8, in9, in10, in11, in12, in13, in14, in15);
  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7,
         (output + 8 * 8), 8);
  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 16 * 8), 8);
}
/* Even half of the 32-point row transform, higher-precision variant used
 * for the first 8 rows: the first stage-3 butterfly is widened to 32-bit
 * lanes (UNPCK_SH_SW / DOTP_CONST_PAIR_W) before being packed back to
 * 16 bits.  Stage-2 results are parked in 'interm_ptr' and reloaded for
 * the later stages; even outputs are stored to 'out' at int16 offsets
 * 0..120.  FDCT_POSTPROC_2V_NEG_H / FDCT32_POSTPROC_NEG_W apply the
 * row-pass rounding.
 */
static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
                                    int16_t *out) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v4i32 vec0_l, vec1_l, vec2_l, vec3_l, vec4_l, vec5_l, vec6_l, vec7_l;
  v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec4_r, vec5_r, vec6_r, vec7_r;
  v4i32 tmp0_w, tmp1_w, tmp2_w, tmp3_w;

  /* fdct32 even */
  /* stage 2 */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
  LD_SH8(input + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
               in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6,
               vec7, in8, in9, in10, in11, in12, in13, in14, in15);
  /* Park stage-2 results; registers are needed for the widened math below. */
  ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8);
  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8);

  /* Stage 3: widen to 32-bit lanes for the first butterfly */
  UNPCK_SH_SW(vec0, vec0_l, vec0_r);
  UNPCK_SH_SW(vec1, vec1_l, vec1_r);
  UNPCK_SH_SW(vec2, vec2_l, vec2_r);
  UNPCK_SH_SW(vec3, vec3_l, vec3_r);
  UNPCK_SH_SW(vec4, vec4_l, vec4_r);
  UNPCK_SH_SW(vec5, vec5_l, vec5_r);
  UNPCK_SH_SW(vec6, vec6_l, vec6_r);
  UNPCK_SH_SW(vec7, vec7_l, vec7_r);
  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, tmp0_w,
       tmp1_w, tmp2_w, tmp3_w);
  BUTTERFLY_4(tmp0_w, tmp1_w, tmp2_w, tmp3_w, vec4_r, vec6_r, vec7_r, vec5_r);
  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, vec0_r,
       vec1_r, vec2_r, vec3_r);
  tmp3_w = vec0_r + vec3_r;
  vec0_r = vec0_r - vec3_r;
  vec3_r = vec1_r + vec2_r;
  vec1_r = vec1_r - vec2_r;
  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, cospi_16_64,
                    vec4_r, tmp3_w, vec6_r, vec3_r);
  FDCT32_POSTPROC_NEG_W(vec4_r);
  FDCT32_POSTPROC_NEG_W(tmp3_w);
  FDCT32_POSTPROC_NEG_W(vec6_r);
  FDCT32_POSTPROC_NEG_W(vec3_r);
  /* Pack the rounded 32-bit lanes back to int16 for storage. */
  PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
  ST_SH2(vec5, vec4, out, 8);
  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64,
                    vec4_r, tmp3_w, vec6_r, vec3_r);
  FDCT32_POSTPROC_NEG_W(vec4_r);
  FDCT32_POSTPROC_NEG_W(tmp3_w);
  FDCT32_POSTPROC_NEG_W(vec6_r);
  FDCT32_POSTPROC_NEG_W(vec3_r);
  PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
  ST_SH2(vec5, vec4, out + 16, 8);
  /* Remaining outputs computed at 16-bit precision from the parked data. */
  LD_SH8(interm_ptr, 8, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7);
  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  ST_SH(in4, out + 32);
  ST_SH(in5, out + 56);
  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  ST_SH(in4, out + 40);
  ST_SH(in5, out + 48);
  LD_SH8(interm_ptr + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  ADD2(in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  ST_SH(in4, out + 64);
  ST_SH(in5, out + 120);
  SUB2(in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  ST_SH(in4, out + 72);
  ST_SH(in5, out + 112);
  SUB2(in9, vec2, in14, vec5, vec2, vec5);
  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  ST_SH(in4, out + 80);
  ST_SH(in5, out + 104);
  ADD2(in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, in4, in5);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  ST_SH(in4, out + 96);
  ST_SH(in5, out + 88);
}
  344. static void fdct8x32_1d_row_even(int16_t *temp, int16_t *out) {
  345. v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  346. v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  347. v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;
  348. /* fdct32 even */
  349. /* stage 2 */
  350. LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
  351. LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
  352. BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
  353. in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6,
  354. vec7, in8, in9, in10, in11, in12, in13, in14, in15);
  355. /* Stage 3 */
  356. ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
  357. BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
  358. DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
  359. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  360. ST_SH(temp0, out);
  361. ST_SH(temp1, out + 8);
  362. DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  363. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  364. ST_SH(temp0, out + 16);
  365. ST_SH(temp1, out + 24);
  366. SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
  367. DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  368. ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
  369. DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  370. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  371. ST_SH(temp0, out + 32);
  372. ST_SH(temp1, out + 56);
  373. SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
  374. DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
  375. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  376. ST_SH(temp0, out + 40);
  377. ST_SH(temp1, out + 48);
  378. DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  379. DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  380. ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
  381. DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  382. ADD2(in0, in1, in2, in3, vec0, vec7);
  383. DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
  384. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  385. ST_SH(temp0, out + 64);
  386. ST_SH(temp1, out + 120);
  387. SUB2(in0, in1, in2, in3, in0, in2);
  388. DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
  389. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  390. ST_SH(temp0, out + 72);
  391. ST_SH(temp1, out + 112);
  392. SUB2(in9, vec2, in14, vec5, vec2, vec5);
  393. DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
  394. SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5)
  395. DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
  396. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  397. ST_SH(temp0, out + 80);
  398. ST_SH(temp1, out + 104);
  399. ADD2(in3, in2, in0, in1, vec3, vec4);
  400. DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
  401. FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  402. ST_SH(temp0, out + 96);
  403. ST_SH(temp1, out + 88);
  404. }
/* Odd half of the 32-point row transform.
 * Reads the 16 odd-path rows from 'temp' (row stride 8 int16s), parks the
 * first-phase difference terms in 'interm_ptr' (so 'temp' itself is not
 * clobbered, unlike the column-pass variant), and stores the odd-indexed
 * outputs to 'out' at int16 offsets 0..120.  FDCT_POSTPROC_2V_NEG_H
 * applies the row-pass rounding.
 */
static void fdct8x32_1d_row_odd(int16_t *temp, int16_t *interm_ptr,
                                int16_t *out) {
  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
  v8i16 in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;

  in20 = LD_SH(temp + 32);
  in21 = LD_SH(temp + 40);
  in26 = LD_SH(temp + 80);
  in27 = LD_SH(temp + 88);
  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
  in18 = LD_SH(temp + 16);
  in19 = LD_SH(temp + 24);
  in28 = LD_SH(temp + 96);
  in29 = LD_SH(temp + 104);
  /* Park the difference terms in scratch for the second phase. */
  vec4 = in19 - in20;
  ST_SH(vec4, interm_ptr + 32);
  vec4 = in18 - in21;
  ST_SH(vec4, interm_ptr + 88);
  vec4 = in28 - in27;
  ST_SH(vec4, interm_ptr + 56);
  vec4 = in29 - in26;
  ST_SH(vec4, interm_ptr + 64);
  ADD4(in18, in21, in19, in20, in28, in27, in29, in26, in21, in20, in27, in26);
  in22 = LD_SH(temp + 48);
  in23 = LD_SH(temp + 56);
  in24 = LD_SH(temp + 64);
  in25 = LD_SH(temp + 72);
  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
  in16 = LD_SH(temp);
  in17 = LD_SH(temp + 8);
  in30 = LD_SH(temp + 112);
  in31 = LD_SH(temp + 120);
  vec4 = in17 - in22;
  ST_SH(vec4, interm_ptr + 40);
  vec4 = in30 - in25;
  ST_SH(vec4, interm_ptr + 48);
  vec4 = in31 - in24;
  ST_SH(vec4, interm_ptr + 72);
  vec4 = in16 - in23;
  ST_SH(vec4, interm_ptr + 80);
  /* First phase: outputs from the sum terms. */
  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
  ADD2(in27, in26, in25, in24, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec5, out);
  ST_SH(vec4, out + 120);
  SUB2(in27, in26, in25, in24, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec5, out + 112);
  ST_SH(vec4, out + 8);
  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
  SUB2(in26, in27, in24, in25, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec4, out + 16);
  ST_SH(vec5, out + 104);
  ADD2(in26, in27, in24, in25, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec4, out + 24);
  ST_SH(vec5, out + 96);
  /* Second phase: reload the parked difference terms. */
  in20 = LD_SH(interm_ptr + 32);
  in21 = LD_SH(interm_ptr + 88);
  in27 = LD_SH(interm_ptr + 56);
  in26 = LD_SH(interm_ptr + 64);
  in16 = in20;
  in17 = in21;
  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
  in22 = LD_SH(interm_ptr + 40);
  in25 = LD_SH(interm_ptr + 48);
  in24 = LD_SH(interm_ptr + 72);
  in23 = LD_SH(interm_ptr + 80);
  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
  ADD2(in28, in29, in31, in30, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec5, out + 32);
  ST_SH(vec4, out + 88);
  SUB2(in28, in29, in31, in30, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec5, out + 40);
  ST_SH(vec4, out + 80);
  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
  SUB2(in29, in28, in30, in31, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec5, out + 72);
  ST_SH(vec4, out + 48);
  ADD2(in29, in28, in30, in31, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  ST_SH(vec4, out + 56);
  ST_SH(vec5, out + 64);
}
/* Final step of the row pass: gathers the 32 coefficient vectors from
 * 'temp' (four 8x8 tiles; the scattered load offsets undo the interleaved
 * order in which the even/odd passes stored them), transposes each tile
 * back to row-major, and writes the 8x32 result to 'output' with a row
 * stride of 32 int16 elements.
 */
static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;

  /* 1st set */
  in0 = LD_SH(temp);
  in4 = LD_SH(temp + 32);
  in2 = LD_SH(temp + 64);
  in6 = LD_SH(temp + 96);
  in1 = LD_SH(temp + 128);
  in7 = LD_SH(temp + 152);
  in3 = LD_SH(temp + 192);
  in5 = LD_SH(temp + 216);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);

  /* 2nd set */
  in0_1 = LD_SH(temp + 16);
  in1_1 = LD_SH(temp + 232);
  in2_1 = LD_SH(temp + 80);
  in3_1 = LD_SH(temp + 168);
  in4_1 = LD_SH(temp + 48);
  in5_1 = LD_SH(temp + 176);
  in6_1 = LD_SH(temp + 112);
  in7_1 = LD_SH(temp + 240);
  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 32);
  TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);

  /* 3rd set */
  in0 = LD_SH(temp + 8);
  in1 = LD_SH(temp + 136);
  in2 = LD_SH(temp + 72);
  in3 = LD_SH(temp + 200);
  in4 = LD_SH(temp + 40);
  in5 = LD_SH(temp + 208);
  in6 = LD_SH(temp + 104);
  in7 = LD_SH(temp + 144);
  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 8,
         32);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32);

  /* 4th set */
  in0_1 = LD_SH(temp + 24);
  in1_1 = LD_SH(temp + 224);
  in2_1 = LD_SH(temp + 88);
  in3_1 = LD_SH(temp + 160);
  in4_1 = LD_SH(temp + 56);
  in5_1 = LD_SH(temp + 184);
  in6_1 = LD_SH(temp + 120);
  in7_1 = LD_SH(temp + 248);
  TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);
  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 24,
         32);
}
/* One 8-row slice of the row transform (16-bit path): load + stage-1
 * butterfly, even and odd halves computed in place in temp_buf (the odd
 * half reuses 'temp' as scratch), then transpose back and store.
 */
static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf, int16_t *output) {
  fdct8x32_1d_row_load_butterfly(temp, temp_buf);
  fdct8x32_1d_row_even(temp_buf, temp_buf);
  fdct8x32_1d_row_odd(temp_buf + 128, temp, temp_buf + 128);
  fdct8x32_1d_row_transpose_store(temp_buf, output);
}
/* One 8-row slice of the row transform using the higher-precision even
 * path (fdct8x32_1d_row_even_4x widens part of the math to 32-bit lanes).
 * tmp_buf_big doubles as the scratch area for both halves.
 */
static void fdct32x8_1d_row_4x(int16_t *tmp_buf_big, int16_t *tmp_buf,
                               int16_t *output) {
  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
  fdct8x32_1d_row_even_4x(tmp_buf, tmp_buf_big, tmp_buf);
  fdct8x32_1d_row_odd(tmp_buf + 128, tmp_buf_big, tmp_buf + 128);
  fdct8x32_1d_row_transpose_store(tmp_buf, output);
}
/* 32x32 forward DCT, MIPS MSA version.
 * input      - 32x32 residual block, row stride 'src_stride' int16s.
 * output     - 32x32 coefficient block, row stride 32 int16s.
 * The column pass runs over four 8-column slices into tmp_buf_big; the
 * row pass then processes four 8-row slices, the first via the
 * higher-precision _4x path (32-bit intermediates in its even half).
 */
void vpx_fdct32x32_msa(const int16_t *input, int16_t *output,
                       int32_t src_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);

  /* column transform */
  for (i = 0; i < 4; ++i) {
    fdct8x32_1d_column(input + (8 * i), src_stride, tmp_buf,
                       tmp_buf_big + (8 * i));
  }

  /* row transform: first 8 rows use the higher-precision path */
  fdct32x8_1d_row_4x(tmp_buf_big, tmp_buf, output);

  /* row transform: remaining 24 rows */
  for (i = 1; i < 4; ++i) {
    fdct32x8_1d_row(tmp_buf_big + (i * 256), tmp_buf, output + (i * 256));
  }
}
/* Even half of the 1-D fdct32 row pass for the _rd variant.  Reads 16
 * vectors (128 int16) of butterflied input from |temp| and stores the 16
 * even-indexed transform outputs to |out| at vector offsets 0..120.
 * NOTE(review): the _rd flavor appears to differ from the plain row pass by
 * applying FDCT_POSTPROC_2V_NEG_H right after the first butterfly — confirm
 * against fdct8x32_1d_row_even, which is outside this view. */
static void fdct8x32_1d_row_even_rd(int16_t *temp, int16_t *out) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;

  /* fdct32 even */
  /* stage 2 */
  LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
  LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
               in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6,
               vec7, in8, in9, in10, in11, in12, in13, in14, in15);
  /* per-pair post-processing of all 16 stage-2 results (see macro
   * definition for the exact rounding applied) */
  FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
  FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
  FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec6, vec7);
  FDCT_POSTPROC_2V_NEG_H(in8, in9);
  FDCT_POSTPROC_2V_NEG_H(in10, in11);
  FDCT_POSTPROC_2V_NEG_H(in12, in13);
  FDCT_POSTPROC_2V_NEG_H(in14, in15);

  /* Stage 3 */
  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);

  /* 4x4 butterfly feeding outputs 0, 8, 16, 24 (vector offsets 0..24) */
  temp0 = in0 + in3;
  in0 = in0 - in3;
  in3 = in1 + in2;
  in1 = in1 - in2;

  DOTP_CONST_PAIR(temp0, in3, cospi_16_64, cospi_16_64, temp1, temp0);
  ST_SH(temp0, out);
  ST_SH(temp1, out + 8);

  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  ST_SH(temp0, out + 16);
  ST_SH(temp1, out + 24);

  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  ST_SH(temp0, out + 32);
  ST_SH(temp1, out + 56);

  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
  ST_SH(temp0, out + 40);
  ST_SH(temp1, out + 48);

  /* remaining stages produce outputs at vector offsets 64..120 from the
   * in8..in15 half of the stage-2 butterfly */
  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  ADD2(in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
  ST_SH(temp0, out + 64);
  ST_SH(temp1, out + 120);

  SUB2(in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
  ST_SH(temp0, out + 72);
  ST_SH(temp1, out + 112);

  SUB2(in9, vec2, in14, vec5, vec2, vec5);
  /* note the negated first operand — sign fold from the butterfly above */
  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
  ST_SH(temp0, out + 80);
  ST_SH(temp1, out + 104);

  ADD2(in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
  ST_SH(temp0, out + 96);
  ST_SH(temp1, out + 88);
}
/* Odd half of the 1-D fdct32 row pass for the _rd variant.  Reads 16
 * vectors from |temp| (vector offsets 0..120), uses |interm_ptr| (offsets
 * 32..88) as spill space for butterfly differences that are re-loaded in
 * the second half, and stores the 16 odd-indexed transform outputs to
 * |out|.  As with the even _rd pass, FDCT_POSTPROC_2V_NEG_H is applied to
 * all inputs before the main butterfly network. */
static void fdct8x32_1d_row_odd_rd(int16_t *temp, int16_t *interm_ptr,
                                   int16_t *out) {
  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
  v8i16 in24, in25, in26, in27, in28, in29, in30, in31;
  v8i16 vec4, vec5;

  /* middle group: rotate in20/in27 and in21/in26 by cospi_16 */
  in20 = LD_SH(temp + 32);
  in21 = LD_SH(temp + 40);
  in26 = LD_SH(temp + 80);
  in27 = LD_SH(temp + 88);

  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);

  FDCT_POSTPROC_2V_NEG_H(in20, in21);
  FDCT_POSTPROC_2V_NEG_H(in26, in27);

  in18 = LD_SH(temp + 16);
  in19 = LD_SH(temp + 24);
  in28 = LD_SH(temp + 96);
  in29 = LD_SH(temp + 104);

  FDCT_POSTPROC_2V_NEG_H(in18, in19);
  FDCT_POSTPROC_2V_NEG_H(in28, in29);

  /* spill butterfly differences for the second half of the function */
  vec4 = in19 - in20;
  ST_SH(vec4, interm_ptr + 32);
  vec4 = in18 - in21;
  ST_SH(vec4, interm_ptr + 88);
  vec4 = in29 - in26;
  ST_SH(vec4, interm_ptr + 64);
  vec4 = in28 - in27;
  ST_SH(vec4, interm_ptr + 56);

  ADD4(in18, in21, in19, in20, in28, in27, in29, in26, in21, in20, in27, in26);

  in22 = LD_SH(temp + 48);
  in23 = LD_SH(temp + 56);
  in24 = LD_SH(temp + 64);
  in25 = LD_SH(temp + 72);

  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
  FDCT_POSTPROC_2V_NEG_H(in22, in23);
  FDCT_POSTPROC_2V_NEG_H(in24, in25);

  in16 = LD_SH(temp);
  in17 = LD_SH(temp + 8);
  in30 = LD_SH(temp + 112);
  in31 = LD_SH(temp + 120);

  FDCT_POSTPROC_2V_NEG_H(in16, in17);
  FDCT_POSTPROC_2V_NEG_H(in30, in31);

  /* spill the outer-group differences as well */
  vec4 = in17 - in22;
  ST_SH(vec4, interm_ptr + 40);
  vec4 = in30 - in25;
  ST_SH(vec4, interm_ptr + 48);
  vec4 = in31 - in24;
  ST_SH(vec4, interm_ptr + 72);
  vec4 = in16 - in23;
  ST_SH(vec4, interm_ptr + 80);

  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);

  /* first half: outputs at vector offsets 0..24 and 96..120 */
  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);

  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);

  ADD2(in27, in26, in25, in24, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
  ST_SH(vec5, out);
  ST_SH(vec4, out + 120);

  SUB2(in27, in26, in25, in24, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
  ST_SH(vec5, out + 112);
  ST_SH(vec4, out + 8);

  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
  /* negated operand folds the sign from the preceding subtraction */
  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
  SUB2(in26, in27, in24, in25, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
  ST_SH(vec4, out + 16);
  ST_SH(vec5, out + 104);

  ADD2(in26, in27, in24, in25, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
  ST_SH(vec4, out + 24);
  ST_SH(vec5, out + 96);

  /* second half: reload the spilled differences and produce outputs at
   * vector offsets 32..88 */
  in20 = LD_SH(interm_ptr + 32);
  in21 = LD_SH(interm_ptr + 88);
  in27 = LD_SH(interm_ptr + 56);
  in26 = LD_SH(interm_ptr + 64);

  in16 = in20;
  in17 = in21;
  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);

  in22 = LD_SH(interm_ptr + 40);
  in25 = LD_SH(interm_ptr + 48);
  in24 = LD_SH(interm_ptr + 72);
  in23 = LD_SH(interm_ptr + 80);

  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);

  in16 = in28 + in29;
  in19 = in31 + in30;
  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
  ST_SH(vec5, out + 32);
  ST_SH(vec4, out + 88);

  SUB2(in28, in29, in31, in30, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
  ST_SH(vec5, out + 40);
  ST_SH(vec4, out + 80);

  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);

  SUB2(in29, in28, in30, in31, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
  ST_SH(vec5, out + 72);
  ST_SH(vec4, out + 48);

  ADD2(in29, in28, in30, in31, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
  ST_SH(vec4, out + 56);
  ST_SH(vec5, out + 64);
}
/* One row-pass over an 8-row slice for the _rd transform: same pipeline as
 * fdct32x8_1d_row() but with the _rd even/odd kernels.  |tmp_buf_big| is
 * both the slice input and scratch for the odd half; call order is
 * load-dependent and must not change. */
static void fdct32x8_1d_row_rd(int16_t *tmp_buf_big, int16_t *tmp_buf,
                               int16_t *output) {
  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
  fdct8x32_1d_row_even_rd(tmp_buf, tmp_buf);
  /* odd half works in place on the upper 128 int16 of |tmp_buf| */
  fdct8x32_1d_row_odd_rd((tmp_buf + 128), tmp_buf_big, (tmp_buf + 128));
  fdct8x32_1d_row_transpose_store(tmp_buf, output);
}
  772. void vpx_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
  773. int32_t src_stride) {
  774. int32_t i;
  775. DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
  776. DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);
  777. /* column transform */
  778. for (i = 0; i < 4; ++i) {
  779. fdct8x32_1d_column(input + (8 * i), src_stride, &tmp_buf[0],
  780. &tmp_buf_big[0] + (8 * i));
  781. }
  782. /* row transform */
  783. for (i = 0; i < 4; ++i) {
  784. fdct32x8_1d_row_rd(&tmp_buf_big[0] + (8 * i * 32), &tmp_buf[0],
  785. out + (8 * i * 32));
  786. }
  787. }
  788. void vpx_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
  789. int sum, i;
  790. v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  791. v4i32 vec_w = { 0 };
  792. for (i = 0; i < 16; ++i) {
  793. LD_SH4(input, 8, in0, in1, in2, in3);
  794. input += stride;
  795. LD_SH4(input, 8, in4, in5, in6, in7);
  796. input += stride;
  797. ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);
  798. ADD2(in0, in2, in4, in6, in0, in4);
  799. vec_w += __msa_hadd_s_w(in0, in0);
  800. vec_w += __msa_hadd_s_w(in4, in4);
  801. }
  802. sum = HADD_SW_S32(vec_w);
  803. out[0] = (int16_t)(sum >> 3);
  804. }