/* vp9_fdct16x16_msa.c */
  1. /*
  2. * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <assert.h>
  11. #include "vp9/common/vp9_enums.h"
  12. #include "vp9/encoder/mips/msa/vp9_fdct_msa.h"
  13. #include "vpx_dsp/mips/fwd_txfm_msa.h"
/* First two stages of the 16-point forward ADST, applied down 8 columns.
 *
 * Loads all 16 column samples (rows `stride` apart), pre-scales each by 4
 * (<< 2) for fractional precision, then runs the stage-1 and stage-2
 * butterflies using the packed constants in const0.  Partial results are
 * stored into int_buf (16 rows of 8 int16) in exactly the slot order that
 * fadst16_cols_step2_msa reads them back.
 *
 * input   - source residuals, 16 rows of >= 8 int16 each
 * stride  - element stride between input rows
 * const0  - packed ADST constants; each logical constant is replicated
 *           4x so LD_SW fills a whole v4i32 (see const_arr in
 *           vp9_fht16x16_msa)
 * int_buf - 16*8 int16 scratch shared with step 2
 */
static void fadst16_cols_step1_msa(const int16_t *input, int32_t stride,
                                   const int32_t *const0, int16_t *int_buf) {
  v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
  v8i16 tp0, tp1, tp2, tp3, g0, g1, g2, g3, g8, g9, g10, g11, h0, h1, h2, h3;
  v4i32 k0, k1, k2, k3;

  /* load input data */
  r0 = LD_SH(input);
  r15 = LD_SH(input + 15 * stride);
  r7 = LD_SH(input + 7 * stride);
  r8 = LD_SH(input + 8 * stride);
  SLLI_4V(r0, r15, r7, r8, 2); /* pre-scale by 4 */

  /* stage 1 */
  LD_SW2(const0, 4, k0, k1);
  LD_SW2(const0 + 8, 4, k2, k3);
  MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);

  r3 = LD_SH(input + 3 * stride);
  r4 = LD_SH(input + 4 * stride);
  r11 = LD_SH(input + 11 * stride);
  r12 = LD_SH(input + 12 * stride);
  SLLI_4V(r3, r4, r11, r12, 2);
  LD_SW2(const0 + 4 * 4, 4, k0, k1);
  LD_SW2(const0 + 4 * 6, 4, k2, k3);
  MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);

  /* stage 2 */
  BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1);
  ST_SH2(tp0, tp2, int_buf, 8);
  ST_SH2(tp1, tp3, int_buf + 4 * 8, 8);
  LD_SW2(const0 + 4 * 8, 4, k0, k1);
  k2 = LD_SW(const0 + 4 * 10);
  /* note: k0 is reused as the fourth coefficient here */
  MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
  ST_SH2(h0, h1, int_buf + 8 * 8, 8);
  ST_SH2(h3, h2, int_buf + 12 * 8, 8);

  r9 = LD_SH(input + 9 * stride);
  r6 = LD_SH(input + 6 * stride);
  r1 = LD_SH(input + stride);
  r14 = LD_SH(input + 14 * stride);
  SLLI_4V(r9, r6, r1, r14, 2);
  LD_SW2(const0 + 4 * 11, 4, k0, k1);
  LD_SW2(const0 + 4 * 13, 4, k2, k3);
  MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
  ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8);

  r13 = LD_SH(input + 13 * stride);
  r2 = LD_SH(input + 2 * stride);
  r5 = LD_SH(input + 5 * stride);
  r10 = LD_SH(input + 10 * stride);
  SLLI_4V(r13, r2, r5, r10, 2);
  LD_SW2(const0 + 4 * 15, 4, k0, k1);
  LD_SW2(const0 + 4 * 17, 4, k2, k3);
  MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
  ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8);
  BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3);
  ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8);
}
/* Final stages (3 and 4) of the 16-point forward ADST over 8 columns.
 *
 * Reads the intermediates written by fadst16_cols_step1_msa from int_buf,
 * finishes the butterfly network, and scatters the 16 output vectors into
 * `out` at a 16-element row pitch.  `out_ptr` addresses the second half of
 * the 16x16 destination (out + 128, i.e. rows 8..15); several outputs are
 * negated before storing, matching the ADST sign convention.
 *
 * int_buf - 16*8 int16 intermediates from step 1
 * const0  - packed ADST constants (same table as step 1; groups 19..28)
 * out     - 16x16 int16 destination, column results at stride 16
 */
static void fadst16_cols_step2_msa(int16_t *int_buf, const int32_t *const0,
                                   int16_t *out) {
  int16_t *out_ptr = out + 128; /* rows 8..15 of the destination */
  v8i16 tp0, tp1, tp2, tp3, g5, g7, g13, g15;
  v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h10, h11;
  v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
  v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
  v4i32 k0, k1, k2, k3;

  LD_SH2(int_buf + 3 * 8, 4 * 8, g13, g15);
  LD_SH2(int_buf + 11 * 8, 4 * 8, g5, g7);
  LD_SW2(const0 + 4 * 19, 4, k0, k1);
  k2 = LD_SW(const0 + 4 * 21);
  MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);

  tp0 = LD_SH(int_buf + 4 * 8);
  tp1 = LD_SH(int_buf + 5 * 8);
  tp3 = LD_SH(int_buf + 10 * 8);
  tp2 = LD_SH(int_buf + 14 * 8);
  LD_SW2(const0 + 4 * 22, 4, k0, k1);
  k2 = LD_SW(const0 + 4 * 24);
  MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
  out4 = -out4; /* ADST sign flip */
  ST_SH(out4, (out + 3 * 16));
  ST_SH(out5, (out_ptr + 4 * 16));

  h1 = LD_SH(int_buf + 9 * 8);
  h3 = LD_SH(int_buf + 12 * 8);
  MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
  out13 = -out13;
  ST_SH(out12, (out + 2 * 16));
  ST_SH(out13, (out_ptr + 5 * 16));

  tp0 = LD_SH(int_buf);
  tp1 = LD_SH(int_buf + 8);
  tp2 = LD_SH(int_buf + 2 * 8);
  tp3 = LD_SH(int_buf + 6 * 8);
  BUTTERFLY_4(tp0, tp1, tp3, tp2, out0, out1, h11, h10);
  out1 = -out1;
  ST_SH(out0, (out));
  ST_SH(out1, (out_ptr + 7 * 16));

  h0 = LD_SH(int_buf + 8 * 8);
  h2 = LD_SH(int_buf + 13 * 8);
  BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
  out8 = -out8;
  ST_SH(out8, (out + 16));
  ST_SH(out9, (out_ptr + 6 * 16));

  /* stage 4 */
  LD_SW2(const0 + 4 * 25, 4, k0, k1);
  LD_SW2(const0 + 4 * 27, 4, k2, k3);
  MADD_SHORT(h10, h11, k1, k2, out2, out3);
  ST_SH(out2, (out + 7 * 16));
  ST_SH(out3, (out_ptr));
  MADD_SHORT(out6, out7, k0, k3, out6, out7);
  ST_SH(out6, (out + 4 * 16));
  ST_SH(out7, (out_ptr + 3 * 16));
  MADD_SHORT(out10, out11, k0, k3, out10, out11);
  ST_SH(out10, (out + 6 * 16));
  ST_SH(out11, (out_ptr + 16));
  MADD_SHORT(out14, out15, k1, k2, out14, out15);
  ST_SH(out14, (out + 5 * 16));
  ST_SH(out15, (out_ptr + 2 * 16));
}
  126. static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) {
  127. v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
  128. v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
  129. /* load input data */
  130. LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
  131. TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
  132. r7);
  133. FDCT_POSTPROC_2V_NEG_H(r0, r1);
  134. FDCT_POSTPROC_2V_NEG_H(r2, r3);
  135. FDCT_POSTPROC_2V_NEG_H(r4, r5);
  136. FDCT_POSTPROC_2V_NEG_H(r6, r7);
  137. ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);
  138. out += 64;
  139. LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
  140. TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
  141. r12, r13, r14, r15);
  142. FDCT_POSTPROC_2V_NEG_H(r8, r9);
  143. FDCT_POSTPROC_2V_NEG_H(r10, r11);
  144. FDCT_POSTPROC_2V_NEG_H(r12, r13);
  145. FDCT_POSTPROC_2V_NEG_H(r14, r15);
  146. ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);
  147. out += 64;
  148. /* load input data */
  149. input += 128;
  150. LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
  151. TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
  152. r7);
  153. FDCT_POSTPROC_2V_NEG_H(r0, r1);
  154. FDCT_POSTPROC_2V_NEG_H(r2, r3);
  155. FDCT_POSTPROC_2V_NEG_H(r4, r5);
  156. FDCT_POSTPROC_2V_NEG_H(r6, r7);
  157. ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);
  158. out += 64;
  159. LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
  160. TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
  161. r12, r13, r14, r15);
  162. FDCT_POSTPROC_2V_NEG_H(r8, r9);
  163. FDCT_POSTPROC_2V_NEG_H(r10, r11);
  164. FDCT_POSTPROC_2V_NEG_H(r12, r13);
  165. FDCT_POSTPROC_2V_NEG_H(r14, r15);
  166. ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);
  167. }
/* First two stages of the 16-point forward ADST along the rows.
 *
 * Row-pass counterpart of fadst16_cols_step1_msa: the input has already
 * been transposed and post-processed, so it is read at a fixed pitch of 8
 * and no pre-scaling (SLLI_4V) is applied.  Intermediates are written to
 * int_buf in the slot order that fadst16_rows_step2_msa expects.
 *
 * input   - transposed half-block, 16 rows of 8 int16 (pitch 8)
 * const0  - packed ADST constants (4x-replicated int32 groups)
 * int_buf - 16*8 int16 scratch shared with step 2
 */
static void fadst16_rows_step1_msa(int16_t *input, const int32_t *const0,
                                   int16_t *int_buf) {
  v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
  v8i16 tp0, tp1, tp2, tp3, g0, g1, g2, g3, g8, g9, g10, g11, h0, h1, h2, h3;
  v4i32 k0, k1, k2, k3;

  /* load input data */
  r0 = LD_SH(input);
  r7 = LD_SH(input + 7 * 8);
  r8 = LD_SH(input + 8 * 8);
  r15 = LD_SH(input + 15 * 8);

  /* stage 1 */
  LD_SW2(const0, 4, k0, k1);
  LD_SW2(const0 + 4 * 2, 4, k2, k3);
  MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);

  r3 = LD_SH(input + 3 * 8);
  r4 = LD_SH(input + 4 * 8);
  r11 = LD_SH(input + 11 * 8);
  r12 = LD_SH(input + 12 * 8);
  LD_SW2(const0 + 4 * 4, 4, k0, k1);
  LD_SW2(const0 + 4 * 6, 4, k2, k3);
  MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);

  /* stage 2 */
  BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1);
  ST_SH2(tp0, tp1, int_buf, 4 * 8);
  ST_SH2(tp2, tp3, int_buf + 8, 4 * 8);
  LD_SW2(const0 + 4 * 8, 4, k0, k1);
  k2 = LD_SW(const0 + 4 * 10);
  /* note: k0 is reused as the fourth coefficient here */
  MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
  ST_SH2(h0, h3, int_buf + 8 * 8, 4 * 8);
  ST_SH2(h1, h2, int_buf + 9 * 8, 4 * 8);

  r1 = LD_SH(input + 8);
  r6 = LD_SH(input + 6 * 8);
  r9 = LD_SH(input + 9 * 8);
  r14 = LD_SH(input + 14 * 8);
  LD_SW2(const0 + 4 * 11, 4, k0, k1);
  LD_SW2(const0 + 4 * 13, 4, k2, k3);
  MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
  ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8);

  r2 = LD_SH(input + 2 * 8);
  r5 = LD_SH(input + 5 * 8);
  r10 = LD_SH(input + 10 * 8);
  r13 = LD_SH(input + 13 * 8);
  LD_SW2(const0 + 4 * 15, 4, k0, k1);
  LD_SW2(const0 + 4 * 17, 4, k2, k3);
  MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
  ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8);
  BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3);
  ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8);
}
/* Final stages (3 and 4) of the 16-point forward ADST along the rows.
 *
 * Row-pass counterpart of fadst16_cols_step2_msa; the only structural
 * difference is the destination layout: `out_ptr` is out + 8 (the right
 * 8 columns of each 16-wide output row) rather than out + 128.
 *
 * int_buf - 16*8 int16 intermediates from fadst16_rows_step1_msa
 * const0  - packed ADST constants (groups 19..28)
 * out     - 16x16 int16 destination, results at row pitch 16
 */
static void fadst16_rows_step2_msa(int16_t *int_buf, const int32_t *const0,
                                   int16_t *out) {
  int16_t *out_ptr = out + 8; /* right half of each output row */
  v8i16 tp0, tp1, tp2, tp3, g5, g7, g13, g15;
  v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h10, h11;
  v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
  v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
  v4i32 k0, k1, k2, k3;

  g13 = LD_SH(int_buf + 3 * 8);
  g15 = LD_SH(int_buf + 7 * 8);
  g5 = LD_SH(int_buf + 11 * 8);
  g7 = LD_SH(int_buf + 15 * 8);
  LD_SW2(const0 + 4 * 19, 4, k0, k1);
  k2 = LD_SW(const0 + 4 * 21);
  MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);

  tp0 = LD_SH(int_buf + 4 * 8);
  tp1 = LD_SH(int_buf + 5 * 8);
  tp3 = LD_SH(int_buf + 10 * 8);
  tp2 = LD_SH(int_buf + 14 * 8);
  LD_SW2(const0 + 4 * 22, 4, k0, k1);
  k2 = LD_SW(const0 + 4 * 24);
  MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
  out4 = -out4; /* ADST sign flip */
  ST_SH(out4, (out + 3 * 16));
  ST_SH(out5, (out_ptr + 4 * 16));

  h1 = LD_SH(int_buf + 9 * 8);
  h3 = LD_SH(int_buf + 12 * 8);
  MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
  out13 = -out13;
  ST_SH(out12, (out + 2 * 16));
  ST_SH(out13, (out_ptr + 5 * 16));

  tp0 = LD_SH(int_buf);
  tp1 = LD_SH(int_buf + 8);
  tp2 = LD_SH(int_buf + 2 * 8);
  tp3 = LD_SH(int_buf + 6 * 8);
  BUTTERFLY_4(tp0, tp1, tp3, tp2, out0, out1, h11, h10);
  out1 = -out1;
  ST_SH(out0, (out));
  ST_SH(out1, (out_ptr + 7 * 16));

  h0 = LD_SH(int_buf + 8 * 8);
  h2 = LD_SH(int_buf + 13 * 8);
  BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
  out8 = -out8;
  ST_SH(out8, (out + 16));
  ST_SH(out9, (out_ptr + 6 * 16));

  /* stage 4 */
  LD_SW2(const0 + 4 * 25, 4, k0, k1);
  LD_SW2(const0 + 4 * 27, 4, k2, k3);
  MADD_SHORT(h10, h11, k1, k2, out2, out3);
  ST_SH(out2, (out + 7 * 16));
  ST_SH(out3, (out_ptr));
  MADD_SHORT(out6, out7, k0, k3, out6, out7);
  ST_SH(out6, (out + 4 * 16));
  ST_SH(out7, (out_ptr + 3 * 16));
  MADD_SHORT(out10, out11, k0, k3, out10, out11);
  ST_SH(out10, (out + 6 * 16));
  ST_SH(out11, (out_ptr + 16));
  MADD_SHORT(out14, out15, k1, k2, out14, out15);
  ST_SH(out14, (out + 5 * 16));
  ST_SH(out15, (out_ptr + 2 * 16));
}
  278. static void fadst16_transpose_msa(int16_t *input, int16_t *out) {
  279. v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
  280. v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
  281. /* load input data */
  282. LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
  283. l7, l15);
  284. TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
  285. r7);
  286. TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
  287. r12, r13, r14, r15);
  288. ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
  289. ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
  290. out += 16 * 8;
  291. /* load input data */
  292. input += 128;
  293. LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
  294. l7, l15);
  295. TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
  296. r7);
  297. TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
  298. r12, r13, r14, r15);
  299. ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
  300. ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
  301. }
/* Post-processed 1-D 16-point forward DCT over 8 rows.
 *
 * Used by the ADST_DCT row pass: transposes the 16x8 half-block held in
 * `intermediate`, applies the FDCT rounding post-process to every vector,
 * then computes the 16-point DCT split into even (FDCT8x16_EVEN) and odd
 * (FDCT8x16_ODD) halves.  The butterfly's second-half outputs are spilled
 * through `intermediate` (which is clobbered) so the even pass can run
 * with the registers freed.  Results are transposed back and interleaved
 * even/odd into `output` at row pitch 16.
 */
static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) {
  int16_t *temp = intermediate;
  int16_t *out = output;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11;
  v8i16 in12, in13, in14, in15;

  /* load the two 8-column halves and transpose each 8x8 tile */
  LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7);
  temp = intermediate + 8;
  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
                     in10, in11, in12, in13, in14, in15);
  FDCT_POSTPROC_2V_NEG_H(in0, in1);
  FDCT_POSTPROC_2V_NEG_H(in2, in3);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  FDCT_POSTPROC_2V_NEG_H(in6, in7);
  FDCT_POSTPROC_2V_NEG_H(in8, in9);
  FDCT_POSTPROC_2V_NEG_H(in10, in11);
  FDCT_POSTPROC_2V_NEG_H(in12, in13);
  FDCT_POSTPROC_2V_NEG_H(in14, in15);

  /* stage-1 butterfly; spill the odd-path inputs to free registers */
  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
               in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
               tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
  temp = intermediate;
  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);

  /* even-coefficient DCT on the sums */
  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);

  /* reload the spilled differences and run the odd-coefficient DCT */
  temp = intermediate;
  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
               in4, in5, in6, in7);

  /* transpose back, interleaving even (tmpN) and odd (inN) results */
  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
                     tmp1, in1, tmp2, in2, tmp3, in3);
  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);
  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
                     tmp5, in5, tmp6, in6, tmp7, in7);
  out = output + 8;
  ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
}
/* 16x16 forward hybrid transform (MSA), dispatching on tx_type.
 *
 * input   - residual block, 16 rows at `stride` int16 elements apart
 * output  - 256 int16 transform coefficients
 * stride  - element stride between input rows
 * tx_type - DCT_DCT, ADST_DCT, DCT_ADST, or ADST_ADST
 *
 * Each 1-D pass is split into two 8-lane halves (hence the i < 2 loops).
 * const_arr holds the ADST transform constants; each logical constant is
 * replicated four times so a single LD_SW fills a whole v4i32 vector,
 * giving 29 groups of 4 int32 consumed by the fadst16_* helpers.
 */
void vp9_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
                      int32_t tx_type) {
  DECLARE_ALIGNED(32, int16_t, tmp[256]);       /* inter-pass 16x16 buffer */
  DECLARE_ALIGNED(32, int16_t, trans_buf[256]); /* transposed intermediate */
  DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);   /* per-half ADST scratch */
  int32_t i;
  int16_t *ptmpbuf = &tmp_buf[0];
  int16_t *trans = &trans_buf[0];
  const int32_t const_arr[29 * 4] = {
    52707308,    52707308,    52707308,    52707308,    -1072430300,
    -1072430300, -1072430300, -1072430300, 795618043,   795618043,
    795618043,   795618043,   -721080468,  -721080468,  -721080468,
    -721080468,  459094491,   459094491,   459094491,   459094491,
    -970646691,  -970646691,  -970646691,  -970646691,  1010963856,
    1010963856,  1010963856,  1010963856,  -361743294,  -361743294,
    -361743294,  -361743294,  209469125,   209469125,   209469125,
    209469125,   -1053094788, -1053094788, -1053094788, -1053094788,
    1053160324,  1053160324,  1053160324,  1053160324,  639644520,
    639644520,   639644520,   639644520,   -862444000,  -862444000,
    -862444000,  -862444000,  1062144356,  1062144356,  1062144356,
    1062144356,  -157532337,  -157532337,  -157532337,  -157532337,
    260914709,   260914709,   260914709,   260914709,   -1041559667,
    -1041559667, -1041559667, -1041559667, 920985831,   920985831,
    920985831,   920985831,   -551995675,  -551995675,  -551995675,
    -551995675,  596522295,   596522295,   596522295,   596522295,
    892853362,   892853362,   892853362,   892853362,   -892787826,
    -892787826,  -892787826,  -892787826,  410925857,   410925857,
    410925857,   410925857,   -992012162,  -992012162,  -992012162,
    -992012162,  992077698,   992077698,   992077698,   992077698,
    759246145,   759246145,   759246145,   759246145,   -759180609,
    -759180609,  -759180609,  -759180609,  -759222975,  -759222975,
    -759222975,  -759222975,  759288511,   759288511,   759288511,
    759288511
  };

  switch (tx_type) {
    case DCT_DCT:
      /* column transform */
      for (i = 0; i < 2; ++i) {
        fdct8x16_1d_column(input + 8 * i, tmp + 8 * i, stride);
      }
      /* row transform */
      for (i = 0; i < 2; ++i) {
        fdct16x8_1d_row(tmp + (128 * i), output + (128 * i));
      }
      break;
    case ADST_DCT:
      /* column transform (ADST), writing column results into tmp */
      for (i = 0; i < 2; ++i) {
        fadst16_cols_step1_msa(input + (i << 3), stride, const_arr, ptmpbuf);
        fadst16_cols_step2_msa(ptmpbuf, const_arr, tmp + (i << 3));
      }
      /* row transform (DCT with built-in transpose/postproc) */
      for (i = 0; i < 2; ++i) {
        postproc_fdct16x8_1d_row(tmp + (128 * i), output + (128 * i));
      }
      break;
    case DCT_ADST:
      /* column transform (DCT) */
      for (i = 0; i < 2; ++i) {
        fdct8x16_1d_column(input + 8 * i, tmp + 8 * i, stride);
      }
      /* transpose + postproc so the ADST row pass reads pitch-8 data */
      fadst16_transpose_postproc_msa(tmp, trans);
      /* row transform (ADST) */
      for (i = 0; i < 2; ++i) {
        fadst16_rows_step1_msa(trans + (i << 7), const_arr, ptmpbuf);
        fadst16_rows_step2_msa(ptmpbuf, const_arr, tmp + (i << 7));
      }
      fadst16_transpose_msa(tmp, output);
      break;
    case ADST_ADST:
      /* column transform (ADST) */
      for (i = 0; i < 2; ++i) {
        fadst16_cols_step1_msa(input + (i << 3), stride, const_arr, ptmpbuf);
        fadst16_cols_step2_msa(ptmpbuf, const_arr, tmp + (i << 3));
      }
      fadst16_transpose_postproc_msa(tmp, trans);
      /* row transform (ADST) */
      for (i = 0; i < 2; ++i) {
        fadst16_rows_step1_msa(trans + (i << 7), const_arr, ptmpbuf);
        fadst16_rows_step2_msa(ptmpbuf, const_arr, tmp + (i << 7));
      }
      fadst16_transpose_msa(tmp, output);
      break;
    default: assert(0); break;
  }
}