idct32x32_msa.c

/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/inv_txfm_msa.h"

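/*
 * The full 32x32 inverse transform below is computed in two passes: four
 * 32x8 row transforms write a 32x32 intermediate array, then four 8x32
 * column transforms round the results by 6 bits and add them to the
 * destination pixels. Each 1-D pass splits the 32-point IDCT into an
 * even-indexed and an odd-indexed half (tmp_eve_buf / tmp_odd_buf) that a
 * final butterfly stage recombines.
 */
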
static void idct32x8_row_transpose_store(const int16_t *input,
                                         int16_t *tmp_buf) {
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* 1st & 2nd 8x8 */
  LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);

  /* 3rd & 4th 8x8 */
  LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
  ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 28 * 8), 8);
}

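/* Even half of the 32-point row IDCT: consumes the even-indexed coefficient
 * rows of the transposed block in tmp_buf and writes 16 vectors of partial
 * results to tmp_eve_buf. */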
static void idct32x8_row_even_process_store(int16_t *tmp_buf,
                                            int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 15 * 8));
  ST_SH(loc1, (tmp_eve_buf));
  ST_SH(loc2, (tmp_eve_buf + 14 * 8));
  ST_SH(loc3, (tmp_eve_buf + 8));

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 13 * 8));
  ST_SH(loc1, (tmp_eve_buf + 2 * 8));
  ST_SH(loc2, (tmp_eve_buf + 12 * 8));
  ST_SH(loc3, (tmp_eve_buf + 3 * 8));

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 11 * 8));
  ST_SH(loc1, (tmp_eve_buf + 4 * 8));
  ST_SH(loc2, (tmp_eve_buf + 10 * 8));
  ST_SH(loc3, (tmp_eve_buf + 5 * 8));

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 9 * 8));
  ST_SH(loc1, (tmp_eve_buf + 6 * 8));
  ST_SH(loc2, (tmp_eve_buf + 8 * 8));
  ST_SH(loc3, (tmp_eve_buf + 7 * 8));
}

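/* Odd half of the 32-point row IDCT: consumes the odd-indexed coefficient
 * rows of the transposed block in tmp_buf and writes 16 vectors of partial
 * results to tmp_odd_buf. */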
static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
                                           int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 8);
  reg1 = LD_SH(tmp_buf + 7 * 8);
  reg2 = LD_SH(tmp_buf + 9 * 8);
  reg3 = LD_SH(tmp_buf + 15 * 8);
  reg4 = LD_SH(tmp_buf + 17 * 8);
  reg5 = LD_SH(tmp_buf + 23 * 8);
  reg6 = LD_SH(tmp_buf + 25 * 8);
  reg7 = LD_SH(tmp_buf + 31 * 8);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf), 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 8);
  reg1 = LD_SH(tmp_buf + 5 * 8);
  reg2 = LD_SH(tmp_buf + 11 * 8);
  reg3 = LD_SH(tmp_buf + 13 * 8);
  reg4 = LD_SH(tmp_buf + 19 * 8);
  reg5 = LD_SH(tmp_buf + 21 * 8);
  reg6 = LD_SH(tmp_buf + 27 * 8);
  reg7 = LD_SH(tmp_buf + 29 * 8);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
  BUTTERFLY_4(loc3, loc2, loc0, loc1, vec1, vec0, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH(reg0, (tmp_odd_buf + 13 * 8));
  ST_SH(reg1, (tmp_odd_buf + 14 * 8));
  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

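/* Final butterfly for the row pass: combines the even and odd halves (sums
 * stay in registers, differences are spilled back into tmp_buf), then
 * transposes the 32x8 result into eight rows of 32 in dst. */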
static void idct_butterfly_transpose_store(int16_t *tmp_buf,
                                           int16_t *tmp_eve_buf,
                                           int16_t *tmp_odd_buf, int16_t *dst) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);
  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
  ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);
  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
  ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);
  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
  ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);
  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
  ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));

  /* Transpose : 16 vectors */
  /* 1st & 2nd 8x8 */
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);

  /* 3rd & 4th 8x8 */
  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
}

static void idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
  DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]);
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct32x8_row_transpose_store(input, &tmp_buf[0]);
  idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
  idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
  idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], &tmp_odd_buf[0],
                                 output);
}

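/* Column-pass counterpart of the even half above: the arithmetic is the
 * same, but the coefficients are read directly from the 32x32 intermediate
 * buffer (rows of 32, stepping 4 * 32 elements between vector loads), so no
 * transpose is needed. */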
static void idct8x32_column_even_process_store(int16_t *tmp_buf,
                                               int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  tmp_buf += (2 * 32);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  /* Load 8 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  /* Store 8 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, tmp_eve_buf, 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
}

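/* Column-pass counterpart of the odd half: reads the odd-indexed rows of the
 * intermediate buffer (offsets are multiples of 32) and writes the partial
 * results to tmp_odd_buf. */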
static void idct8x32_column_odd_process_store(int16_t *tmp_buf,
                                              int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 32);
  reg1 = LD_SH(tmp_buf + 7 * 32);
  reg2 = LD_SH(tmp_buf + 9 * 32);
  reg3 = LD_SH(tmp_buf + 15 * 32);
  reg4 = LD_SH(tmp_buf + 17 * 32);
  reg5 = LD_SH(tmp_buf + 23 * 32);
  reg6 = LD_SH(tmp_buf + 25 * 32);
  reg7 = LD_SH(tmp_buf + 31 * 32);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, tmp_odd_buf, 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 32);
  reg1 = LD_SH(tmp_buf + 5 * 32);
  reg2 = LD_SH(tmp_buf + 11 * 32);
  reg3 = LD_SH(tmp_buf + 13 * 32);
  reg4 = LD_SH(tmp_buf + 19 * 32);
  reg5 = LD_SH(tmp_buf + 21 * 32);
  reg6 = LD_SH(tmp_buf + 27 * 32);
  reg7 = LD_SH(tmp_buf + 29 * 32);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
  BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

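/* Final butterfly for the column pass: recombines the even and odd halves,
 * rounds each result by 6 bits (SRARI_H4_SH), and adds the reconstructed
 * residual to the destination pixels 8x4 at a time (VP9_ADDBLK_ST8x4_UB).
 * The sums cover output rows 0..15 and the differences rows 16..31, which
 * is why the second store of each pair starts 16 to 19 strides further
 * down. */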
static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                             int16_t *tmp_odd_buf, uint8_t *dst,
                                             int32_t dst_stride) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
                      m6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);
  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
                      m7);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);
  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
                      n6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);
  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
                      n7);
}

static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride) {
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
  idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
  idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], dst,
                                   dst_stride);
}

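/* Full 32x32 inverse transform (up to 1024 non-zero coefficients), normally
 * reached through the vpx_dsp RTCD dispatch.
 *
 * Usage sketch (illustrative only; buffer names are hypothetical):
 *
 *   DECLARE_ALIGNED(32, int16_t, coeffs[32 * 32]);  // dequantized coeffs
 *   vpx_idct32x32_1024_add_msa(coeffs, recon + y * stride + x, stride);
 */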
void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  /* transform rows */
  for (i = 0; i < 4; ++i) {
    /* process 32 * 8 block */
    idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8)));
  }

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}

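/* The _34 variant handles the case where the non-zero coefficients are
 * confined to the upper-left 8x8 of the block. The inline-asm loop below
 * zeroes one 32-element row (64 bytes) of the intermediate buffer per
 * iteration, a single 32x8 row pass is run on the live coefficients, and
 * the column pass is identical to the full transform. */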
void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  for (i = 32; i--;) {
    __asm__ __volatile__(
        "sw     $zero,      0(%[out_ptr])     \n\t"
        "sw     $zero,      4(%[out_ptr])     \n\t"
        "sw     $zero,      8(%[out_ptr])     \n\t"
        "sw     $zero,     12(%[out_ptr])     \n\t"
        "sw     $zero,     16(%[out_ptr])     \n\t"
        "sw     $zero,     20(%[out_ptr])     \n\t"
        "sw     $zero,     24(%[out_ptr])     \n\t"
        "sw     $zero,     28(%[out_ptr])     \n\t"
        "sw     $zero,     32(%[out_ptr])     \n\t"
        "sw     $zero,     36(%[out_ptr])     \n\t"
        "sw     $zero,     40(%[out_ptr])     \n\t"
        "sw     $zero,     44(%[out_ptr])     \n\t"
        "sw     $zero,     48(%[out_ptr])     \n\t"
        "sw     $zero,     52(%[out_ptr])     \n\t"
        "sw     $zero,     56(%[out_ptr])     \n\t"
        "sw     $zero,     60(%[out_ptr])     \n\t"

        :
        : [out_ptr] "r"(out_ptr));

    out_ptr += 32;
  }

  out_ptr = out_arr;

  /* rows: only upper-left 8x8 has non-zero coeff */
  idct32x8_1d_rows_msa(input, out_ptr);

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}

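/* DC-only path: with a single non-zero coefficient the 2-D transform reduces
 * to one constant added to every pixel. Scalar sketch of what the vector
 * loop computes (a sketch only, using the shared cospi_16_64 /
 * DCT_CONST_BITS / ROUND_POWER_OF_TWO definitions pulled in through
 * inv_txfm_msa.h):
 *
 *   int16_t dc = ROUND_POWER_OF_TWO(input[0] * cospi_16_64, DCT_CONST_BITS);
 *   dc = ROUND_POWER_OF_TWO(dc * cospi_16_64, DCT_CONST_BITS);
 *   dc = ROUND_POWER_OF_TWO(dc, 6);
 *   // then, for every pixel of the 32x32 block:
 *   //   dst[r * dst_stride + c] += dc, clamped to [0, 255]
 *
 * The MSA version splats dc into a vector and processes two 32-pixel rows
 * per loop iteration. */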
void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
  int32_t i;
  int16_t out;
  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;

  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out, 6);

  vec = __msa_fill_h(out);

  for (i = 16; i--;) {
    LD_UB2(dst, 16, dst0, dst1);
    LD_UB2(dst + dst_stride, 16, dst2, dst3);

    UNPCK_UB_SH(dst0, res0, res4);
    UNPCK_UB_SH(dst1, res1, res5);
    UNPCK_UB_SH(dst2, res2, res6);
    UNPCK_UB_SH(dst3, res3, res7);
    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
    ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    CLIP_SH4_0_255(res4, res5, res6, res7);
    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,
                tmp2, tmp3);

    ST_UB2(tmp0, tmp1, dst, 16);
    dst += dst_stride;
    ST_UB2(tmp2, tmp3, dst, 16);
    dst += dst_stride;
  }
}