inv_txfm_ssse3.c
/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <tmmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_ssse3.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

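// A full idct butterfly rotates a pair of inputs:
//   out0 = in0 * c0 - in1 * c1
//   out1 = in0 * c1 + in1 * c0
// (see butterfly() in inv_txfm_sse2.h). When in1 is known to be zero, both
// outputs collapse to a single rounded multiply. _mm_mulhrs_epi16(a, b)
// returns (a * b + (1 << 14)) >> 15, so doubling the 14-bit fixed-point
// cospi constants makes it compute round(in * c / 2^14), the same rounding
// the full butterfly applies.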
static INLINE void partial_butterfly_ssse3(const __m128i in, const int c0,
                                           const int c1, __m128i *const out0,
                                           __m128i *const out1) {
  const __m128i cst0 = _mm_set1_epi16(2 * c0);
  const __m128i cst1 = _mm_set1_epi16(2 * c1);
  *out0 = _mm_mulhrs_epi16(in, cst0);
  *out1 = _mm_mulhrs_epi16(in, cst1);
}

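// Partial butterfly special case: c0 == c1 == cospi_16_64 makes the two
// outputs identical, so a single product suffices.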
static INLINE __m128i partial_butterfly_cospi16_ssse3(const __m128i in) {
  const __m128i coef_pair = _mm_set1_epi16(2 * cospi_16_64);
  return _mm_mulhrs_epi16(in, coef_pair);
}

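// The "_12" suffix follows the libvpx naming convention: at most 12 non-zero
// coefficients, all within the upper-left 4x4 corner of the 8x8 block, so
// only the first four values of the first four rows need to be loaded.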
void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  __m128i io[8];

  io[0] = load_input_data4(input + 0 * 8);
  io[1] = load_input_data4(input + 1 * 8);
  io[2] = load_input_data4(input + 2 * 8);
  io[3] = load_input_data4(input + 3 * 8);

  idct8x8_12_add_kernel_ssse3(io);
  write_buffer_8x8(io, dest, stride);
}

// Group the coefficient calculation into smaller functions to prevent stack
// spillover in the 32x32 idct optimizations:
// quarter_1: outputs 0-7
// quarter_2: outputs 8-15
// quarter_3_4: outputs 16-23, 24-31

// For each 8x32 block __m128i in[32]:
// inputs with index 0, 4
// output pixels 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
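
  // With only in[0] and in[4] non-zero, the remaining stage inputs are zero,
  // so the skipped additions collapse to plain copies. The same
  // simplification recurs in the quarter_2 and quarter_3_4 helpers below.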
  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[0];
  step1[2] = step2[0];
  step1[3] = step2[0];
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32]:
// inputs with index 2, 6
// output pixels 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_34_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_34_8x32_quarter_1(in, temp);
  idct32_34_8x32_quarter_2(in, temp);
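  // add_sub_butterfly() (inv_txfm_sse2.h) mirrors element i against element
  // size-1-i: out[i] = in[i] + in[size-1-i] and
  // out[size-1-i] = in[i] - in[size-1-i].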
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32]:
// inputs with odd index 1, 3, 5, 7
// output pixels 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);
  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);
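
  // With in[9], in[11], in[13] and in[15] all zero, stage 2 of the full
  // idct32 reduces to copies, so it is skipped here and the stage-3
  // butterflies consume the stage-1 outputs directly.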
  // stage 3
  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_34_8x32_ssse3(const __m128i *const in /*in[32]*/,
                          __m128i *const out /*out[32]*/) {
  __m128i temp[32];
  idct32_34_8x32_quarter_1_2(in, temp);
  idct32_34_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

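// The 2-D idct32 is two 1-D passes: one row pass on the transposed input,
// then a column pass over each group of eight columns. With at most 34
// non-zero coefficients, a single 8x32 row pass covers every non-zero row.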
// Only the upper-left 8x8 block has non-zero coefficients.
void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m128i io[32], col[32];
  int i;

  // Load input data. Only need to load the top left 8x8 block.
  load_transpose_16bit_8x8(input, 32, io);
  idct32_34_8x32_ssse3(io, col);

  for (i = 0; i < 32; i += 8) {
    int j;
    transpose_16bit_8x8(col + i, io);
    idct32_34_8x32_ssse3(io, io);

    for (j = 0; j < 32; ++j) {
      write_buffer_8x1(dest + j * stride, io[j]);
    }

    dest += 8;
  }
}

// For each 8x32 block __m128i in[32]:
// inputs with index 0, 4, 8, 12
// output pixels 0-7 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  partial_butterfly_ssse3(in[12], -cospi_20_64, cospi_12_64, &step1[5],
                          &step1[6]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  partial_butterfly_ssse3(in[8], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  step2[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm_add_epi16(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[0], step2[2]);
  step1[2] = _mm_sub_epi16(step2[0], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32]:
// inputs with index 2, 6, 10, 14
// output pixels 8-15 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[14], -cospi_18_64, cospi_14_64, &step2[9],
                          &step2[14]);
  partial_butterfly_ssse3(in[10], cospi_22_64, cospi_10_64, &step2[10],
                          &step2[13]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi16(step2[8], step2[9]);
  step1[9] = _mm_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm_add_epi16(step2[11], step2[10]);
  step1[12] = _mm_add_epi16(step2[12], step2[13]);
  step1[13] = _mm_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm_add_epi16(step2[15], step2[14]);

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_135_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_135_8x32_quarter_1(in, temp);
  idct32_135_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32]:
// inputs with odd index 1, 3, 5, 7, 9, 11, 13, 15
// output pixels 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[15], -cospi_17_64, cospi_15_64, &step1[17],
                          &step1[30]);
  partial_butterfly_ssse3(in[9], cospi_23_64, cospi_9_64, &step1[18],
                          &step1[29]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);
  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[11], -cospi_21_64, cospi_11_64, &step1[21],
                          &step1[26]);
  partial_butterfly_ssse3(in[13], cospi_19_64, cospi_13_64, &step1[22],
                          &step1[25]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi16(step1[16], step1[17]);
  step2[17] = _mm_sub_epi16(step1[16], step1[17]);
  step2[18] = _mm_sub_epi16(step1[19], step1[18]);
  step2[19] = _mm_add_epi16(step1[19], step1[18]);
  step2[20] = _mm_add_epi16(step1[20], step1[21]);
  step2[21] = _mm_sub_epi16(step1[20], step1[21]);
  step2[22] = _mm_sub_epi16(step1[23], step1[22]);
  step2[23] = _mm_add_epi16(step1[23], step1[22]);
  step2[24] = _mm_add_epi16(step1[24], step1[25]);
  step2[25] = _mm_sub_epi16(step1[24], step1[25]);
  step2[26] = _mm_sub_epi16(step1[27], step1[26]);
  step2[27] = _mm_add_epi16(step1[27], step1[26]);
  step2[28] = _mm_add_epi16(step1[28], step1[29]);
  step2[29] = _mm_sub_epi16(step1[28], step1[29]);
  step2[30] = _mm_sub_epi16(step1[31], step1[30]);
  step2[31] = _mm_add_epi16(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_135_8x32_ssse3(const __m128i *const in /*in[32]*/,
                           __m128i *const out /*out[32]*/) {
  __m128i temp[32];
  idct32_135_8x32_quarter_1_2(in, temp);
  idct32_135_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

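// At most 135 non-zero coefficients, all within the upper-left 16x16 block:
// two 8x32 row passes (rows 0-7 and 8-15), each reading the 16 left-most
// columns, cover every non-zero coefficient.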
void vpx_idct32x32_135_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m128i col[2][32], io[32];
  int i;

  // rows
  for (i = 0; i < 2; i++) {
    load_transpose_16bit_8x8(&input[0], 32, &io[0]);
    load_transpose_16bit_8x8(&input[8], 32, &io[8]);
    idct32_135_8x32_ssse3(io, col[i]);
    input += 32 << 3;
  }

  // columns
  for (i = 0; i < 32; i += 8) {
    transpose_16bit_8x8(col[0] + i, io);
    transpose_16bit_8x8(col[1] + i, io + 8);
    idct32_135_8x32_ssse3(io, io);
    store_buffer_8x32(io, dest, stride);
    dest += 8;
  }
}