/* highbd_idct8x8_add_sse2.c */
  1. /*
  2. * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <emmintrin.h> // SSE2
  11. #include "./vpx_dsp_rtcd.h"
  12. #include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
  13. #include "vpx_dsp/x86/inv_txfm_sse2.h"
  14. #include "vpx_dsp/x86/transpose_sse2.h"
// One 1-D idct8 pass over an 8x4 strip of 32-bit coefficients, in place.
// io[0..7] each hold four 32-bit lanes; the strip is transposed first so the
// butterflies below run along the transform dimension.
static void highbd_idct8x8_half1d(__m128i *const io) {
  __m128i step1[8], step2[8];

  transpose_32bit_4x4x2(io, io);

  // stage 1: reorder the even-index inputs (0,4,2,6) and apply the two
  // odd-input butterflies with their cosine constant pairs.
  step1[0] = io[0];
  step1[2] = io[4];
  step1[1] = io[2];
  step1[3] = io[6];
  highbd_butterfly_sse2(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],
                        &step1[7]);
  highbd_butterfly_sse2(io[5], io[3], cospi_12_64, cospi_20_64, &step1[5],
                        &step1[6]);

  // stage 2: even half gets the cospi16/cospi24-cospi8 butterflies; odd half
  // is combined with plain 32-bit adds/subs.
  highbd_butterfly_cospi16_sse2(step1[0], step1[2], &step2[0], &step2[1]);
  highbd_butterfly_sse2(step1[1], step1[3], cospi_24_64, cospi_8_64, &step2[2],
                        &step2[3]);
  step2[4] = _mm_add_epi32(step1[4], step1[5]);
  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
  step2[7] = _mm_add_epi32(step1[7], step1[6]);

  // stage 3: cross-combine the even outputs; steps 5/6 of the odd half get
  // one more cospi16 butterfly while 4 and 7 pass through.
  step1[0] = _mm_add_epi32(step2[0], step2[3]);
  step1[1] = _mm_add_epi32(step2[1], step2[2]);
  step1[2] = _mm_sub_epi32(step2[1], step2[2]);
  step1[3] = _mm_sub_epi32(step2[0], step2[3]);
  step1[4] = step2[4];
  highbd_butterfly_cospi16_sse2(step2[6], step2[5], &step1[6], &step1[5]);
  step1[7] = step2[7];

  // stage 4: final add/sub stage, results written back into io[0..7].
  highbd_idct8_stage4(step1, io);
}
// One 1-D idct8 pass for the eob<=12 case: the caller loads only the
// top-left 4x4 coefficients, so io[0..3] are the sole non-zero inputs and
// each stage-1/2 butterfly collapses to a single multiply-round-shift.
// Operates in place on io[0..7].
static void highbd_idct8x8_12_half1d(__m128i *const io) {
  __m128i temp1[4], sign[2], step1[8], step2[8];

  transpose_32bit_4x4(io, io);

  // stage 1: with the upper inputs zero, each odd-path output is one
  // multiplication against the relevant cosine constant.
  step1[0] = io[0];
  step1[1] = io[2];
  abs_extend_64bit_sse2(io[1], temp1, sign);
  step1[4] = multiplication_round_shift_sse2(temp1, sign, cospi_28_64);
  step1[7] = multiplication_round_shift_sse2(temp1, sign, cospi_4_64);
  abs_extend_64bit_sse2(io[3], temp1, sign);
  // Negated product: this input enters the full butterfly with a minus sign.
  step1[5] = multiplication_neg_round_shift_sse2(temp1, sign, cospi_20_64);
  step1[6] = multiplication_round_shift_sse2(temp1, sign, cospi_12_64);

  // stage 2
  abs_extend_64bit_sse2(step1[0], temp1, sign);
  step2[0] = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
  abs_extend_64bit_sse2(step1[1], temp1, sign);
  step2[2] = multiplication_round_shift_sse2(temp1, sign, cospi_24_64);
  step2[3] = multiplication_round_shift_sse2(temp1, sign, cospi_8_64);
  step2[4] = _mm_add_epi32(step1[4], step1[5]);
  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
  step2[7] = _mm_add_epi32(step1[7], step1[6]);

  // stage 3: step2[1] is never computed; step2[0] is used in its place
  // (presumably equal here because the second butterfly input is zero —
  // NOTE(review): compare with the full highbd_idct8x8_half1d to confirm).
  step1[0] = _mm_add_epi32(step2[0], step2[3]);
  step1[1] = _mm_add_epi32(step2[0], step2[2]);
  step1[2] = _mm_sub_epi32(step2[0], step2[2]);
  step1[3] = _mm_sub_epi32(step2[0], step2[3]);
  step1[4] = step2[4];
  highbd_butterfly_cospi16_sse2(step2[6], step2[5], &step1[6], &step1[5]);
  step1[7] = step2[7];

  // stage 4: final add/sub stage, results written back into io[0..7].
  highbd_idct8_stage4(step1, io);
}
  79. void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint16_t *dest,
  80. int stride, int bd) {
  81. __m128i io[16];
  82. io[0] = _mm_load_si128((const __m128i *)(input + 0 * 8 + 0));
  83. io[4] = _mm_load_si128((const __m128i *)(input + 0 * 8 + 4));
  84. io[1] = _mm_load_si128((const __m128i *)(input + 1 * 8 + 0));
  85. io[5] = _mm_load_si128((const __m128i *)(input + 1 * 8 + 4));
  86. io[2] = _mm_load_si128((const __m128i *)(input + 2 * 8 + 0));
  87. io[6] = _mm_load_si128((const __m128i *)(input + 2 * 8 + 4));
  88. io[3] = _mm_load_si128((const __m128i *)(input + 3 * 8 + 0));
  89. io[7] = _mm_load_si128((const __m128i *)(input + 3 * 8 + 4));
  90. if (bd == 8) {
  91. __m128i io_short[8];
  92. io_short[0] = _mm_packs_epi32(io[0], io[4]);
  93. io_short[1] = _mm_packs_epi32(io[1], io[5]);
  94. io_short[2] = _mm_packs_epi32(io[2], io[6]);
  95. io_short[3] = _mm_packs_epi32(io[3], io[7]);
  96. io[8] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 0));
  97. io[12] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 4));
  98. io[9] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 0));
  99. io[13] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 4));
  100. io[10] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 0));
  101. io[14] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 4));
  102. io[11] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 0));
  103. io[15] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 4));
  104. io_short[4] = _mm_packs_epi32(io[8], io[12]);
  105. io_short[5] = _mm_packs_epi32(io[9], io[13]);
  106. io_short[6] = _mm_packs_epi32(io[10], io[14]);
  107. io_short[7] = _mm_packs_epi32(io[11], io[15]);
  108. vpx_idct8_sse2(io_short);
  109. vpx_idct8_sse2(io_short);
  110. round_shift_8x8(io_short, io);
  111. } else {
  112. __m128i temp[4];
  113. highbd_idct8x8_half1d(io);
  114. io[8] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 0));
  115. io[12] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 4));
  116. io[9] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 0));
  117. io[13] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 4));
  118. io[10] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 0));
  119. io[14] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 4));
  120. io[11] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 0));
  121. io[15] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 4));
  122. highbd_idct8x8_half1d(&io[8]);
  123. temp[0] = io[4];
  124. temp[1] = io[5];
  125. temp[2] = io[6];
  126. temp[3] = io[7];
  127. io[4] = io[8];
  128. io[5] = io[9];
  129. io[6] = io[10];
  130. io[7] = io[11];
  131. highbd_idct8x8_half1d(io);
  132. io[8] = temp[0];
  133. io[9] = temp[1];
  134. io[10] = temp[2];
  135. io[11] = temp[3];
  136. highbd_idct8x8_half1d(&io[8]);
  137. highbd_idct8x8_final_round(io);
  138. }
  139. recon_and_store_8x8(io, dest, stride, bd);
  140. }
  141. void vpx_highbd_idct8x8_12_add_sse2(const tran_low_t *input, uint16_t *dest,
  142. int stride, int bd) {
  143. const __m128i zero = _mm_setzero_si128();
  144. __m128i io[16];
  145. io[0] = _mm_load_si128((const __m128i *)(input + 0 * 8 + 0));
  146. io[1] = _mm_load_si128((const __m128i *)(input + 1 * 8 + 0));
  147. io[2] = _mm_load_si128((const __m128i *)(input + 2 * 8 + 0));
  148. io[3] = _mm_load_si128((const __m128i *)(input + 3 * 8 + 0));
  149. if (bd == 8) {
  150. __m128i io_short[8];
  151. io_short[0] = _mm_packs_epi32(io[0], zero);
  152. io_short[1] = _mm_packs_epi32(io[1], zero);
  153. io_short[2] = _mm_packs_epi32(io[2], zero);
  154. io_short[3] = _mm_packs_epi32(io[3], zero);
  155. idct8x8_12_add_kernel_sse2(io_short);
  156. round_shift_8x8(io_short, io);
  157. } else {
  158. __m128i temp[4];
  159. highbd_idct8x8_12_half1d(io);
  160. temp[0] = io[4];
  161. temp[1] = io[5];
  162. temp[2] = io[6];
  163. temp[3] = io[7];
  164. highbd_idct8x8_12_half1d(io);
  165. io[8] = temp[0];
  166. io[9] = temp[1];
  167. io[10] = temp[2];
  168. io[11] = temp[3];
  169. highbd_idct8x8_12_half1d(&io[8]);
  170. highbd_idct8x8_final_round(io);
  171. }
  172. recon_and_store_8x8(io, dest, stride, bd);
  173. }
// Single-coefficient (DC-only) 8x8 inverse transform: delegate to the shared
// one-coefficient kernel with block size 8.
void vpx_highbd_idct8x8_1_add_sse2(const tran_low_t *input, uint16_t *dest,
                                   int stride, int bd) {
  highbd_idct_1_add_kernel(input, dest, stride, bd, 8);
}