highbd_idct4x4_add_sse2.c

/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"

static INLINE __m128i dct_const_round_shift_4_sse2(const __m128i in0,
                                                   const __m128i in1) {
  const __m128i t0 = _mm_unpacklo_epi32(in0, in1);  // 0, 1
  const __m128i t1 = _mm_unpackhi_epi32(in0, in1);  // 2, 3
  const __m128i t2 = _mm_unpacklo_epi64(t0, t1);    // 0, 1, 2, 3
  return dct_const_round_shift_sse2(t2);
}
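
// dct_const_round_shift_4_sse2() above collects the low 32 bits of four
// 64-bit products (two per input register, sitting in the even 32-bit lanes)
// into a single vector before rounding. A sketch of that rounding, assuming
// DCT_CONST_BITS == 14 and DCT_CONST_ROUNDING == (1 << 13) as defined in
// vpx_dsp/txfm_common.h:
//
//   result = (product + 8192) >> 14
//
// e.g. 64 * cospi_16_64 = 64 * 11585 = 741440, and (741440 + 8192) >> 14 = 45.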

static INLINE void highbd_idct4_small_sse2(__m128i *const io) {
  const __m128i cospi_p16_p16 = _mm_setr_epi32(cospi_16_64, 0, cospi_16_64, 0);
  const __m128i cospi_p08_p08 = _mm_setr_epi32(cospi_8_64, 0, cospi_8_64, 0);
  const __m128i cospi_p24_p24 = _mm_setr_epi32(cospi_24_64, 0, cospi_24_64, 0);
  __m128i temp1[4], temp2[4], step[4];

  transpose_32bit_4x4(io, io);

  // Note: There is no 32-bit signed multiply SIMD instruction in SSE2.
  //       _mm_mul_epu32() is used which can only guarantee the lower 32-bit
  //       (signed) result is meaningful, which is enough in this function.
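  //
  //       For two's complement inputs the low 32 bits of the unsigned and
  //       signed products are identical; e.g. (-3) * 11585 = -34755, and
  //       (0xFFFFFFFDu * 11585) mod 2^32 is the same 32-bit two's complement
  //       bit pattern. Only the (discarded) upper 32 bits differ.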

  // stage 1
  temp1[0] = _mm_add_epi32(io[0], io[2]);  // input[0] + input[2]
  temp2[0] = _mm_sub_epi32(io[0], io[2]);  // input[0] - input[2]
  temp1[1] = _mm_srli_si128(temp1[0], 4);  // 1, 3
  temp2[1] = _mm_srli_si128(temp2[0], 4);  // 1, 3
  temp1[0] = _mm_mul_epu32(temp1[0], cospi_p16_p16);  // ([0] + [2])*cospi_16_64
  temp1[1] = _mm_mul_epu32(temp1[1], cospi_p16_p16);  // ([0] + [2])*cospi_16_64
  temp2[0] = _mm_mul_epu32(temp2[0], cospi_p16_p16);  // ([0] - [2])*cospi_16_64
  temp2[1] = _mm_mul_epu32(temp2[1], cospi_p16_p16);  // ([0] - [2])*cospi_16_64
  step[0] = dct_const_round_shift_4_sse2(temp1[0], temp1[1]);
  step[1] = dct_const_round_shift_4_sse2(temp2[0], temp2[1]);
  temp1[3] = _mm_srli_si128(io[1], 4);
  temp2[3] = _mm_srli_si128(io[3], 4);
  temp1[0] = _mm_mul_epu32(io[1], cospi_p24_p24);     // input[1] * cospi_24_64
  temp1[1] = _mm_mul_epu32(temp1[3], cospi_p24_p24);  // input[1] * cospi_24_64
  temp2[0] = _mm_mul_epu32(io[1], cospi_p08_p08);     // input[1] * cospi_8_64
  temp2[1] = _mm_mul_epu32(temp1[3], cospi_p08_p08);  // input[1] * cospi_8_64
  temp1[2] = _mm_mul_epu32(io[3], cospi_p08_p08);     // input[3] * cospi_8_64
  temp1[3] = _mm_mul_epu32(temp2[3], cospi_p08_p08);  // input[3] * cospi_8_64
  temp2[2] = _mm_mul_epu32(io[3], cospi_p24_p24);     // input[3] * cospi_24_64
  temp2[3] = _mm_mul_epu32(temp2[3], cospi_p24_p24);  // input[3] * cospi_24_64
  temp1[0] = _mm_sub_epi64(temp1[0], temp1[2]);  // [1]*cospi_24 - [3]*cospi_8
  temp1[1] = _mm_sub_epi64(temp1[1], temp1[3]);  // [1]*cospi_24 - [3]*cospi_8
  temp2[0] = _mm_add_epi64(temp2[0], temp2[2]);  // [1]*cospi_8 + [3]*cospi_24
  temp2[1] = _mm_add_epi64(temp2[1], temp2[3]);  // [1]*cospi_8 + [3]*cospi_24
  step[2] = dct_const_round_shift_4_sse2(temp1[0], temp1[1]);
  step[3] = dct_const_round_shift_4_sse2(temp2[0], temp2[1]);

  // stage 2
  io[0] = _mm_add_epi32(step[0], step[3]);  // step[0] + step[3]
  io[1] = _mm_add_epi32(step[1], step[2]);  // step[1] + step[2]
  io[2] = _mm_sub_epi32(step[1], step[2]);  // step[1] - step[2]
  io[3] = _mm_sub_epi32(step[0], step[3]);  // step[0] - step[3]
}
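
// Roughly the scalar transform that highbd_idct4_small_sse2() vectorizes
// (cf. idct4_c() in vpx_dsp/inv_txfm.c); a sketch that omits the WRAPLOW()
// wrapping used there:
//
//   step[0] = dct_const_round_shift((in[0] + in[2]) * cospi_16_64);
//   step[1] = dct_const_round_shift((in[0] - in[2]) * cospi_16_64);
//   step[2] = dct_const_round_shift(in[1] * cospi_24_64 - in[3] * cospi_8_64);
//   step[3] = dct_const_round_shift(in[1] * cospi_8_64 + in[3] * cospi_24_64);
//   out[0] = step[0] + step[3];
//   out[1] = step[1] + step[2];
//   out[2] = step[1] - step[2];
//   out[3] = step[0] - step[3];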

static INLINE void highbd_idct4_large_sse2(__m128i *const io) {
  __m128i step[4];

  transpose_32bit_4x4(io, io);

  // stage 1
  highbd_butterfly_cospi16_sse2(io[0], io[2], &step[0], &step[1]);
  highbd_butterfly_sse2(io[1], io[3], cospi_24_64, cospi_8_64, &step[2],
                        &step[3]);

  // stage 2
  io[0] = _mm_add_epi32(step[0], step[3]);  // step[0] + step[3]
  io[1] = _mm_add_epi32(step[1], step[2]);  // step[1] + step[2]
  io[2] = _mm_sub_epi32(step[1], step[2]);  // step[1] - step[2]
  io[3] = _mm_sub_epi32(step[0], step[3]);  // step[0] - step[3]
}
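
// The "large" path above reuses highbd_butterfly_cospi16_sse2() and
// highbd_butterfly_sse2() from vpx_dsp/x86/highbd_inv_txfm_sse2.h, which
// compute the same two butterflies but keep wider intermediate products, so
// coefficients outside the int16 range are still handled correctly.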

void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
  int16_t max = 0, min = 0;
  __m128i io[4], io_short[2];

  io[0] = _mm_load_si128((const __m128i *)(input + 0));
  io[1] = _mm_load_si128((const __m128i *)(input + 4));
  io[2] = _mm_load_si128((const __m128i *)(input + 8));
  io[3] = _mm_load_si128((const __m128i *)(input + 12));
  io_short[0] = _mm_packs_epi32(io[0], io[1]);
  io_short[1] = _mm_packs_epi32(io[2], io[3]);

  if (bd != 8) {
    __m128i max_input, min_input;

    // Reduce the 16 packed coefficients to a single max and min lane.
    max_input = _mm_max_epi16(io_short[0], io_short[1]);
    min_input = _mm_min_epi16(io_short[0], io_short[1]);
    max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 8));
    min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 8));
    max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 4));
    min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 4));
    max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 2));
    min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 2));
    max = _mm_extract_epi16(max_input, 0);
    min = _mm_extract_epi16(min_input, 0);
  }
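
  // Path selection; the bounds appear chosen to match the headroom of each
  // path: coefficients within +/-4096 (12 bits plus sign) cannot overflow the
  // 16-bit idct4_sse2() pipeline; coefficients that survived
  // _mm_packs_epi32() without saturating fit the 32-bit "small" path;
  // anything wider needs the 64-bit "large" path.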
  if (bd == 8 || (max < 4096 && min >= -4096)) {
    idct4_sse2(io_short);  // first pass: rows (transposes internally)
    idct4_sse2(io_short);  // second pass: columns
    io_short[0] = _mm_add_epi16(io_short[0], _mm_set1_epi16(8));
    io_short[1] = _mm_add_epi16(io_short[1], _mm_set1_epi16(8));
    io[0] = _mm_srai_epi16(io_short[0], 4);  // final rounding: (x + 8) >> 4
    io[1] = _mm_srai_epi16(io_short[1], 4);
  } else {
    if (max < 32767 && min > -32768) {
      highbd_idct4_small_sse2(io);
      highbd_idct4_small_sse2(io);
    } else {
      highbd_idct4_large_sse2(io);
      highbd_idct4_large_sse2(io);
    }
    io[0] = wraplow_16bit_shift4(io[0], io[1], _mm_set1_epi32(8));
    io[1] = wraplow_16bit_shift4(io[2], io[3], _mm_set1_epi32(8));
  }

  recon_and_store_4x4(io, dest, stride, bd);
}

void vpx_highbd_idct4x4_1_add_sse2(const tran_low_t *input, uint16_t *dest,
                                   int stride, int bd) {
  int a1, i;
  tran_low_t out;
  __m128i dc, d;
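
  // The DC-only inverse applies the transform's DC gain in scalar code:
  // cospi_16_64 / 2^14 = 11585 / 16384 ~= 1/sqrt(2), so two rounds of
  // dct_const_round_shift(x * cospi_16_64) scale input[0] by roughly 1/2,
  // and ROUND_POWER_OF_TWO(out, 4) applies the final (x + 8) >> 4.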
  out = HIGHBD_WRAPLOW(
      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
  out =
      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
  a1 = ROUND_POWER_OF_TWO(out, 4);
  dc = _mm_set1_epi16(a1);

  for (i = 0; i < 4; ++i) {
    d = _mm_loadl_epi64((const __m128i *)dest);
    d = add_clamp(d, dc, bd);
    _mm_storel_epi64((__m128i *)dest, d);
    dest += stride;
  }
}
  137. }