convolve_ssse3.h

/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_X86_CONVOLVE_SSSE3_H_
#define VPX_VPX_DSP_X86_CONVOLVE_SSSE3_H_

#include <assert.h>
#include <tmmintrin.h>  // SSSE3

#include "./vpx_config.h"
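
// Repacks the eight 16-bit filter taps for use with _mm_maddubs_epi16 in the
// convolve helpers below: each shuffle mask selects a pair of source bytes
// (e.g. 0x0200u picks bytes 0 and 2, the low bytes of filter[0] and
// filter[1]) and broadcasts that pair into every 16-bit lane of f[i].  Only
// the low byte of each tap is kept, so the taps are assumed to fit in 8 bits.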
static INLINE void shuffle_filter_ssse3(const int16_t *const filter,
                                        __m128i *const f) {
  const __m128i f_values = _mm_load_si128((const __m128i *)filter);
  // pack and duplicate the filter values
  f[0] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0200u));
  f[1] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0604u));
  f[2] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0a08u));
  f[3] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0e0cu));
}
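
// Variant of shuffle_filter_ssse3() that packs the taps into the five
// vectors consumed by convolve8_8_odd_offset_ssse3() below.  f[0] and f[4]
// pair a real tap with source byte 7, the high byte of filter[3]; the assert
// below guarantees that byte is 0, so each lane of those two vectors
// effectively holds a single tap.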
static INLINE void shuffle_filter_odd_ssse3(const int16_t *const filter,
                                            __m128i *const f) {
  const __m128i f_values = _mm_load_si128((const __m128i *)filter);
  // pack and duplicate the filter values
  // This relies on the high byte of filter[3] always being 0, which zeroes
  // out one byte of each 16-bit lane in f[0] and f[4].
  assert(filter[3] >= 0 && filter[3] < 256);
  f[0] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0007u));
  f[1] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0402u));
  f[2] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0806u));
  f[3] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0c0au));
  f[4] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x070eu));
}
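
// Computes eight outputs of an 8-tap filter.  Each s[i] is assumed to hold
// pairs of unsigned 8-bit source samples arranged to line up with the tap
// pairs in f[i] produced by shuffle_filter_ssse3().  The result is
// (sum + 64) >> 7 in each 16-bit lane, with the final accumulation done as a
// saturating add.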
static INLINE __m128i convolve8_8_ssse3(const __m128i *const s,
                                        const __m128i *const f) {
  // multiply 2 adjacent elements with the filter and add the result
  const __m128i k_64 = _mm_set1_epi16(1 << 6);
  const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
  const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
  const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
  const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
  __m128i sum1, sum2;

  // sum the results together, saturating only on the final step
  // adding x0 to x2 and x1 to x3 is the only order that keeps the
  // intermediate sums in range for all filters
  sum1 = _mm_add_epi16(x0, x2);
  sum2 = _mm_add_epi16(x1, x3);
  // add the rounding offset early to avoid another saturated add
  sum1 = _mm_add_epi16(sum1, k_64);
  sum1 = _mm_adds_epi16(sum1, sum2);
  // shift each 16-bit lane right by 7 bits
  sum1 = _mm_srai_epi16(sum1, 7);
  return sum1;
}
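
// Variant of convolve8_8_ssse3() for filters that the caller has pre-biased
// by subtracting 64 from the tap packed into f[1]; the bias is restored by
// also accumulating s[1] * 64 (x4).  Every addition here saturates, and the
// rounding and 7-bit shift are the same as above.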
static INLINE __m128i convolve8_8_even_offset_ssse3(const __m128i *const s,
                                                    const __m128i *const f) {
  // multiply 2 adjacent elements with the filter and add the result
  const __m128i k_64 = _mm_set1_epi16(1 << 6);
  const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
  const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
  const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
  const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
  // compensate for the 64 subtracted from f[1]; x4 is always non-negative.
  const __m128i x4 = _mm_maddubs_epi16(s[1], _mm_set1_epi8(64));
  // add and saturate the results together
  __m128i temp = _mm_adds_epi16(x0, x3);
  temp = _mm_adds_epi16(temp, x1);
  temp = _mm_adds_epi16(temp, x2);
  temp = _mm_adds_epi16(temp, x4);
  // round, then shift each 16-bit lane right by 7 bits
  temp = _mm_adds_epi16(temp, k_64);
  temp = _mm_srai_epi16(temp, 7);
  return temp;
}
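
// Five-vector variant matching shuffle_filter_odd_ssse3().  Here the caller
// has pre-biased the filter by subtracting 64 from the tap packed into f[2],
// and the bias is restored by also accumulating s[2] * 64 (x5).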
static INLINE __m128i convolve8_8_odd_offset_ssse3(const __m128i *const s,
                                                   const __m128i *const f) {
  // multiply 2 adjacent elements with the filter and add the result
  const __m128i k_64 = _mm_set1_epi16(1 << 6);
  const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
  const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
  const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
  const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
  const __m128i x4 = _mm_maddubs_epi16(s[4], f[4]);
  // compensate for the 64 subtracted from f[2]; x5 is always non-negative.
  const __m128i x5 = _mm_maddubs_epi16(s[2], _mm_set1_epi8(64));
  __m128i temp;

  // add and saturate the results together
  temp = _mm_adds_epi16(x0, x1);
  temp = _mm_adds_epi16(temp, x2);
  temp = _mm_adds_epi16(temp, x3);
  temp = _mm_adds_epi16(temp, x4);
  temp = _mm_adds_epi16(temp, x5);
  // round, then shift each 16-bit lane right by 7 bits
  temp = _mm_adds_epi16(temp, k_64);
  temp = _mm_srai_epi16(temp, 7);
  return temp;
}
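
// A minimal usage sketch (illustrative only, not part of this header):
// horizontal 8-tap filtering of eight pixels with the helpers above.
// `src`, `dst` and `filter` are assumed caller-provided; `filter` must be a
// 16-byte-aligned array of eight int16_t taps (shuffle_filter_ssse3() uses
// an aligned load), and the unaligned load reads the 16 bytes
// src[-3] .. src[12].
//
//   __m128i f[4], s[4];
//   const __m128i row = _mm_loadu_si128((const __m128i *)(src - 3));
//   // pair adjacent samples so each 16-bit lane lines up with a tap pair
//   const __m128i idx = _mm_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4,
//                                     4, 5, 5, 6, 6, 7, 7, 8);
//   shuffle_filter_ssse3(filter, f);
//   s[0] = _mm_shuffle_epi8(row, idx);
//   s[1] = _mm_shuffle_epi8(row, _mm_add_epi8(idx, _mm_set1_epi8(2)));
//   s[2] = _mm_shuffle_epi8(row, _mm_add_epi8(idx, _mm_set1_epi8(4)));
//   s[3] = _mm_shuffle_epi8(row, _mm_add_epi8(idx, _mm_set1_epi8(6)));
//   // eight rounded 16-bit results; packus clamps them back to 8 bits
//   const __m128i sum = convolve8_8_ssse3(s, f);
//   _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(sum, sum));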

#endif  // VPX_VPX_DSP_X86_CONVOLVE_SSSE3_H_