convolve_avx2.h

/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_X86_CONVOLVE_AVX2_H_
#define VPX_VPX_DSP_X86_CONVOLVE_AVX2_H_

#include <immintrin.h>  // AVX2

#include "./vpx_config.h"

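// MM256_BROADCASTSI128_SI256(x) copies the 128-bit value x into both lanes of
// a 256-bit register. The compiler-version checks below pick the spelling of
// the broadcast intrinsic that the toolchain provides: very old clang and gcc
// releases named it _mm_broadcastsi128_si256 (taking its argument by pointer
// on the oldest ones), while newer compilers use _mm256_broadcastsi128_si256.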
#if defined(__clang__)
#if (__clang_major__ > 0 && __clang_major__ < 3) ||            \
    (__clang_major__ == 3 && __clang_minor__ <= 3) ||          \
    (defined(__APPLE__) && defined(__apple_build_version__) && \
     ((__clang_major__ == 4 && __clang_minor__ <= 2) ||        \
      (__clang_major__ == 5 && __clang_minor__ == 0)))
#define MM256_BROADCASTSI128_SI256(x) \
  _mm_broadcastsi128_si256((__m128i const *)&(x))
#else  // clang > 3.3, and not 5.0 on macosx.
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif  // clang <= 3.3
#elif defined(__GNUC__)
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6)
#define MM256_BROADCASTSI128_SI256(x) \
  _mm_broadcastsi128_si256((__m128i const *)&(x))
#elif __GNUC__ == 4 && __GNUC_MINOR__ == 7
#define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x)
#else  // gcc > 4.7
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif  // gcc <= 4.6
#else  // !(gcc || clang)
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif  // __clang__

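// Expands the eight 16-bit filter taps for use with _mm256_maddubs_epi16:
// each f[i] holds the tap pair {2*i, 2*i + 1} (as signed bytes) duplicated
// across every 16-bit lane of both 128-bit halves of the register.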
static INLINE void shuffle_filter_avx2(const int16_t *const filter,
                                       __m256i *const f) {
  const __m256i f_values =
      MM256_BROADCASTSI128_SI256(_mm_load_si128((const __m128i *)filter));
  // pack and duplicate the filter values
  f[0] = _mm256_shuffle_epi8(f_values, _mm256_set1_epi16(0x0200u));
  f[1] = _mm256_shuffle_epi8(f_values, _mm256_set1_epi16(0x0604u));
  f[2] = _mm256_shuffle_epi8(f_values, _mm256_set1_epi16(0x0a08u));
  f[3] = _mm256_shuffle_epi8(f_values, _mm256_set1_epi16(0x0e0cu));
}

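// Filters 16 pixels at once. Each s[i] must hold, for every output position,
// the two unsigned source bytes that are multiplied by taps 2*i and 2*i + 1,
// so that _mm256_maddubs_epi16 yields one partial sum per 16-bit lane. The
// return value is the rounded result (sum + 64) >> 7 in each 16-bit lane.
//
// Illustrative sketch only (not part of the original file); a caller might
// combine the helpers roughly like this:
//
//   __m256i f[4], s[4];
//   shuffle_filter_avx2(filter, f);
//   // ... fill s[0..3] with pair-interleaved source pixels ...
//   const __m256i sum = convolve8_16_avx2(s, f);
//   const __m256i out = _mm256_packus_epi16(sum, sum);  // clamp to 8 bits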
static INLINE __m256i convolve8_16_avx2(const __m256i *const s,
                                        const __m256i *const f) {
  // multiply 2 adjacent elements with the filter and add the result
  const __m256i k_64 = _mm256_set1_epi16(1 << 6);
  const __m256i x0 = _mm256_maddubs_epi16(s[0], f[0]);
  const __m256i x1 = _mm256_maddubs_epi16(s[1], f[1]);
  const __m256i x2 = _mm256_maddubs_epi16(s[2], f[2]);
  const __m256i x3 = _mm256_maddubs_epi16(s[3], f[3]);
  __m256i sum1, sum2;

  // sum the results together, saturating only on the final step
  // adding x0 with x2 and x1 with x3 is the only order that prevents
  // overflow for all filters
  sum1 = _mm256_add_epi16(x0, x2);
  sum2 = _mm256_add_epi16(x1, x3);

  // add the rounding offset early to avoid another saturated add
  sum1 = _mm256_add_epi16(sum1, k_64);
  sum1 = _mm256_adds_epi16(sum1, sum2);

  // round and shift right by 7 bits in each 16-bit lane
  sum1 = _mm256_srai_epi16(sum1, 7);
  return sum1;
}

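// Same computation as convolve8_16_avx2, but for 8 output pixels: only the
// low 128-bit lane of each s[i] and f[i] is used, and the rounded result is
// returned in a 128-bit register.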
static INLINE __m128i convolve8_8_avx2(const __m256i *const s,
                                       const __m256i *const f) {
  // multiply 2 adjacent elements with the filter and add the result
  const __m128i k_64 = _mm_set1_epi16(1 << 6);
  const __m128i x0 = _mm_maddubs_epi16(_mm256_castsi256_si128(s[0]),
                                       _mm256_castsi256_si128(f[0]));
  const __m128i x1 = _mm_maddubs_epi16(_mm256_castsi256_si128(s[1]),
                                       _mm256_castsi256_si128(f[1]));
  const __m128i x2 = _mm_maddubs_epi16(_mm256_castsi256_si128(s[2]),
                                       _mm256_castsi256_si128(f[2]));
  const __m128i x3 = _mm_maddubs_epi16(_mm256_castsi256_si128(s[3]),
                                       _mm256_castsi256_si128(f[3]));
  __m128i sum1, sum2;

  // sum the results together, saturating only on the final step
  // adding x0 with x2 and x1 with x3 is the only order that prevents
  // overflow for all filters
  sum1 = _mm_add_epi16(x0, x2);
  sum2 = _mm_add_epi16(x1, x3);

  // add the rounding offset early to avoid another saturated add
  sum1 = _mm_add_epi16(sum1, k_64);
  sum1 = _mm_adds_epi16(sum1, sum2);

  // shift right by 7 bits in each 16-bit lane
  sum1 = _mm_srai_epi16(sum1, 7);
  return sum1;
}

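// Loads two independent 16-byte (or, in the epi64 variant, 8-byte) chunks,
// e.g. two rows of a block, into the low and high 128-bit lanes of a single
// 256-bit register.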
static INLINE __m256i mm256_loadu2_si128(const void *lo, const void *hi) {
  const __m256i tmp =
      _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)lo));
  return _mm256_inserti128_si256(tmp, _mm_loadu_si128((const __m128i *)hi), 1);
}

static INLINE __m256i mm256_loadu2_epi64(const void *lo, const void *hi) {
  const __m256i tmp =
      _mm256_castsi128_si256(_mm_loadl_epi64((const __m128i *)lo));
  return _mm256_inserti128_si256(tmp, _mm_loadl_epi64((const __m128i *)hi), 1);
}

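// The store helpers below do the reverse: they write the low and high 128-bit
// lanes of *src to two separate destinations, as aligned 16-byte stores,
// 8-byte stores, or 4-byte stores respectively.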
static INLINE void mm256_store2_si128(__m128i *const dst_ptr_1,
                                      __m128i *const dst_ptr_2,
                                      const __m256i *const src) {
  _mm_store_si128(dst_ptr_1, _mm256_castsi256_si128(*src));
  _mm_store_si128(dst_ptr_2, _mm256_extractf128_si256(*src, 1));
}

static INLINE void mm256_storeu2_epi64(__m128i *const dst_ptr_1,
                                       __m128i *const dst_ptr_2,
                                       const __m256i *const src) {
  _mm_storel_epi64(dst_ptr_1, _mm256_castsi256_si128(*src));
  _mm_storel_epi64(dst_ptr_2, _mm256_extractf128_si256(*src, 1));
}

static INLINE void mm256_storeu2_epi32(__m128i *const dst_ptr_1,
                                       __m128i *const dst_ptr_2,
                                       const __m256i *const src) {
  *((uint32_t *)(dst_ptr_1)) = _mm_cvtsi128_si32(_mm256_castsi256_si128(*src));
  *((uint32_t *)(dst_ptr_2)) =
      _mm_cvtsi128_si32(_mm256_extractf128_si256(*src, 1));
}

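// Rounds each lane to nearest by adding *half_depth (expected to be
// 1 << (depth - 1)) and then arithmetically shifting right by depth bits.
// Note that the 16-bit variant uses a saturating add.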
static INLINE __m256i mm256_round_epi32(const __m256i *const src,
                                        const __m256i *const half_depth,
                                        const int depth) {
  const __m256i nearest_src = _mm256_add_epi32(*src, *half_depth);
  return _mm256_srai_epi32(nearest_src, depth);
}

static INLINE __m256i mm256_round_epi16(const __m256i *const src,
                                        const __m256i *const half_depth,
                                        const int depth) {
  const __m256i nearest_src = _mm256_adds_epi16(*src, *half_depth);
  return _mm256_srai_epi16(nearest_src, depth);
}

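// Multiply-accumulates two pairs of 16-bit vectors: each _mm256_madd_epi16
// sums adjacent 16-bit products into a 32-bit lane, and the two intermediate
// results are then added together.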
static INLINE __m256i mm256_madd_add_epi32(const __m256i *const src_0,
                                           const __m256i *const src_1,
                                           const __m256i *const ker_0,
                                           const __m256i *const ker_1) {
  const __m256i tmp_0 = _mm256_madd_epi16(*src_0, *ker_0);
  const __m256i tmp_1 = _mm256_madd_epi16(*src_1, *ker_1);
  return _mm256_add_epi32(tmp_0, tmp_1);
}

#undef MM256_BROADCASTSI128_SI256

#endif  // VPX_VPX_DSP_X86_CONVOLVE_AVX2_H_