quantize_sse2.h 3.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596
  1. /*
  2. * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #ifndef VPX_VPX_DSP_X86_QUANTIZE_SSE2_H_
  11. #define VPX_VPX_DSP_X86_QUANTIZE_SSE2_H_
  12. #include <emmintrin.h>
  13. #include "./vpx_config.h"
  14. #include "vpx/vpx_integer.h"
  15. static INLINE void load_b_values(const int16_t *zbin_ptr, __m128i *zbin,
  16. const int16_t *round_ptr, __m128i *round,
  17. const int16_t *quant_ptr, __m128i *quant,
  18. const int16_t *dequant_ptr, __m128i *dequant,
  19. const int16_t *shift_ptr, __m128i *shift) {
  20. *zbin = _mm_load_si128((const __m128i *)zbin_ptr);
  21. *round = _mm_load_si128((const __m128i *)round_ptr);
  22. *quant = _mm_load_si128((const __m128i *)quant_ptr);
  23. *zbin = _mm_sub_epi16(*zbin, _mm_set1_epi16(1));
  24. *dequant = _mm_load_si128((const __m128i *)dequant_ptr);
  25. *shift = _mm_load_si128((const __m128i *)shift_ptr);
  26. }
  27. // With ssse3 and later abs() and sign() are preferred.
  28. static INLINE __m128i invert_sign_sse2(__m128i a, __m128i sign) {
  29. a = _mm_xor_si128(a, sign);
  30. return _mm_sub_epi16(a, sign);
  31. }
  32. static INLINE void calculate_qcoeff(__m128i *coeff, const __m128i round,
  33. const __m128i quant, const __m128i shift) {
  34. __m128i tmp, qcoeff;
  35. qcoeff = _mm_adds_epi16(*coeff, round);
  36. tmp = _mm_mulhi_epi16(qcoeff, quant);
  37. qcoeff = _mm_add_epi16(tmp, qcoeff);
  38. *coeff = _mm_mulhi_epi16(qcoeff, shift);
  39. }
  40. static INLINE void calculate_dqcoeff_and_store(__m128i qcoeff, __m128i dequant,
  41. tran_low_t *dqcoeff) {
  42. #if CONFIG_VP9_HIGHBITDEPTH
  43. const __m128i low = _mm_mullo_epi16(qcoeff, dequant);
  44. const __m128i high = _mm_mulhi_epi16(qcoeff, dequant);
  45. const __m128i dqcoeff32_0 = _mm_unpacklo_epi16(low, high);
  46. const __m128i dqcoeff32_1 = _mm_unpackhi_epi16(low, high);
  47. _mm_store_si128((__m128i *)(dqcoeff), dqcoeff32_0);
  48. _mm_store_si128((__m128i *)(dqcoeff + 4), dqcoeff32_1);
  49. #else
  50. const __m128i dqcoeff16 = _mm_mullo_epi16(qcoeff, dequant);
  51. _mm_store_si128((__m128i *)(dqcoeff), dqcoeff16);
  52. #endif // CONFIG_VP9_HIGHBITDEPTH
  53. }
  54. // Scan 16 values for eob reference in scan. Use masks (-1) from comparing to
  55. // zbin to add 1 to the index in 'scan'.
  56. static INLINE __m128i scan_for_eob(__m128i *coeff0, __m128i *coeff1,
  57. const __m128i zbin_mask0,
  58. const __m128i zbin_mask1,
  59. const int16_t *scan, const int index,
  60. const __m128i zero) {
  61. const __m128i zero_coeff0 = _mm_cmpeq_epi16(*coeff0, zero);
  62. const __m128i zero_coeff1 = _mm_cmpeq_epi16(*coeff1, zero);
  63. __m128i scan0 = _mm_load_si128((const __m128i *)(scan + index));
  64. __m128i scan1 = _mm_load_si128((const __m128i *)(scan + index + 8));
  65. __m128i eob0, eob1;
  66. // Add one to convert from indices to counts
  67. scan0 = _mm_sub_epi16(scan0, zbin_mask0);
  68. scan1 = _mm_sub_epi16(scan1, zbin_mask1);
  69. eob0 = _mm_andnot_si128(zero_coeff0, scan0);
  70. eob1 = _mm_andnot_si128(zero_coeff1, scan1);
  71. return _mm_max_epi16(eob0, eob1);
  72. }
// Reduce the 8 signed 16-bit eob candidates in 'eob' to a single scalar:
// a log2 horizontal-max via shuffle/max pairs.
static INLINE int16_t accumulate_eob(__m128i eob) {
  __m128i eob_shuffled;
  // Fold the upper 64 bits onto the lower 64 bits (words 4-7 vs 0-3).
  eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  // Fold words 2/3 onto words 0/1.
  eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  // Swap words 0 and 1; after the max, both hold the overall maximum.
  eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
  eob = _mm_max_epi16(eob, eob_shuffled);
  return _mm_extract_epi16(eob, 1);
}
  83. #endif // VPX_VPX_DSP_X86_QUANTIZE_SSE2_H_