quantize_avx.c

/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#if defined(_MSC_VER)
#include <intrin.h>
#endif
#include <immintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
#include "vpx_dsp/x86/quantize_sse2.h"
#include "vpx_dsp/x86/quantize_ssse3.h"
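
// Quantizes a block of n_coeffs coefficients: each coefficient whose
// magnitude exceeds the zbin threshold is rounded, scaled by quant and
// quant_shift, written to qcoeff_ptr, and dequantized into dqcoeff_ptr.
// eob_ptr receives the count of coefficients up to and including the last
// nonzero one in scan order.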
void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                        int skip_block, const int16_t *zbin_ptr,
                        const int16_t *round_ptr, const int16_t *quant_ptr,
                        const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
                        tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
                        uint16_t *eob_ptr, const int16_t *scan,
                        const int16_t *iscan) {
  const __m128i zero = _mm_setzero_si128();
  const __m256i big_zero = _mm256_setzero_si256();
  int index;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  __m128i eob = zero, eob0;

  (void)scan;
  (void)skip_block;
  assert(!skip_block);

  *eob_ptr = 0;

  load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,
                dequant_ptr, &dequant, quant_shift_ptr, &shift);
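
  // Each table holds its DC value in element 0 and the AC value in the
  // remaining elements; the _mm_unpackhi_epi64() calls below broadcast the
  // AC half to every lane once the first group is done. Per coefficient,
  // calculate_qcoeff() follows the C reference math (a sketch, eliding the
  // saturating clamp):
  //   tmp = abs(coeff) + round;
  //   qcoeff = ((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16;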
  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
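  // If no coefficient in the group exceeds its zbin threshold, the whole
  // group quantizes to zero: store zeros and skip the multiplies entirely.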
  if (_mm_test_all_zeros(all_zero, all_zero)) {
    _mm256_store_si256((__m256i *)(qcoeff_ptr), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    if (n_coeffs == 16) return;

    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    calculate_qcoeff(&qcoeff0, round, quant, shift);

    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);

    calculate_qcoeff(&qcoeff1, round, quant, shift);

    // Reinsert signs
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);

    calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
    calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + 8);

    eob =
        scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
  }
  // AC only loop.
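  // round/quant/shift/dequant now hold the AC values in every lane, so the
  // remaining groups of 16 coefficients need no DC/AC split.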
  for (index = 16; index < n_coeffs; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_test_all_zeros(all_zero, all_zero)) {
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index + 8), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr + index);
    calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + index + 8);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, index,
                        zero);
    eob = _mm_max_epi16(eob, eob0);
  }
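  // Fold the per-lane maxima of the inverse-scan positions into the final
  // scalar end-of-block value.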
  *eob_ptr = accumulate_eob(eob);
}
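
// The 32x32 variant always processes the full 1024-coefficient block, so
// n_coeffs is unused. To match the C reference it halves zbin and round
// (with rounding), doubles quant_shift, and halves the dequantized output.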
void vpx_quantize_b_32x32_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr,
                              const int16_t *quant_ptr,
                              const int16_t *quant_shift_ptr,
                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
                              const int16_t *scan, const int16_t *iscan) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m256i big_zero = _mm256_setzero_si256();
  int index;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  __m128i eob = zero, eob0;

  (void)scan;
  (void)n_coeffs;
  (void)skip_block;
  assert(!skip_block);
  // Setup global values.
  // The 32x32 halves zbin and round.
  zbin = _mm_load_si128((const __m128i *)zbin_ptr);

  // Shift with rounding.
  zbin = _mm_add_epi16(zbin, one);
  zbin = _mm_srli_epi16(zbin, 1);

  // x86 has no "greater *or equal*" comparison. Subtract 1 from zbin so
  // it is a strict "greater" comparison.
  zbin = _mm_sub_epi16(zbin, one);

  round = _mm_load_si128((const __m128i *)round_ptr);
  round = _mm_add_epi16(round, one);
  round = _mm_srli_epi16(round, 1);

  quant = _mm_load_si128((const __m128i *)quant_ptr);
  dequant = _mm_load_si128((const __m128i *)dequant_ptr);
  shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
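  // Doubling quant_shift lets calculate_qcoeff()'s 16-bit multiply-high
  // (effectively >> 16) stand in for the >> 15 the 32x32 C reference uses.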
  shift = _mm_slli_epi16(shift, 1);
  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC.
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
  if (_mm_test_all_zeros(all_zero, all_zero)) {
    _mm256_store_si256((__m256i *)(qcoeff_ptr), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    calculate_qcoeff(&qcoeff0, round, quant, shift);

    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);

    calculate_qcoeff(&qcoeff1, round, quant, shift);

    // Reinsert signs.
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs.
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);
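    // calculate_dqcoeff_and_store_32x32() halves the product, matching the
    // 32x32 C reference's dqcoeff = (qcoeff * dequant) / 2.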
    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero, dqcoeff_ptr);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero, dqcoeff_ptr + 8);

    eob =
        scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
  }
  // AC only loop.
  for (index = 16; index < 32 * 32; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_test_all_zeros(all_zero, all_zero)) {
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index + 8), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero,
                                      dqcoeff_ptr + index);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero,
                                      dqcoeff_ptr + index + 8);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, cmp_mask0, cmp_mask1, iscan, index,
                        zero);
    eob = _mm_max_epi16(eob, eob0);
  }
  *eob_ptr = accumulate_eob(eob);
}