/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"

// Negate the 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
static INLINE int16x8_t vec_sign(int16x8_t a, int16x8_t b) {
  const int16x8_t mask = vec_sra(b, vec_shift_sign_s16);
  return vec_xor(vec_add(a, mask), mask);
}
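
// Worked example of the two's complement trick in vec_sign() above, assuming
// b < 0 so that mask is all ones (-1):
//   (a + mask) ^ mask == (a - 1) ^ -1 == ~(a - 1) == -a
// When b >= 0 the mask is 0 and a is returned unchanged.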

// Sets each 32-bit lane to 1 when the corresponding value in a is negative.
static INLINE int32x4_t vec_is_neg(int32x4_t a) {
  return vec_sr(a, vec_shift_sign_s32);
}

// Multiply the packed 16-bit integers in a and b, producing intermediate
// 32-bit integers, and return the high 16 bits of the intermediate integers:
// (a * b) >> 16.
static INLINE int16x8_t vec_mulhi(int16x8_t a, int16x8_t b) {
  // madds computes ((a * b) >> 15) + c; we need >> 16, so we perform an
  // extra right shift.
  return vec_sra(vec_madds(a, b, vec_zeros_s16), vec_ones_u16);
}
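
// For reference, a scalar sketch of what vec_mulhi() computes per 16-bit
// lane (the vector code above is authoritative; this is only a paraphrase):
//   int16_t mulhi(int16_t a, int16_t b) {
//     return (int16_t)(((int32_t)a * b) >> 16);
//   }
// Because vec_madds saturates, the single corner case a == b == INT16_MIN
// comes out one smaller than this sketch.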

// Quantization function used for 4x4, 8x8 and 16x16 blocks.
static INLINE int16x8_t quantize_coeff(int16x8_t coeff, int16x8_t coeff_abs,
                                       int16x8_t round, int16x8_t quant,
                                       int16x8_t quant_shift, bool16x8_t mask) {
  const int16x8_t rounded = vec_vaddshs(coeff_abs, round);
  int16x8_t qcoeff = vec_mulhi(rounded, quant);
  qcoeff = vec_add(qcoeff, rounded);
  qcoeff = vec_mulhi(qcoeff, quant_shift);
  qcoeff = vec_sign(qcoeff, coeff);
  return vec_and(qcoeff, mask);
}
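
// Per-lane scalar sketch of quantize_coeff(), paraphrasing the C reference
// quantizer rather than quoting it verbatim:
//   rounded = sat16(abs(coeff) + round);
//   tmp     = ((rounded * quant) >> 16) + rounded;
//   qcoeff  = ((tmp * quant_shift) >> 16) * sign(coeff);
// The mask argument then zeroes every lane whose |coeff| fell below the
// zbin threshold.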

// Quantization function used for 32x32 blocks.
static INLINE int16x8_t quantize_coeff_32(int16x8_t coeff, int16x8_t coeff_abs,
                                          int16x8_t round, int16x8_t quant,
                                          int16x8_t quant_shift,
                                          bool16x8_t mask) {
  const int16x8_t rounded = vec_vaddshs(coeff_abs, round);
  int16x8_t qcoeff = vec_mulhi(rounded, quant);
  qcoeff = vec_add(qcoeff, rounded);
  // 32x32 blocks require an extra multiplication by 2. This cancels the
  // extra right shift added in vec_mulhi, so vec_madds can be used directly
  // instead of vec_mulhi: (((a * b) >> 15) >> 1) << 1 == (a * b) >> 15.
  qcoeff = vec_madds(qcoeff, quant_shift, vec_zeros_s16);
  qcoeff = vec_sign(qcoeff, coeff);
  return vec_and(qcoeff, mask);
}
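
// Per-lane sketch: the same as quantize_coeff() except that the final
// multiply keeps one extra bit,
//   qcoeff = ((tmp * quant_shift) >> 15) * sign(coeff);
// which is the "extra multiplication by 2" mentioned in the comment above.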

// Dequantization function used for 32x32 blocks. Quantized coefficients of
// 32x32 blocks are twice as big as for other block sizes, so using vec_mladd
// would overflow.
static INLINE int16x8_t dequantize_coeff_32(int16x8_t qcoeff,
                                            int16x8_t dequant) {
  int32x4_t dqcoeffe = vec_mule(qcoeff, dequant);
  int32x4_t dqcoeffo = vec_mulo(qcoeff, dequant);
  // Add 1 to negative values so that the arithmetic shift rounds towards
  // zero, matching the division in the C code.
  dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe));
  dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo));
  dqcoeffe = vec_sra(dqcoeffe, vec_ones_u32);
  dqcoeffo = vec_sra(dqcoeffo, vec_ones_u32);
  return (int16x8_t)vec_perm(dqcoeffe, dqcoeffo, vec_perm_odd_even_pack);
}
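
// Per-lane sketch of dequantize_coeff_32(): dqcoeff = (qcoeff * dequant) / 2,
// with the division truncating towards zero. vec_mule/vec_mulo compute full
// 32-bit products for the even/odd lanes, and vec_perm_odd_even_pack
// interleaves their low halves back into one 16-bit vector in lane order.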

// Returns (iscan[i] + 1) in each lane whose quantized coefficient is nonzero
// and 0 elsewhere, so that the running vec_max of these vectors tracks the
// end-of-block position.
static INLINE int16x8_t nonzero_scanindex(int16x8_t qcoeff, bool16x8_t mask,
                                          const int16_t *iscan_ptr, int index) {
  int16x8_t scan = vec_vsx_ld(index, iscan_ptr);
  bool16x8_t zero_coeff = vec_cmpeq(qcoeff, vec_zeros_s16);
  // mask is all ones (-1) in lanes that passed the zbin test, so subtracting
  // it adds 1 to the scan index in those lanes.
  scan = vec_sub(scan, mask);
  return vec_andc(scan, zero_coeff);
}

// Compare packed 16-bit integers across a and return a vector with the
// maximum value broadcast to every element.
static INLINE int16x8_t vec_max_across(int16x8_t a) {
  a = vec_max(a, vec_perm(a, a, vec_perm64));
  a = vec_max(a, vec_perm(a, a, vec_perm32));
  return vec_max(a, vec_perm(a, a, vec_perm16));
}
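
// e.g. {1, 7, 3, 5, 2, 8, 4, 6} reduces to {8, 8, 8, 8, 8, 8, 8, 8}: each
// permute/max step halves the distance between compared lanes (64-bit, then
// 32-bit, then 16-bit swaps), so log2(8) = 3 steps cover the whole vector.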

void vpx_quantize_b_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                        int skip_block, const int16_t *zbin_ptr,
                        const int16_t *round_ptr, const int16_t *quant_ptr,
                        const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
                        tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
                        uint16_t *eob_ptr, const int16_t *scan_ptr,
                        const int16_t *iscan_ptr) {
  int16x8_t qcoeff0, qcoeff1, dqcoeff0, dqcoeff1, eob;
  bool16x8_t zero_mask0, zero_mask1;

  // First set of 8 coeffs starts with DC + 7 AC.
  int16x8_t zbin = vec_vsx_ld(0, zbin_ptr);
  int16x8_t round = vec_vsx_ld(0, round_ptr);
  int16x8_t quant = vec_vsx_ld(0, quant_ptr);
  int16x8_t dequant = vec_vsx_ld(0, dequant_ptr);
  int16x8_t quant_shift = vec_vsx_ld(0, quant_shift_ptr);

  int16x8_t coeff0 = vec_vsx_ld(0, coeff_ptr);
  int16x8_t coeff1 = vec_vsx_ld(16, coeff_ptr);

  int16x8_t coeff0_abs = vec_abs(coeff0);
  int16x8_t coeff1_abs = vec_abs(coeff1);

  zero_mask0 = vec_cmpge(coeff0_abs, zbin);
  zbin = vec_splat(zbin, 1);  // remove DC from zbin
  zero_mask1 = vec_cmpge(coeff1_abs, zbin);

  (void)scan_ptr;
  (void)skip_block;
  assert(!skip_block);

  qcoeff0 =
      quantize_coeff(coeff0, coeff0_abs, round, quant, quant_shift, zero_mask0);
  vec_vsx_st(qcoeff0, 0, qcoeff_ptr);
  round = vec_splat(round, 1);              // remove DC from round
  quant = vec_splat(quant, 1);              // remove DC from quant
  quant_shift = vec_splat(quant_shift, 1);  // remove DC from quant_shift
  qcoeff1 =
      quantize_coeff(coeff1, coeff1_abs, round, quant, quant_shift, zero_mask1);
  vec_vsx_st(qcoeff1, 16, qcoeff_ptr);

  dqcoeff0 = vec_mladd(qcoeff0, dequant, vec_zeros_s16);
  vec_vsx_st(dqcoeff0, 0, dqcoeff_ptr);
  dequant = vec_splat(dequant, 1);  // remove DC from dequant
  dqcoeff1 = vec_mladd(qcoeff1, dequant, vec_zeros_s16);
  vec_vsx_st(dqcoeff1, 16, dqcoeff_ptr);

  eob = vec_max(nonzero_scanindex(qcoeff0, zero_mask0, iscan_ptr, 0),
                nonzero_scanindex(qcoeff1, zero_mask1, iscan_ptr, 16));

  if (n_coeffs > 16) {
    int index = 16;
    int off0 = 32;
    int off1 = 48;
    int off2 = 64;
    do {
      int16x8_t coeff2, coeff2_abs, qcoeff2, dqcoeff2, eob2;
      bool16x8_t zero_mask2;
      coeff0 = vec_vsx_ld(off0, coeff_ptr);
      coeff1 = vec_vsx_ld(off1, coeff_ptr);
      coeff2 = vec_vsx_ld(off2, coeff_ptr);
      coeff0_abs = vec_abs(coeff0);
      coeff1_abs = vec_abs(coeff1);
      coeff2_abs = vec_abs(coeff2);
      zero_mask0 = vec_cmpge(coeff0_abs, zbin);
      zero_mask1 = vec_cmpge(coeff1_abs, zbin);
      zero_mask2 = vec_cmpge(coeff2_abs, zbin);
      qcoeff0 = quantize_coeff(coeff0, coeff0_abs, round, quant, quant_shift,
                               zero_mask0);
      qcoeff1 = quantize_coeff(coeff1, coeff1_abs, round, quant, quant_shift,
                               zero_mask1);
      qcoeff2 = quantize_coeff(coeff2, coeff2_abs, round, quant, quant_shift,
                               zero_mask2);
      vec_vsx_st(qcoeff0, off0, qcoeff_ptr);
      vec_vsx_st(qcoeff1, off1, qcoeff_ptr);
      vec_vsx_st(qcoeff2, off2, qcoeff_ptr);
      dqcoeff0 = vec_mladd(qcoeff0, dequant, vec_zeros_s16);
      dqcoeff1 = vec_mladd(qcoeff1, dequant, vec_zeros_s16);
      dqcoeff2 = vec_mladd(qcoeff2, dequant, vec_zeros_s16);
      vec_vsx_st(dqcoeff0, off0, dqcoeff_ptr);
      vec_vsx_st(dqcoeff1, off1, dqcoeff_ptr);
      vec_vsx_st(dqcoeff2, off2, dqcoeff_ptr);
      eob =
          vec_max(eob, nonzero_scanindex(qcoeff0, zero_mask0, iscan_ptr, off0));
      eob2 = vec_max(nonzero_scanindex(qcoeff1, zero_mask1, iscan_ptr, off1),
                     nonzero_scanindex(qcoeff2, zero_mask2, iscan_ptr, off2));
      eob = vec_max(eob, eob2);
      // 24 coeffs per iteration; offsets advance by 24 * 2 = 48 bytes.
      index += 24;
      off0 += 48;
      off1 += 48;
      off2 += 48;
    } while (index < n_coeffs);
  }
  // Scan indices were encoded as iscan + 1, so the max across all lanes is
  // the number of coefficients up to and including the last nonzero one.
  eob = vec_max_across(eob);
  *eob_ptr = eob[0];
}

void vpx_quantize_b_32x32_vsx(
    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
    const int16_t *scan_ptr, const int16_t *iscan_ptr) {
  // In stage 1 we quantize 16 coeffs (DC + 15 AC); in stage 2 we loop 42
  // times, quantizing 24 coeffs per iteration: (32 * 32 - 16) / 24 = 42.
  int num_itr = 42;
  // Offsets are in bytes: 16 coeffs = 32 bytes.
  int off0 = 32;
  int off1 = 48;
  int off2 = 64;

  int16x8_t qcoeff0, qcoeff1, eob;
  bool16x8_t zero_mask0, zero_mask1;

  int16x8_t zbin = vec_vsx_ld(0, zbin_ptr);
  int16x8_t round = vec_vsx_ld(0, round_ptr);
  int16x8_t quant = vec_vsx_ld(0, quant_ptr);
  int16x8_t dequant = vec_vsx_ld(0, dequant_ptr);
  int16x8_t quant_shift = vec_vsx_ld(0, quant_shift_ptr);
  int16x8_t coeff0 = vec_vsx_ld(0, coeff_ptr);
  int16x8_t coeff1 = vec_vsx_ld(16, coeff_ptr);
  int16x8_t coeff0_abs = vec_abs(coeff0);
  int16x8_t coeff1_abs = vec_abs(coeff1);

  (void)scan_ptr;
  (void)skip_block;
  (void)n_coeffs;
  assert(!skip_block);

  // 32x32 quantization requires zbin and round to be divided by 2, rounding
  // to the nearest integer.
  zbin = vec_sra(vec_add(zbin, vec_ones_s16), vec_ones_u16);
  round = vec_sra(vec_add(round, vec_ones_s16), vec_ones_u16);

  zero_mask0 = vec_cmpge(coeff0_abs, zbin);
  zbin = vec_splat(zbin, 1);  // remove DC from zbin
  zero_mask1 = vec_cmpge(coeff1_abs, zbin);

  qcoeff0 = quantize_coeff_32(coeff0, coeff0_abs, round, quant, quant_shift,
                              zero_mask0);
  round = vec_splat(round, 1);              // remove DC from round
  quant = vec_splat(quant, 1);              // remove DC from quant
  quant_shift = vec_splat(quant_shift, 1);  // remove DC from quant_shift
  qcoeff1 = quantize_coeff_32(coeff1, coeff1_abs, round, quant, quant_shift,
                              zero_mask1);
  vec_vsx_st(qcoeff0, 0, qcoeff_ptr);
  vec_vsx_st(qcoeff1, 16, qcoeff_ptr);

  vec_vsx_st(dequantize_coeff_32(qcoeff0, dequant), 0, dqcoeff_ptr);
  dequant = vec_splat(dequant, 1);  // remove DC from dequant
  vec_vsx_st(dequantize_coeff_32(qcoeff1, dequant), 16, dqcoeff_ptr);

  eob = vec_max(nonzero_scanindex(qcoeff0, zero_mask0, iscan_ptr, 0),
                nonzero_scanindex(qcoeff1, zero_mask1, iscan_ptr, 16));

  do {
    int16x8_t coeff2, coeff2_abs, qcoeff2, eob2;
    bool16x8_t zero_mask2;
    coeff0 = vec_vsx_ld(off0, coeff_ptr);
    coeff1 = vec_vsx_ld(off1, coeff_ptr);
    coeff2 = vec_vsx_ld(off2, coeff_ptr);
    coeff0_abs = vec_abs(coeff0);
    coeff1_abs = vec_abs(coeff1);
    coeff2_abs = vec_abs(coeff2);
    zero_mask0 = vec_cmpge(coeff0_abs, zbin);
    zero_mask1 = vec_cmpge(coeff1_abs, zbin);
    zero_mask2 = vec_cmpge(coeff2_abs, zbin);
    qcoeff0 = quantize_coeff_32(coeff0, coeff0_abs, round, quant, quant_shift,
                                zero_mask0);
    qcoeff1 = quantize_coeff_32(coeff1, coeff1_abs, round, quant, quant_shift,
                                zero_mask1);
    qcoeff2 = quantize_coeff_32(coeff2, coeff2_abs, round, quant, quant_shift,
                                zero_mask2);
    vec_vsx_st(qcoeff0, off0, qcoeff_ptr);
    vec_vsx_st(qcoeff1, off1, qcoeff_ptr);
    vec_vsx_st(qcoeff2, off2, qcoeff_ptr);
    vec_vsx_st(dequantize_coeff_32(qcoeff0, dequant), off0, dqcoeff_ptr);
    vec_vsx_st(dequantize_coeff_32(qcoeff1, dequant), off1, dqcoeff_ptr);
    vec_vsx_st(dequantize_coeff_32(qcoeff2, dequant), off2, dqcoeff_ptr);
    eob = vec_max(eob, nonzero_scanindex(qcoeff0, zero_mask0, iscan_ptr, off0));
    eob2 = vec_max(nonzero_scanindex(qcoeff1, zero_mask1, iscan_ptr, off1),
                   nonzero_scanindex(qcoeff2, zero_mask2, iscan_ptr, off2));
    eob = vec_max(eob, eob2);
    // 24 int16_t coeffs = 48 bytes.
    off0 += 48;
    off1 += 48;
    off2 += 48;
    num_itr--;
  } while (num_itr != 0);

  eob = vec_max_across(eob);
  *eob_ptr = eob[0];
}
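
// Usage sketch (hypothetical caller, for illustration only): these functions
// are normally reached through the vpx_quantize_b / vpx_quantize_b_32x32
// RTCD dispatch on POWER targets with VSX rather than called directly, e.g.
//   uint16_t eob;
//   vpx_quantize_b(coeff, 256, 0, zbin, round, quant, quant_shift, qcoeff,
//                  dqcoeff, dequant, &eob, scan, iscan);
// for a 16x16 block (256 coefficients) with skip_block == 0.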