vp8_quantize.c

/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include "vpx_mem/vpx_mem.h"

#include "onyx_int.h"
#include "vp8/encoder/quantize.h"
#include "vp8/common/quant_common.h"

void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) {
  int i, rc, eob;
  int x, y, z, sz;
  short *coeff_ptr = b->coeff;
  short *round_ptr = b->round;
  short *quant_ptr = b->quant_fast;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  short *dequant_ptr = d->dequant;

  eob = -1;
  for (i = 0; i < 16; ++i) {
    rc = vp8_default_zig_zag1d[i];
    z = coeff_ptr[rc];

    sz = (z >> 31);    /* sign of z */
    x = (z ^ sz) - sz; /* x = abs(z) */

    y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
    x = (y ^ sz) - sz;                     /* get the sign back */
    qcoeff_ptr[rc] = x;                    /* write to destination */
    dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */

    if (y) {
      eob = i; /* last nonzero coeffs */
    }
  }
  *d->eob = (char)(eob + 1);
}
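
/* A minimal standalone sketch of the arithmetic above, using a hypothetical
 * step size, rounding value and input coefficient; kept under #if 0 because it
 * is illustrative only and not used by the encoder. */
#if 0
#include <stdio.h>
int main(void) {
  const short dequant = 20;                /* hypothetical step size */
  const short quant = (1 << 16) / dequant; /* Q16 reciprocal, as *_quant_fast is built */
  const short round = (48 * dequant) >> 7; /* qrounding_factors[] style: 7 */
  int z = -53;                             /* input DCT coefficient */
  int sz = z >> 31;                        /* sign mask: 0 or -1 */
  int x = (z ^ sz) - sz;                   /* abs(z) = 53 */
  int y = ((x + round) * quant) >> 16;     /* quantized magnitude = 2 */
  x = (y ^ sz) - sz;                       /* restore sign: -2 */
  printf("qcoeff=%d dqcoeff=%d\n", x, x * dequant); /* -2 and -40 */
  return 0;
}
#endif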

void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) {
  int i, rc, eob;
  int zbin;
  int x, y, z, sz;
  short *zbin_boost_ptr = b->zrun_zbin_boost;
  short *coeff_ptr = b->coeff;
  short *zbin_ptr = b->zbin;
  short *round_ptr = b->round;
  short *quant_ptr = b->quant;
  short *quant_shift_ptr = b->quant_shift;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  short *dequant_ptr = d->dequant;
  short zbin_oq_value = b->zbin_extra;

  memset(qcoeff_ptr, 0, 32);
  memset(dqcoeff_ptr, 0, 32);

  eob = -1;

  for (i = 0; i < 16; ++i) {
    rc = vp8_default_zig_zag1d[i];
    z = coeff_ptr[rc];

    zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;

    zbin_boost_ptr++;
    sz = (z >> 31);    /* sign of z */
    x = (z ^ sz) - sz; /* x = abs(z) */

    if (x >= zbin) {
      x += round_ptr[rc];
      y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >>
          16;                              /* quantize (x) */
      x = (y ^ sz) - sz;                   /* get the sign back */
      qcoeff_ptr[rc] = x;                  /* write to destination */
      dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */

      if (y) {
        eob = i;                             /* last nonzero coeffs */
        zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */
      }
    }
  }

  *d->eob = (char)(eob + 1);
}
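
/* In vp8_regular_quantize_b_c() above, the zero-bin threshold grows with the
 * run of zero coefficients seen so far: zbin_boost_ptr advances on every
 * coefficient and is reset to the start of b->zrun_zbin_boost whenever a
 * nonzero value is emitted, so a coefficient at the end of a long zero run
 * needs a larger magnitude to survive quantization. */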

void vp8_quantize_mby(MACROBLOCK *x) {
  int i;
  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
                       x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

  for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);

  if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}

void vp8_quantize_mb(MACROBLOCK *x) {
  int i;
  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
                       x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

  for (i = 0; i < 24 + has_2nd_order; ++i) {
    x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
  }
}

void vp8_quantize_mbuv(MACROBLOCK *x) {
  int i;

  for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}

static const int qrounding_factors[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};

static const int qzbin_factors[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};

static const int qrounding_factors_y2[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};

static const int qzbin_factors_y2[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};
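
/* In vp8cx_init_quantizer() below, zbin and round are computed as
 * (factor * quant_val) >> 7, i.e. factor/128 of the quantizer step: a
 * rounding factor of 48 is 0.375 * step, and a zero-bin factor of 84
 * (80 from Q index 48 upward) is roughly 0.66 * step (0.625 * step). */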

static void invert_quant(int improved_quant, short *quant, short *shift,
                         short d) {
  if (improved_quant) {
    unsigned t;
    int l, m;
    t = d;
    for (l = 0; t > 1; ++l) t >>= 1;
    m = 1 + (1 << (16 + l)) / d;
    *quant = (short)(m - (1 << 16));
    *shift = l;
    /* use multiplication and constant shift by 16 */
    *shift = 1 << (16 - *shift);
  } else {
    *quant = (1 << 16) / d;
    *shift = 0;
    /* use multiplication and constant shift by 16 */
    *shift = 1 << (16 - *shift);
  }
}
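
/* A minimal standalone sketch of how the (quant, shift) pair from the improved
 * path is used: vp8_regular_quantize_b_c() evaluates
 * ((((x * quant) >> 16) + x) * shift) >> 16, which approximates x / d with no
 * per-coefficient division. Values below are hypothetical; kept under #if 0
 * because it is illustrative only. */
#if 0
#include <stdio.h>
int main(void) {
  int d = 20, x = 60; /* hypothetical dequant step and rounded |coefficient| */
  unsigned t = d;
  int l, m, y;
  short quant, shift;
  for (l = 0; t > 1; ++l) t >>= 1; /* l = floor(log2(d)) = 4 */
  m = 1 + (1 << (16 + l)) / d;     /* m = 52429, roughly 2^(16+l) / d */
  quant = (short)(m - (1 << 16));  /* -13107 */
  shift = (short)(1 << (16 - l));  /* 4096 */
  y = ((((x * quant) >> 16) + x) * shift) >> 16;
  printf("%d / %d -> %d (exact %d)\n", x, d, y, x / d); /* 3, exact 3 */
  return 0;
}
#endif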

void vp8cx_init_quantizer(VP8_COMP *cpi) {
  int i;
  int quant_val;
  int Q;

  int zbin_boost[16] = { 0,  0,  8,  10, 12, 14, 16, 20,
                         24, 28, 32, 36, 40, 44, 44, 44 };

  for (Q = 0; Q < QINDEX_RANGE; ++Q) {
    /* dc values */
    quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
    cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
                 cpi->Y1quant_shift[Q] + 0, quant_val);
    cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
    cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
                 cpi->Y2quant_shift[Q] + 0, quant_val);
    cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
    cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
                 cpi->UVquant_shift[Q] + 0, quant_val);
    cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    /* all the ac values */
    quant_val = vp8_ac_yquant(Q);
    cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
                 cpi->Y1quant_shift[Q] + 1, quant_val);
    cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
    cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1,
                 cpi->Y2quant_shift[Q] + 1, quant_val);
    cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
    cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1,
                 cpi->UVquant_shift[Q] + 1, quant_val);
    cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    for (i = 2; i < 16; ++i) {
      cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
      cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
      cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
      cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1];
      cpi->Y1round[Q][i] = cpi->Y1round[Q][1];
      cpi->zrun_zbin_boost_y1[Q][i] =
          (cpi->common.Y1dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1];
      cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1];
      cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1];
      cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1];
      cpi->Y2round[Q][i] = cpi->Y2round[Q][1];
      cpi->zrun_zbin_boost_y2[Q][i] =
          (cpi->common.Y2dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1];
      cpi->UVquant[Q][i] = cpi->UVquant[Q][1];
      cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1];
      cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1];
      cpi->UVround[Q][i] = cpi->UVround[Q][1];
      cpi->zrun_zbin_boost_uv[Q][i] =
          (cpi->common.UVdequant[Q][1] * zbin_boost[i]) >> 7;
    }
  }
}
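
/* Within each per-Q table filled above, index 0 holds the DC values and
 * index 1 the AC values; positions 2..15 reuse the AC entry, except
 * zrun_zbin_boost, which keeps a per-position boost taken from zbin_boost[]. */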

#define ZBIN_EXTRA_Y                                                \
  ((cpi->common.Y1dequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

#define ZBIN_EXTRA_UV                                               \
  ((cpi->common.UVdequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

#define ZBIN_EXTRA_Y2                                                     \
  ((cpi->common.Y2dequant[QIndex][1] *                                    \
    ((x->zbin_over_quant / 2) + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)
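
/* Each ZBIN_EXTRA_* macro scales the sum of the runtime zero-bin adjustments
 * (zbin_over_quant, zbin_mode_boost, act_zbin_adj) by the corresponding AC
 * dequant step, using the same >> 7 scaling as the static zbin tables; the Y2
 * variant applies only half of zbin_over_quant. */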

void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
  int i;
  int QIndex;
  MACROBLOCKD *xd = &x->e_mbd;
  int zbin_extra;

  /* Select the baseline MB Q index. */
  if (xd->segmentation_enabled) {
    /* Abs Value */
    if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
      QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
    } else {
      /* Delta Value */
      QIndex = cpi->common.base_qindex +
               xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Clamp to valid range */
      QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
    }
  } else {
    QIndex = cpi->common.base_qindex;
  }

  /* This initialization should be called at least once. Use ok_to_skip to
   * decide whether it is ok to skip.
   * Before encoding a frame, this function is always called with
   * ok_to_skip = 0, which means no skipping of calculations. The "last"
   * values are initialized at that time.
   */
  if (!ok_to_skip || QIndex != x->q_index) {
    xd->dequant_y1_dc[0] = 1;
    xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0];
    xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
    xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];

    for (i = 1; i < 16; ++i) {
      xd->dequant_y1_dc[i] = xd->dequant_y1[i] =
          cpi->common.Y1dequant[QIndex][1];
      xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
      xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1];
    }
#if 1
    /* TODO: Remove dequant from BLOCKD. This is a temporary solution until
     * the quantizer code uses a passed in pointer to the dequant constants.
     * This will also require modifications to the x86 and neon assembly.
     */
    for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;
    for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv;
    x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif

    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) {
      x->block[i].quant = cpi->Y1quant[QIndex];
      x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
      x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
      x->block[i].zbin = cpi->Y1zbin[QIndex];
      x->block[i].round = cpi->Y1round[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) {
      x->block[i].quant = cpi->UVquant[QIndex];
      x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
      x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
      x->block[i].zbin = cpi->UVzbin[QIndex];
      x->block[i].round = cpi->UVround[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;

    x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
    x->block[24].quant = cpi->Y2quant[QIndex];
    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
    x->block[24].zbin = cpi->Y2zbin[QIndex];
    x->block[24].round = cpi->Y2round[QIndex];
    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
    x->block[24].zbin_extra = (short)zbin_extra;

    /* save this macroblock QIndex for vp8_update_zbin_extra() */
    x->q_index = QIndex;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;
  } else if (x->last_zbin_over_quant != x->zbin_over_quant ||
             x->last_zbin_mode_boost != x->zbin_mode_boost ||
             x->last_act_zbin_adj != x->act_zbin_adj) {
    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;

    x->block[24].zbin_extra = (short)zbin_extra;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;
  }
}

void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
  int i;
  int QIndex = x->q_index;
  int zbin_extra;

  /* Y */
  zbin_extra = ZBIN_EXTRA_Y;
  for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

  /* UV */
  zbin_extra = ZBIN_EXTRA_UV;
  for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

  /* Y2 */
  zbin_extra = ZBIN_EXTRA_Y2;
  x->block[24].zbin_extra = (short)zbin_extra;
}
#undef ZBIN_EXTRA_Y
#undef ZBIN_EXTRA_UV
#undef ZBIN_EXTRA_Y2

void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
  /* Clear Zbin mode boost for default case */
  cpi->mb.zbin_mode_boost = 0;

  /* MB level quantizer setup */
  vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}

void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
  VP8_COMMON *cm = &cpi->common;
  MACROBLOCKD *mbd = &cpi->mb.e_mbd;
  int update = 0;
  int new_delta_q;
  int new_uv_delta_q;
  cm->base_qindex = Q;

  /* if any of the delta_q values are changing, the update flag has to be set */
  /* currently only y2dc_delta_q may change */
  cm->y1dc_delta_q = 0;
  cm->y2ac_delta_q = 0;

  if (Q < 4) {
    new_delta_q = 4 - Q;
  } else {
    new_delta_q = 0;
  }

  update |= cm->y2dc_delta_q != new_delta_q;
  cm->y2dc_delta_q = new_delta_q;

  new_uv_delta_q = 0;
  // For screen content, lower the q value for UV channel. For now, select
  // conservative delta; same delta for dc and ac, and decrease it with lower
  // Q, and set to 0 below some threshold. May want to condition this in
  // future on the variance/energy in UV channel.
  if (cpi->oxcf.screen_content_mode && Q > 40) {
    new_uv_delta_q = -(int)(0.15 * Q);
    // Check range: magnitude of delta is 4 bits.
    if (new_uv_delta_q < -15) {
      new_uv_delta_q = -15;
    }
  }
  update |= cm->uvdc_delta_q != new_uv_delta_q;
  cm->uvdc_delta_q = new_uv_delta_q;
  cm->uvac_delta_q = new_uv_delta_q;

  /* Set Segment specific quantizers */
  mbd->segment_feature_data[MB_LVL_ALT_Q][0] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][0];
  mbd->segment_feature_data[MB_LVL_ALT_Q][1] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][1];
  mbd->segment_feature_data[MB_LVL_ALT_Q][2] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][2];
  mbd->segment_feature_data[MB_LVL_ALT_Q][3] =
      cpi->segment_feature_data[MB_LVL_ALT_Q][3];

  /* quantizer has to be reinitialized for any delta_q changes */
  if (update) vp8cx_init_quantizer(cpi);
}