vp9_aq_variance.c
  1. /*
  2. * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <math.h>
  11. #include "vpx_ports/mem.h"
  12. #include "vpx_ports/system_state.h"
  13. #include "vp9/encoder/vp9_aq_variance.h"
  14. #include "vp9/common/vp9_seg_common.h"
  15. #include "vp9/encoder/vp9_ratectrl.h"
  16. #include "vp9/encoder/vp9_rd.h"
  17. #include "vp9/encoder/vp9_segmentation.h"
// Block "energy" (log-variance relative to a midpoint) is clamped to the
// range [ENERGY_MIN, ENERGY_MAX] before being mapped to a segment.
#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
#define ENERGY_IN_BOUNDS(energy) \
  assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)

// Per-segment target rate relative to the frame base rate; used to derive
// each segment's Q delta in vp9_vaq_frame_setup().  Entries equal to 1.0
// leave the segment at the base Q (no ALT_Q feature enabled).
static const double rate_ratio[MAX_SEGMENTS] = { 2.5, 2.0, 1.5, 1.0,
                                                 0.75, 1.0, 1.0, 1.0 };
// Maps a clamped energy value (offset by -ENERGY_MIN) to a segment id.
static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };

#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]

// All-zero "reference" blocks: source variance is computed as the variance
// of the source against zero (stride 0 re-reads the same zero row).
DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = { 0 };
#endif
  31. unsigned int vp9_vaq_segment_id(int energy) {
  32. ENERGY_IN_BOUNDS(energy);
  33. return SEGMENT_ID(energy);
  34. }
// Configures frame-level segmentation for variance AQ.  On frames where the
// segment map is (re)set — intra-only, error-resilient, alt-ref refresh,
// forced updates, or golden refresh that is not a src-frame alt-ref — this
// enables segmentation and gives each segment a Q delta derived from its
// entry in rate_ratio[].
void vp9_vaq_frame_setup(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
  struct segmentation *seg = &cm->seg;
  int i;

  if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
      cpi->refresh_alt_ref_frame || cpi->force_update_segmentation ||
      (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
    vp9_enable_segmentation(seg);
    vp9_clearall_segfeatures(seg);

    // Segment deltas are applied relative to the base Q, not as absolutes.
    seg->abs_delta = SEGMENT_DELTADATA;

    // Reset FPU/SIMD state before the floating-point rate computations below.
    vpx_clear_system_state();

    for (i = 0; i < MAX_SEGMENTS; ++i) {
      int qindex_delta =
          vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
                                     rate_ratio[i], cm->bit_depth);

      // We don't allow qindex 0 in a segment if the base value is not 0.
      // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
      // Q delta is sometimes applied without going back around the rd loop.
      // This could lead to an illegal combination of partition size and q.
      if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) {
        qindex_delta = -cm->base_qindex + 1;
      }

      // No need to enable SEG_LVL_ALT_Q for this segment.
      if (rate_ratio[i] == 1.0) {
        continue;
      }

      vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
      vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
    }
  }
}
  66. /* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions
  67. * of variance() and highbd_8_variance(). It should not.
  68. */
  69. static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
  70. int b_stride, int w, int h, unsigned int *sse,
  71. int *sum) {
  72. int i, j;
  73. *sum = 0;
  74. *sse = 0;
  75. for (i = 0; i < h; i++) {
  76. for (j = 0; j < w; j++) {
  77. const int diff = a[j] - b[j];
  78. *sum += diff;
  79. *sse += diff * diff;
  80. }
  81. a += a_stride;
  82. b += b_stride;
  83. }
  84. }
  85. #if CONFIG_VP9_HIGHBITDEPTH
// High-bitdepth counterpart of aq_variance(): computes the sum of differences
// and sum of squared differences between two w x h blocks of 16-bit pixels.
// a8/b8 are CONVERT_TO_SHORTPTR-encoded uint16_t buffers.
// NOTE(review): *sum is uint64_t, so negative per-pixel diffs wrap modulo
// 2^64; the (int) cast in aq_highbd_8_variance recovers the signed total for
// the block sizes used here — confirm callers keep w*h small enough that the
// narrowing casts cannot truncate.
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
                                 const uint8_t *b8, int b_stride, int w, int h,
                                 uint64_t *sse, uint64_t *sum) {
  int i, j;
  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; i++) {
    for (j = 0; j < w; j++) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }
    a += a_stride;
    b += b_stride;
  }
}
  104. static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
  105. const uint8_t *b8, int b_stride, int w, int h,
  106. unsigned int *sse, int *sum) {
  107. uint64_t sse_long = 0;
  108. uint64_t sum_long = 0;
  109. aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
  110. *sse = (unsigned int)sse_long;
  111. *sum = (int)sum_long;
  112. }
  113. #endif // CONFIG_VP9_HIGHBITDEPTH
// Returns the source variance of block bs, normalized to a 256-pixel
// (16x16-equivalent) scale.  Variance is measured against an all-zero
// reference (stride 0 on the zeros buffer), i.e. sum(x^2) minus the
// squared-mean term.  Blocks overhanging the right/bottom frame edge use a
// C fallback over only the visible bw x bh region; full blocks use the
// optimized fn_ptr variance kernel for this block size.
static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x,
                                   BLOCK_SIZE bs) {
  MACROBLOCKD *xd = &x->e_mbd;
  unsigned int var, sse;
  // mb_to_*_edge are in 1/8-pel units; >> 3 converts overhang to pixels.
  int right_overflow =
      (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
  int bottom_overflow =
      (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;

  if (right_overflow || bottom_overflow) {
    // Partial block: shrink dimensions to the visible region.
    const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
    const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
    int avg;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
                           CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, bw, bh,
                           &sse, &avg);
      // Scale high-bitdepth sums back to an 8-bit-equivalent range.
      sse >>= 2 * (xd->bd - 8);
      avg >>= (xd->bd - 8);
    } else {
      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
                  bw, bh, &sse, &avg);
    }
#else
    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
                bw, bh, &sse, &avg);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    // var = sse - mean^2 * pels; 64-bit intermediates avoid overflow.
    var = sse - (unsigned int)(((int64_t)avg * avg) / (bw * bh));
    return (unsigned int)(((uint64_t)256 * var) / (bw * bh));
  } else {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      var =
          cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                             CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, &sse);
    } else {
      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                               vp9_64_zeros, 0, &sse);
    }
#else
    var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                             vp9_64_zeros, 0, &sse);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    // num_pels_log2_lookup[bs] is log2 of the pixel count, so the shift
    // normalizes the variance to a 256-pixel scale.
    return (unsigned int)(((uint64_t)256 * var) >> num_pels_log2_lookup[bs]);
  }
}
  160. double vp9_log_block_var(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
  161. unsigned int var = block_variance(cpi, x, bs);
  162. vpx_clear_system_state();
  163. return log(var + 1.0);
  164. }
  165. #define DEFAULT_E_MIDPOINT 10.0
  166. int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
  167. double energy;
  168. double energy_midpoint;
  169. vpx_clear_system_state();
  170. energy_midpoint =
  171. (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
  172. energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint;
  173. return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
  174. }