encodeframe.c

  1. /*
  2. * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "vpx_config.h"
  11. #include "vp8_rtcd.h"
  12. #include "./vpx_dsp_rtcd.h"
  13. #include "bitstream.h"
  14. #include "encodemb.h"
  15. #include "encodemv.h"
  16. #if CONFIG_MULTITHREAD
  17. #include "ethreading.h"
  18. #endif
  19. #include "vp8/common/common.h"
  20. #include "onyx_int.h"
  21. #include "vp8/common/extend.h"
  22. #include "vp8/common/entropymode.h"
  23. #include "vp8/common/quant_common.h"
  24. #include "segmentation.h"
  25. #include "vp8/common/setupintrarecon.h"
  26. #include "encodeintra.h"
  27. #include "vp8/common/reconinter.h"
  28. #include "rdopt.h"
  29. #include "pickinter.h"
  30. #include "vp8/common/findnearmv.h"
  31. #include <stdio.h>
  32. #include <limits.h>
  33. #include "vp8/common/invtrans.h"
  34. #include "vpx_ports/vpx_timer.h"
  35. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  36. #include "bitstream.h"
  37. #endif
  38. #include "encodeframe.h"
  39. extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
  40. static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
  41. #ifdef MODE_STATS
  42. unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  43. unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 };
  44. unsigned int inter_b_modes[15] = {
  45. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  46. };
  47. unsigned int y_modes[5] = { 0, 0, 0, 0, 0 };
  48. unsigned int uv_modes[4] = { 0, 0, 0, 0 };
  49. unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  50. #endif
  51. /* activity_avg must be positive, or flat regions could get a zero weight
  52. * (infinite lambda), which confounds analysis.
  53. * This also avoids the need for divide by zero checks in
  54. * vp8_activity_masking().
  55. */
  56. #define VP8_ACTIVITY_AVG_MIN (64)
  57. /* This is used as a reference when computing the source variance for the
  58. * purposes of activity masking.
  59. * Eventually this should be replaced by custom no-reference routines,
  60. * which will be faster.
  61. */
  62. static const unsigned char VP8_VAR_OFFS[16] = { 128, 128, 128, 128, 128, 128,
  63. 128, 128, 128, 128, 128, 128,
  64. 128, 128, 128, 128 };
  65. /* Original activity measure from Tim T's code. */
  66. static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
  67. unsigned int act;
  68. unsigned int sse;
  69. (void)cpi;
  70. /* TODO: This could also be done over smaller areas (8x8), but that would
  71. * require extensive changes elsewhere, as lambda is assumed to be fixed
  72. * over an entire MB in most of the code.
  73. * Another option is to compute four 8x8 variances, and pick a single
  74. * lambda using a non-linear combination (e.g., the smallest, or second
  75. * smallest, etc.).
  76. */
  77. act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
  78. &sse);
  79. act = act << 4;
  80. /* If the region is flat, lower the activity some more. */
  81. if (act < 8 << 12) act = act < 5 << 12 ? act : 5 << 12;
  82. return act;
  83. }
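/* Note on tt_activity_measure(): because the reference stride passed to
 * vpx_variance16x16() is 0, the flat VP8_VAR_OFFS block acts as a constant
 * 128 reference, so the value returned is effectively the variance of the
 * source macroblock itself.  act is that variance scaled by 16 and then
 * reduced further for nearly flat regions.
 */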
  84. /* Stub for alternative experimental activity measures. */
  85. static unsigned int alt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
  86. int use_dc_pred) {
  87. return vp8_encode_intra(cpi, x, use_dc_pred);
  88. }
  89. /* Measure the activity of the current macroblock
  90. * What we measure here is TBD, so it is abstracted into this function.
  91. */
  92. #define ALT_ACT_MEASURE 1
  93. static unsigned int mb_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
  94. int mb_row, int mb_col) {
  95. unsigned int mb_activity;
  96. if (ALT_ACT_MEASURE) {
  97. int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
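/* use_dc_pred is nonzero exactly for macroblocks on the top row or the
 * left-most column, excluding the top-left corner: one prediction edge is
 * available but not both.
 */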
  98. /* Or use an alternative. */
  99. mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
  100. } else {
  101. /* Original activity measure from Tim T's code. */
  102. mb_activity = tt_activity_measure(cpi, x);
  103. }
  104. if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN;
  105. return mb_activity;
  106. }
  107. /* Calculate an "average" mb activity value for the frame */
  108. #define ACT_MEDIAN 0
  109. static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
  110. #if ACT_MEDIAN
  111. /* Find median: Simple n^2 algorithm for experimentation */
  112. {
  113. unsigned int median;
  114. unsigned int i, j;
  115. unsigned int *sortlist;
  116. unsigned int tmp;
  117. /* Create a list to sort to */
  118. CHECK_MEM_ERROR(sortlist,
  119. vpx_calloc(sizeof(unsigned int), cpi->common.MBs));
  120. /* Copy map to sort list */
  121. memcpy(sortlist, cpi->mb_activity_map,
  122. sizeof(unsigned int) * cpi->common.MBs);
  123. /* Ripple each value down to its correct position */
  124. for (i = 1; i < cpi->common.MBs; ++i) {
  125. for (j = i; j > 0; j--) {
  126. if (sortlist[j] < sortlist[j - 1]) {
  127. /* Swap values */
  128. tmp = sortlist[j - 1];
  129. sortlist[j - 1] = sortlist[j];
  130. sortlist[j] = tmp;
  131. } else
  132. break;
  133. }
  134. }
  135. /* Even number of MBs, so estimate the median as the mean of the two either side. */
  136. median = (1 + sortlist[cpi->common.MBs >> 1] +
  137. sortlist[(cpi->common.MBs >> 1) + 1]) >>
  138. 1;
  139. cpi->activity_avg = median;
  140. vpx_free(sortlist);
  141. }
  142. #else
  143. /* Simple mean for now */
  144. cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
  145. #endif
  146. if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
  147. cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
  148. }
  149. /* Experimental code: return fixed value normalized for several clips */
  150. if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
  151. }
  152. #define USE_ACT_INDEX 0
  153. #define OUTPUT_NORM_ACT_STATS 0
  154. #if USE_ACT_INDEX
  155. /* Calculate an activity index for each mb */
  156. static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
  157. VP8_COMMON *const cm = &cpi->common;
  158. int mb_row, mb_col;
  159. int64_t act;
  160. int64_t a;
  161. int64_t b;
  162. #if OUTPUT_NORM_ACT_STATS
  163. FILE *f = fopen("norm_act.stt", "a");
  164. fprintf(f, "\n%12d\n", cpi->activity_avg);
  165. #endif
  166. /* Reset pointers to start of activity map */
  167. x->mb_activity_ptr = cpi->mb_activity_map;
  168. /* Calculate normalized mb activity number. */
  169. for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
  170. /* for each macroblock col in image */
  171. for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
  172. /* Read activity from the map */
  173. act = *(x->mb_activity_ptr);
  174. /* Calculate a normalized activity number */
  175. a = act + 4 * cpi->activity_avg;
  176. b = 4 * act + cpi->activity_avg;
  177. if (b >= a)
  178. *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
  179. else
  180. *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
  181. #if OUTPUT_NORM_ACT_STATS
  182. fprintf(f, " %6d", *(x->mb_activity_ptr));
  183. #endif
  184. /* Increment activity map pointers */
  185. x->mb_activity_ptr++;
  186. }
  187. #if OUTPUT_NORM_ACT_STATS
  188. fprintf(f, "\n");
  189. #endif
  190. }
  191. #if OUTPUT_NORM_ACT_STATS
  192. fclose(f);
  193. #endif
  194. }
  195. #endif
  196. /* Loop through all MBs. Note activity of each, average activity and
  197. * calculate a normalized activity for each
  198. */
  199. static void build_activity_map(VP8_COMP *cpi) {
  200. MACROBLOCK *const x = &cpi->mb;
  201. MACROBLOCKD *xd = &x->e_mbd;
  202. VP8_COMMON *const cm = &cpi->common;
  203. #if ALT_ACT_MEASURE
  204. YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
  205. int recon_yoffset;
  206. int recon_y_stride = new_yv12->y_stride;
  207. #endif
  208. int mb_row, mb_col;
  209. unsigned int mb_activity;
  210. int64_t activity_sum = 0;
  211. /* for each macroblock row in image */
  212. for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
  213. #if ALT_ACT_MEASURE
  214. /* reset above block coeffs */
  215. xd->up_available = (mb_row != 0);
  216. recon_yoffset = (mb_row * recon_y_stride * 16);
  217. #endif
  218. /* for each macroblock col in image */
  219. for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
  220. #if ALT_ACT_MEASURE
  221. xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
  222. xd->left_available = (mb_col != 0);
  223. recon_yoffset += 16;
  224. #endif
  225. /* Copy current mb to a buffer */
  226. vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
  227. /* measure activity */
  228. mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
  229. /* Keep frame sum */
  230. activity_sum += mb_activity;
  231. /* Store MB level activity details. */
  232. *x->mb_activity_ptr = mb_activity;
  233. /* Increment activity map pointer */
  234. x->mb_activity_ptr++;
  235. /* adjust to the next column of source macroblocks */
  236. x->src.y_buffer += 16;
  237. }
  238. /* adjust to the next row of mbs */
  239. x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
  240. #if ALT_ACT_MEASURE
  241. /* extend the recon for intra prediction */
  242. vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
  243. xd->dst.v_buffer + 8);
  244. #endif
  245. }
  246. /* Calculate an "average" MB activity */
  247. calc_av_activity(cpi, activity_sum);
  248. #if USE_ACT_INDEX
  249. /* Calculate an activity index number for each mb */
  250. calc_activity_index(cpi, x);
  251. #endif
  252. }
  253. /* Macroblock activity masking */
  254. void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
  255. #if USE_ACT_INDEX
  256. x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  257. x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  258. x->errorperbit += (x->errorperbit == 0);
  259. #else
  260. int64_t a;
  261. int64_t b;
  262. int64_t act = *(x->mb_activity_ptr);
  263. /* Apply the masking to the RD multiplier. */
  264. a = act + (2 * cpi->activity_avg);
  265. b = (2 * act) + cpi->activity_avg;
  266. x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
  267. x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  268. x->errorperbit += (x->errorperbit == 0);
  269. #endif
  270. /* Activity based Zbin adjustment */
  271. adjust_act_zbin(cpi, x);
  272. }
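/* Example of the scaling in vp8_activity_masking(): with act equal to
 * activity_avg, b / a == 1 and rdmult is unchanged; with act equal to
 * 4 * activity_avg, a = 6 * avg and b = 9 * avg, so rdmult is scaled by
 * roughly 1.5.  Busier macroblocks therefore get a larger lambda, i.e. the
 * encoder tolerates more distortion there to save bits.
 */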
  273. static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
  274. MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp,
  275. int *segment_counts, int *totalrate) {
  276. int recon_yoffset, recon_uvoffset;
  277. int mb_col;
  278. int ref_fb_idx = cm->lst_fb_idx;
  279. int dst_fb_idx = cm->new_fb_idx;
  280. int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
  281. int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
  282. int map_index = (mb_row * cpi->common.mb_cols);
  283. #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  284. const int num_part = (1 << cm->multi_token_partition);
  285. TOKENEXTRA *tp_start = cpi->tok;
  286. vp8_writer *w;
  287. #endif
  288. #if CONFIG_MULTITHREAD
  289. const int nsync = cpi->mt_sync_range;
  290. vpx_atomic_int rightmost_col = VPX_ATOMIC_INIT(cm->mb_cols + nsync);
  291. const vpx_atomic_int *last_row_current_mb_col;
  292. vpx_atomic_int *current_mb_col = &cpi->mt_current_mb_col[mb_row];
  293. if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0 && mb_row != 0) {
  294. last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
  295. } else {
  296. last_row_current_mb_col = &rightmost_col;
  297. }
  298. #endif
  299. #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  300. if (num_part > 1)
  301. w = &cpi->bc[1 + (mb_row % num_part)];
  302. else
  303. w = &cpi->bc[1];
  304. #endif
  305. /* reset above block coeffs */
  306. xd->above_context = cm->above_context;
  307. xd->up_available = (mb_row != 0);
  308. recon_yoffset = (mb_row * recon_y_stride * 16);
  309. recon_uvoffset = (mb_row * recon_uv_stride * 8);
  310. cpi->tplist[mb_row].start = *tp;
  311. /* printf("Main mb_row = %d\n", mb_row); */
  312. /* Distance of Mb to the top & bottom edges, specified in 1/8th pel
  313. * units, as they are always compared to values that are in 1/8th pel units
  314. */
  315. xd->mb_to_top_edge = -((mb_row * 16) << 3);
  316. xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
  317. /* Set up limit values for vertical motion vector components
  318. * to prevent them extending beyond the UMV borders
  319. */
  320. x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
  321. x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
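/* The 16 subtracted from VP8BORDERINPIXELS reserves room for the height of
 * the macroblock itself, so a vector at the limit still keeps the whole
 * 16x16 prediction block inside the extended (UMV) border.
 */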
  322. /* Set the mb activity pointer to the start of the row. */
  323. x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
  324. /* for each macroblock col in image */
  325. for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
  326. #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  327. *tp = cpi->tok;
  328. #endif
  329. /* Distance of Mb to the left & right edges, specified in
  330. * 1/8th pel units as they are always compared to values
  331. * that are in 1/8th pel units
  332. */
  333. xd->mb_to_left_edge = -((mb_col * 16) << 3);
  334. xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
  335. /* Set up limit values for horizontal motion vector components
  336. * to prevent them extending beyond the UMV borders
  337. */
  338. x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
  339. x->mv_col_max =
  340. ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
  341. xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
  342. xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
  343. xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
  344. xd->left_available = (mb_col != 0);
  345. x->rddiv = cpi->RDDIV;
  346. x->rdmult = cpi->RDMULT;
  347. /* Copy current mb to a buffer */
  348. vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
  349. #if CONFIG_MULTITHREAD
  350. if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
  351. if (((mb_col - 1) % nsync) == 0) {
  352. vpx_atomic_store_release(current_mb_col, mb_col - 1);
  353. }
  354. if (mb_row && !(mb_col & (nsync - 1))) {
  355. vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
  356. }
  357. }
  358. #endif
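/* Row-level synchronisation: every nsync columns this row publishes the
 * last column it has finished via current_mb_col, and (for all but the
 * first row) spins until the row above is roughly nsync macroblocks ahead,
 * so the above and above-right context needed for this MB has already been
 * encoded.
 */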
  359. if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);
  360. /* Is segmentation enabled */
  361. /* MB level adjustment to quantizer */
  362. if (xd->segmentation_enabled) {
  363. /* Code to set segment id in xd->mbmi.segment_id for current MB
  364. * (with range checking)
  365. */
  366. if (cpi->segmentation_map[map_index + mb_col] <= 3) {
  367. xd->mode_info_context->mbmi.segment_id =
  368. cpi->segmentation_map[map_index + mb_col];
  369. } else {
  370. xd->mode_info_context->mbmi.segment_id = 0;
  371. }
  372. vp8cx_mb_init_quantizer(cpi, x, 1);
  373. } else {
  374. /* Set to Segment 0 by default */
  375. xd->mode_info_context->mbmi.segment_id = 0;
  376. }
  377. x->active_ptr = cpi->active_map + map_index + mb_col;
  378. if (cm->frame_type == KEY_FRAME) {
  379. *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
  380. #ifdef MODE_STATS
  381. y_modes[xd->mbmi.mode]++;
  382. #endif
  383. } else {
  384. *totalrate += vp8cx_encode_inter_macroblock(
  385. cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);
  386. #ifdef MODE_STATS
  387. inter_y_modes[xd->mbmi.mode]++;
  388. if (xd->mbmi.mode == SPLITMV) {
  389. int b;
  390. for (b = 0; b < xd->mbmi.partition_count; ++b) {
  391. inter_b_modes[x->partition->bmi[b].mode]++;
  392. }
  393. }
  394. #endif
  395. // Keep track of how many (consecutive) times a block is coded
  396. // as ZEROMV_LASTREF, for base layer frames.
  397. // Reset to 0 if it's coded as anything else.
  398. if (cpi->current_layer == 0) {
  399. if (xd->mode_info_context->mbmi.mode == ZEROMV &&
  400. xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
  401. // Increment, check for wrap-around.
  402. if (cpi->consec_zero_last[map_index + mb_col] < 255) {
  403. cpi->consec_zero_last[map_index + mb_col] += 1;
  404. }
  405. if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
  406. cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
  407. }
  408. } else {
  409. cpi->consec_zero_last[map_index + mb_col] = 0;
  410. cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
  411. }
  412. if (x->zero_last_dot_suppress) {
  413. cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
  414. }
  415. }
  416. /* Special case code for cyclic refresh
  417. * If cyclic update is enabled then copy xd->mbmi.segment_id (which
  418. * may have been updated based on mode during
  419. * vp8cx_encode_inter_macroblock()) back into the global
  420. * segmentation map
  421. */
  422. if ((cpi->current_layer == 0) &&
  423. (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
  424. cpi->segmentation_map[map_index + mb_col] =
  425. xd->mode_info_context->mbmi.segment_id;
  426. /* If the block has been refreshed mark it as clean (the
  427. * magnitude of the -ve influences how long it will be before
  428. * we consider another refresh):
  429. * Else if it was coded (last frame 0,0) and has not already
  430. * been refreshed then mark it as a candidate for cleanup
  431. * next time (marked 0) else mark it as dirty (1).
  432. */
  433. if (xd->mode_info_context->mbmi.segment_id) {
  434. cpi->cyclic_refresh_map[map_index + mb_col] = -1;
  435. } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
  436. (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
  437. if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
  438. cpi->cyclic_refresh_map[map_index + mb_col] = 0;
  439. }
  440. } else {
  441. cpi->cyclic_refresh_map[map_index + mb_col] = 1;
  442. }
  443. }
  444. }
  445. cpi->tplist[mb_row].stop = *tp;
  446. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  447. /* pack tokens for this MB */
  448. {
  449. int tok_count = *tp - tp_start;
  450. vp8_pack_tokens(w, tp_start, tok_count);
  451. }
  452. #endif
  453. /* Increment pointer into gf usage flags structure. */
  454. x->gf_active_ptr++;
  455. /* Increment the activity mask pointers. */
  456. x->mb_activity_ptr++;
  457. /* adjust to the next column of macroblocks */
  458. x->src.y_buffer += 16;
  459. x->src.u_buffer += 8;
  460. x->src.v_buffer += 8;
  461. recon_yoffset += 16;
  462. recon_uvoffset += 8;
  463. /* Keep track of segment usage */
  464. segment_counts[xd->mode_info_context->mbmi.segment_id]++;
  465. /* skip to next mb */
  466. xd->mode_info_context++;
  467. x->partition_info++;
  468. xd->above_context++;
  469. }
  470. /* extend the recon for intra prediction */
  471. vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
  472. xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
  473. #if CONFIG_MULTITHREAD
  474. if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
  475. vpx_atomic_store_release(current_mb_col,
  476. vpx_atomic_load_acquire(&rightmost_col));
  477. }
  478. #endif
  479. /* this is to account for the border */
  480. xd->mode_info_context++;
  481. x->partition_info++;
  482. }
  483. static void init_encode_frame_mb_context(VP8_COMP *cpi) {
  484. MACROBLOCK *const x = &cpi->mb;
  485. VP8_COMMON *const cm = &cpi->common;
  486. MACROBLOCKD *const xd = &x->e_mbd;
  487. /* GF active flags data structure */
  488. x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
  489. /* Activity map pointer */
  490. x->mb_activity_ptr = cpi->mb_activity_map;
  491. x->act_zbin_adj = 0;
  492. x->partition_info = x->pi;
  493. xd->mode_info_context = cm->mi;
  494. xd->mode_info_stride = cm->mode_info_stride;
  495. xd->frame_type = cm->frame_type;
  496. /* reset intra mode contexts */
  497. if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm);
  498. /* Copy data over into macro block data structures. */
  499. x->src = *cpi->Source;
  500. xd->pre = cm->yv12_fb[cm->lst_fb_idx];
  501. xd->dst = cm->yv12_fb[cm->new_fb_idx];
  502. /* set up frame for intra coded blocks */
  503. vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
  504. vp8_build_block_offsets(x);
  505. xd->mode_info_context->mbmi.mode = DC_PRED;
  506. xd->mode_info_context->mbmi.uv_mode = DC_PRED;
  507. xd->left_context = &cm->left_context;
  508. x->mvc = cm->fc.mvc;
  509. memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
  510. /* Special case treatment when GF and ARF are not sensible options
  511. * for reference
  512. */
  513. if (cpi->ref_frame_flags == VP8_LAST_FRAME) {
  514. vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255,
  515. 128);
  516. } else if ((cpi->oxcf.number_of_layers > 1) &&
  517. (cpi->ref_frame_flags == VP8_GOLD_FRAME)) {
  518. vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255);
  519. } else if ((cpi->oxcf.number_of_layers > 1) &&
  520. (cpi->ref_frame_flags == VP8_ALTR_FRAME)) {
  521. vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1);
  522. } else {
  523. vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded,
  524. cpi->prob_last_coded, cpi->prob_gf_coded);
  525. }
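/* The hard-wired probabilities above (255 = almost certain, 1 = almost
 * never) steer the reference-frame signalling cost towards the single
 * frame that is actually usable (only LAST, only GOLDEN or only ALTREF);
 * in the normal case the running estimates prob_last_coded and
 * prob_gf_coded are used instead.
 */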
  526. xd->fullpixel_mask = 0xffffffff;
  527. if (cm->full_pixel) xd->fullpixel_mask = 0xfffffff8;
  528. vp8_zero(x->coef_counts);
  529. vp8_zero(x->ymode_count);
  530. vp8_zero(x->uv_mode_count) x->prediction_error = 0;
  531. x->intra_error = 0;
  532. vp8_zero(x->count_mb_ref_frame_usage);
  533. }
  534. #if CONFIG_MULTITHREAD
  535. static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) {
  536. int i = 0;
  537. do {
  538. int j = 0;
  539. do {
  540. int k = 0;
  541. do {
  542. /* at every context */
  543. /* calc probs and branch cts for this frame only */
  544. int t = 0; /* token/prob index */
  545. do {
  546. x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t];
  547. } while (++t < ENTROPY_NODES);
  548. } while (++k < PREV_COEF_CONTEXTS);
  549. } while (++j < COEF_BANDS);
  550. } while (++i < BLOCK_TYPES);
  551. }
  552. #endif // CONFIG_MULTITHREAD
  553. void vp8_encode_frame(VP8_COMP *cpi) {
  554. int mb_row;
  555. MACROBLOCK *const x = &cpi->mb;
  556. VP8_COMMON *const cm = &cpi->common;
  557. MACROBLOCKD *const xd = &x->e_mbd;
  558. TOKENEXTRA *tp = cpi->tok;
  559. int segment_counts[MAX_MB_SEGMENTS];
  560. int totalrate;
  561. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  562. BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */
  563. const int num_part = (1 << cm->multi_token_partition);
  564. #endif
  565. memset(segment_counts, 0, sizeof(segment_counts));
  566. totalrate = 0;
  567. if (cpi->compressor_speed == 2) {
  568. if (cpi->oxcf.cpu_used < 0) {
  569. cpi->Speed = -(cpi->oxcf.cpu_used);
  570. } else {
  571. vp8_auto_select_speed(cpi);
  572. }
  573. }
  574. /* Functions setup for all frame types so we can use MC in AltRef */
  575. if (!cm->use_bilinear_mc_filter) {
  576. xd->subpixel_predict = vp8_sixtap_predict4x4;
  577. xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
  578. xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
  579. xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
  580. } else {
  581. xd->subpixel_predict = vp8_bilinear_predict4x4;
  582. xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
  583. xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
  584. xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
  585. }
  586. cpi->mb.skip_true_count = 0;
  587. cpi->tok_count = 0;
  588. #if 0
  589. /* Experimental code */
  590. cpi->frame_distortion = 0;
  591. cpi->last_mb_distortion = 0;
  592. #endif
  593. xd->mode_info_context = cm->mi;
  594. vp8_zero(cpi->mb.MVcount);
  595. vp8cx_frame_init_quantizer(cpi);
  596. vp8_initialize_rd_consts(cpi, x,
  597. vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
  598. vp8cx_initialize_me_consts(cpi, cm->base_qindex);
  599. if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
  600. /* Initialize encode frame context. */
  601. init_encode_frame_mb_context(cpi);
  602. /* Build a frame level activity map */
  603. build_activity_map(cpi);
  604. }
  605. /* re-init encode frame context. */
  606. init_encode_frame_mb_context(cpi);
  607. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  608. {
  609. int i;
  610. for (i = 0; i < num_part; ++i) {
  611. vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
  612. cpi->partition_d_end[i + 1]);
  613. bc[i].error = &cm->error;
  614. }
  615. }
  616. #endif
  617. {
  618. struct vpx_usec_timer emr_timer;
  619. vpx_usec_timer_start(&emr_timer);
  620. #if CONFIG_MULTITHREAD
  621. if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
  622. int i;
  623. vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
  624. cpi->encoding_thread_count);
  625. for (i = 0; i < cm->mb_rows; ++i)
  626. vpx_atomic_store_release(&cpi->mt_current_mb_col[i], -1);
  627. for (i = 0; i < cpi->encoding_thread_count; ++i) {
  628. sem_post(&cpi->h_event_start_encoding[i]);
  629. }
  630. for (mb_row = 0; mb_row < cm->mb_rows;
  631. mb_row += (cpi->encoding_thread_count + 1)) {
  632. vp8_zero(cm->left_context)
  633. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  634. tp = cpi->tok;
  635. #else
  636. tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
  637. #endif
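/* With on-the-fly bitpacking the tokens are packed per macroblock and the
 * buffer is reused for every row; otherwise each row writes into its own
 * disjoint slice of cpi->tok, offset by mb_cols * 16 * 24 tokens, so rows
 * handled by different threads never overlap.
 */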
  638. encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
  639. /* adjust to the next row of mbs */
  640. x->src.y_buffer +=
  641. 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
  642. 16 * cm->mb_cols;
  643. x->src.u_buffer +=
  644. 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
  645. 8 * cm->mb_cols;
  646. x->src.v_buffer +=
  647. 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
  648. 8 * cm->mb_cols;
  649. xd->mode_info_context +=
  650. xd->mode_info_stride * cpi->encoding_thread_count;
  651. x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
  652. x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
  653. }
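/* With N = cpi->encoding_thread_count workers, the main thread encodes
 * rows 0, N+1, 2*(N+1), ... and the workers take the rows in between, so
 * the adjustments above skip the source, mode info and gf-flag pointers
 * forward by a whole group of N+1 macroblock rows per iteration.
 */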
  654. /* Wait for all the threads to finish. */
  655. for (i = 0; i < cpi->encoding_thread_count; ++i) {
  656. sem_wait(&cpi->h_event_end_encoding[i]);
  657. }
  658. for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
  659. cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop -
  660. cpi->tplist[mb_row].start);
  661. }
  662. if (xd->segmentation_enabled) {
  663. int j;
  664. if (xd->segmentation_enabled) {
  665. for (i = 0; i < cpi->encoding_thread_count; ++i) {
  666. for (j = 0; j < 4; ++j) {
  667. segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
  668. }
  669. }
  670. }
  671. }
  672. for (i = 0; i < cpi->encoding_thread_count; ++i) {
  673. int mode_count;
  674. int c_idx;
  675. totalrate += cpi->mb_row_ei[i].totalrate;
  676. cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
  677. for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) {
  678. cpi->mb.ymode_count[mode_count] +=
  679. cpi->mb_row_ei[i].mb.ymode_count[mode_count];
  680. }
  681. for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) {
  682. cpi->mb.uv_mode_count[mode_count] +=
  683. cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
  684. }
  685. for (c_idx = 0; c_idx < MVvals; ++c_idx) {
  686. cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
  687. cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
  688. }
  689. cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
  690. cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
  691. for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) {
  692. cpi->mb.count_mb_ref_frame_usage[c_idx] +=
  693. cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
  694. }
  695. for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) {
  696. cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
  697. }
  698. /* add up counts for each thread */
  699. sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
  700. }
  701. } else
  702. #endif // CONFIG_MULTITHREAD
  703. {
  704. /* for each macroblock row in image */
  705. for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
  706. vp8_zero(cm->left_context)
  707. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  708. tp = cpi->tok;
  709. #endif
  710. encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
  711. /* adjust to the next row of mbs */
  712. x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
  713. x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
  714. x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
  715. }
  716. cpi->tok_count = (unsigned int)(tp - cpi->tok);
  717. }
  718. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  719. {
  720. int i;
  721. for (i = 0; i < num_part; ++i) {
  722. vp8_stop_encode(&bc[i]);
  723. cpi->partition_sz[i + 1] = bc[i].pos;
  724. }
  725. }
  726. #endif
  727. vpx_usec_timer_mark(&emr_timer);
  728. cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  729. }
  730. // Work out the segment probabilities if segmentation is enabled
  731. // and needs to be updated
  732. if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
  733. int tot_count;
  734. int i;
  735. /* Set to defaults */
  736. memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
  737. tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] +
  738. segment_counts[3];
  739. if (tot_count) {
  740. xd->mb_segment_tree_probs[0] =
  741. ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
  742. tot_count = segment_counts[0] + segment_counts[1];
  743. if (tot_count > 0) {
  744. xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
  745. }
  746. tot_count = segment_counts[2] + segment_counts[3];
  747. if (tot_count > 0) {
  748. xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
  749. }
  750. /* Zero probabilities not allowed */
  751. for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
  752. if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
  753. }
  754. }
  755. }
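/* The three tree probabilities encode segment ids as a two-level tree:
 * probs[0] chooses {0,1} versus {2,3}, probs[1] splits 0 from 1 and
 * probs[2] splits 2 from 3.  Example with counts {60, 20, 15, 5}:
 *   probs[0] = (60 + 20) * 255 / 100 = 204
 *   probs[1] =  60 * 255 / (60 + 20) = 191
 *   probs[2] =  15 * 255 / (15 + 5)  = 191
 */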
  756. /* projected_frame_size in units of BYTES */
  757. cpi->projected_frame_size = totalrate >> 8;
  758. /* Make a note of the percentage MBs coded Intra. */
  759. if (cm->frame_type == KEY_FRAME) {
  760. cpi->this_frame_percent_intra = 100;
  761. } else {
  762. int tot_modes;
  763. tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] +
  764. cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] +
  765. cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] +
  766. cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
  767. if (tot_modes) {
  768. cpi->this_frame_percent_intra =
  769. cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
  770. }
  771. }
  772. #if !CONFIG_REALTIME_ONLY
  773. /* Adjust the projected reference frame usage probability numbers to
  774. * reflect what we have just seen. This may be useful when we make
  775. * multiple iterations of the recode loop rather than continuing to use
  776. * values from the previous frame.
  777. */
  778. if ((cm->frame_type != KEY_FRAME) &&
  779. ((cpi->oxcf.number_of_layers > 1) ||
  780. (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) {
  781. vp8_convert_rfct_to_prob(cpi);
  782. }
  783. #endif
  784. }
  785. void vp8_setup_block_ptrs(MACROBLOCK *x) {
  786. int r, c;
  787. int i;
  788. for (r = 0; r < 4; ++r) {
  789. for (c = 0; c < 4; ++c) {
  790. x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
  791. }
  792. }
  793. for (r = 0; r < 2; ++r) {
  794. for (c = 0; c < 2; ++c) {
  795. x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
  796. }
  797. }
  798. for (r = 0; r < 2; ++r) {
  799. for (c = 0; c < 2; ++c) {
  800. x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
  801. }
  802. }
  803. x->block[24].src_diff = x->src_diff + 384;
  804. for (i = 0; i < 25; ++i) {
  805. x->block[i].coeff = x->coeff + i * 16;
  806. }
  807. }
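/* vp8_setup_block_ptrs() lays src_diff out as one contiguous 400-entry
 * array: the 16 Y sub-blocks occupy entries 0-255 (16x16 region, stride
 * 16), the four U and four V sub-blocks start at 256 and 320 (8x8 regions,
 * stride 8), and the second-order Y2 block sits at 384.  The coeff
 * pointers are simply 16 entries apart per block.
 */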
  808. void vp8_build_block_offsets(MACROBLOCK *x) {
  809. int block = 0;
  810. int br, bc;
  811. vp8_build_block_doffsets(&x->e_mbd);
  812. /* y blocks */
  813. x->thismb_ptr = &x->thismb[0];
  814. for (br = 0; br < 4; ++br) {
  815. for (bc = 0; bc < 4; ++bc) {
  816. BLOCK *this_block = &x->block[block];
  817. this_block->base_src = &x->thismb_ptr;
  818. this_block->src_stride = 16;
  819. this_block->src = 4 * br * 16 + 4 * bc;
  820. ++block;
  821. }
  822. }
  823. /* u blocks */
  824. for (br = 0; br < 2; ++br) {
  825. for (bc = 0; bc < 2; ++bc) {
  826. BLOCK *this_block = &x->block[block];
  827. this_block->base_src = &x->src.u_buffer;
  828. this_block->src_stride = x->src.uv_stride;
  829. this_block->src = 4 * br * this_block->src_stride + 4 * bc;
  830. ++block;
  831. }
  832. }
  833. /* v blocks */
  834. for (br = 0; br < 2; ++br) {
  835. for (bc = 0; bc < 2; ++bc) {
  836. BLOCK *this_block = &x->block[block];
  837. this_block->base_src = &x->src.v_buffer;
  838. this_block->src_stride = x->src.uv_stride;
  839. this_block->src = 4 * br * this_block->src_stride + 4 * bc;
  840. ++block;
  841. }
  842. }
  843. }
  844. static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
  845. const MACROBLOCKD *xd = &x->e_mbd;
  846. const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
  847. const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
  848. #ifdef MODE_STATS
  849. const int is_key = cpi->common.frame_type == KEY_FRAME;
  850. ++(is_key ? uv_modes : inter_uv_modes)[uvm];
  851. if (m == B_PRED) {
  852. unsigned int *const bct = is_key ? b_modes : inter_b_modes;
  853. int b = 0;
  854. do {
  855. ++bct[xd->block[b].bmi.mode];
  856. } while (++b < 16);
  857. }
  858. #else
  859. (void)cpi;
  860. #endif
  861. ++x->ymode_count[m];
  862. ++x->uv_mode_count[uvm];
  863. }
  864. /* Experimental stub function to create a per MB zbin adjustment based on
  865. * some previously calculated measure of MB activity.
  866. */
  867. static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
  868. #if USE_ACT_INDEX
  869. x->act_zbin_adj = *(x->mb_activity_ptr);
  870. #else
  871. int64_t a;
  872. int64_t b;
  873. int64_t act = *(x->mb_activity_ptr);
  874. /* Apply the masking to the zbin adjustment. */
  875. a = act + 4 * cpi->activity_avg;
  876. b = 4 * act + cpi->activity_avg;
  877. if (act > cpi->activity_avg) {
  878. x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
  879. } else {
  880. x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
  881. }
  882. #endif
  883. }
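/* adjust_act_zbin() mirrors the scaling in vp8_activity_masking() but with
 * 4:1 weights: act_zbin_adj is positive (a larger zbin, coarser
 * quantization) for macroblocks busier than the frame average and negative
 * for smoother ones.  E.g. act = 4 * activity_avg gives a = 8 * avg,
 * b = 17 * avg and an adjustment of +1.  The value is later folded into
 * zbin_extra (see vp8_update_zbin_extra()).
 */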
  884. int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
  885. TOKENEXTRA **t) {
  886. MACROBLOCKD *xd = &x->e_mbd;
  887. int rate;
  888. if (cpi->sf.RD && cpi->compressor_speed != 2) {
  889. vp8_rd_pick_intra_mode(x, &rate);
  890. } else {
  891. vp8_pick_intra_mode(x, &rate);
  892. }
  893. if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
  894. adjust_act_zbin(cpi, x);
  895. vp8_update_zbin_extra(cpi, x);
  896. }
  897. if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {
  898. vp8_encode_intra4x4mby(x);
  899. } else {
  900. vp8_encode_intra16x16mby(x);
  901. }
  902. vp8_encode_intra16x16mbuv(x);
  903. sum_intra_stats(cpi, x);
  904. vp8_tokenize_mb(cpi, x, t);
  905. if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd);
  906. vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
  907. xd->dst.u_buffer, xd->dst.v_buffer,
  908. xd->dst.uv_stride, xd->eobs + 16);
  909. return rate;
  910. }
  911. #ifdef SPEEDSTATS
  912. extern int cnt_pm;
  913. #endif
  914. extern void vp8_fix_contexts(MACROBLOCKD *x);
  915. int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
  916. int recon_yoffset, int recon_uvoffset,
  917. int mb_row, int mb_col) {
  918. MACROBLOCKD *const xd = &x->e_mbd;
  919. int intra_error = 0;
  920. int rate;
  921. int distortion;
  922. x->skip = 0;
  923. if (xd->segmentation_enabled) {
  924. x->encode_breakout =
  925. cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
  926. } else {
  927. x->encode_breakout = cpi->oxcf.encode_breakout;
  928. }
  929. #if CONFIG_TEMPORAL_DENOISING
  930. /* Reset the best sse mode/mv for each macroblock. */
  931. x->best_reference_frame = INTRA_FRAME;
  932. x->best_zeromv_reference_frame = INTRA_FRAME;
  933. x->best_sse_inter_mode = 0;
  934. x->best_sse_mv.as_int = 0;
  935. x->need_to_clamp_best_mvs = 0;
  936. #endif
  937. if (cpi->sf.RD) {
  938. int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
  939. /* Are we using the fast quantizer for the mode selection? */
  940. if (cpi->sf.use_fastquant_for_pick) {
  941. x->quantize_b = vp8_fast_quantize_b;
  942. /* the fast quantizer does not use zbin_extra, so
  943. * do not recalculate */
  944. x->zbin_mode_boost_enabled = 0;
  945. }
  946. vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
  947. &distortion, &intra_error, mb_row, mb_col);
  948. /* switch back to the regular quantizer for the encode */
  949. if (cpi->sf.improved_quant) {
  950. x->quantize_b = vp8_regular_quantize_b;
  951. }
  952. /* restore x->zbin_mode_boost_enabled */
  953. x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
  954. } else {
  955. vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
  956. &distortion, &intra_error, mb_row, mb_col);
  957. }
  958. x->prediction_error += distortion;
  959. x->intra_error += intra_error;
  960. if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
  961. /* Adjust the zbin based on this MB rate. */
  962. adjust_act_zbin(cpi, x);
  963. }
  964. #if 0
  965. /* Experimental RD code */
  966. cpi->frame_distortion += distortion;
  967. cpi->last_mb_distortion = distortion;
  968. #endif
  969. /* MB level adjustment to quantizer setup */
  970. if (xd->segmentation_enabled) {
  971. /* If cyclic update enabled */
  972. if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) {
  973. /* Clear segment_id back to 0 if not coded (last frame 0,0) */
  974. if ((xd->mode_info_context->mbmi.segment_id == 1) &&
  975. ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
  976. (xd->mode_info_context->mbmi.mode != ZEROMV))) {
  977. xd->mode_info_context->mbmi.segment_id = 0;
  978. /* segment_id changed, so update */
  979. vp8cx_mb_init_quantizer(cpi, x, 1);
  980. }
  981. }
  982. }
  983. {
  984. /* Experimental code.
  985. * Special case for gf and arf zeromv modes, for 1 temporal layer.
  986. * Increase zbin size to suppress noise.
  987. */
  988. x->zbin_mode_boost = 0;
  989. if (x->zbin_mode_boost_enabled) {
  990. if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
  991. if (xd->mode_info_context->mbmi.mode == ZEROMV) {
  992. if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
  993. cpi->oxcf.number_of_layers == 1) {
  994. x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
  995. } else {
  996. x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
  997. }
  998. } else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
  999. x->zbin_mode_boost = 0;
  1000. } else {
  1001. x->zbin_mode_boost = MV_ZBIN_BOOST;
  1002. }
  1003. }
  1004. }
  1005. /* The fast quantizer doesn't use zbin_extra, so only update it when
  1006. * the regular quantizer is in use. */
  1007. if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x);
  1008. }
  1009. x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++;
  1010. if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
  1011. vp8_encode_intra16x16mbuv(x);
  1012. if (xd->mode_info_context->mbmi.mode == B_PRED) {
  1013. vp8_encode_intra4x4mby(x);
  1014. } else {
  1015. vp8_encode_intra16x16mby(x);
  1016. }
  1017. sum_intra_stats(cpi, x);
  1018. } else {
  1019. int ref_fb_idx;
  1020. if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
  1021. ref_fb_idx = cpi->common.lst_fb_idx;
  1022. } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) {
  1023. ref_fb_idx = cpi->common.gld_fb_idx;
  1024. } else {
  1025. ref_fb_idx = cpi->common.alt_fb_idx;
  1026. }
  1027. xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
  1028. xd->pre.u_buffer =
  1029. cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
  1030. xd->pre.v_buffer =
  1031. cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
  1032. if (!x->skip) {
  1033. vp8_encode_inter16x16(x);
  1034. } else {
  1035. vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
  1036. xd->dst.v_buffer, xd->dst.y_stride,
  1037. xd->dst.uv_stride);
  1038. }
  1039. }
  1040. if (!x->skip) {
  1041. vp8_tokenize_mb(cpi, x, t);
  1042. if (xd->mode_info_context->mbmi.mode != B_PRED) {
  1043. vp8_inverse_transform_mby(xd);
  1044. }
  1045. vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
  1046. xd->dst.u_buffer, xd->dst.v_buffer,
  1047. xd->dst.uv_stride, xd->eobs + 16);
  1048. } else {
  1049. /* always set mb_skip_coeff as it is needed by the loopfilter */
  1050. xd->mode_info_context->mbmi.mb_skip_coeff = 1;
  1051. if (cpi->common.mb_no_coeff_skip) {
  1052. x->skip_true_count++;
  1053. vp8_fix_contexts(xd);
  1054. } else {
  1055. vp8_stuff_mb(cpi, x, t);
  1056. }
  1057. }
  1058. return rate;
  1059. }