pickinter.c
  1. /*
  2. * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <limits.h>
  11. #include "vpx_config.h"
  12. #include "./vpx_dsp_rtcd.h"
  13. #include "onyx_int.h"
  14. #include "modecosts.h"
  15. #include "encodeintra.h"
  16. #include "vp8/common/common.h"
  17. #include "vp8/common/entropymode.h"
  18. #include "pickinter.h"
  19. #include "vp8/common/findnearmv.h"
  20. #include "encodemb.h"
  21. #include "vp8/common/reconinter.h"
  22. #include "vp8/common/reconintra.h"
  23. #include "vp8/common/reconintra4x4.h"
  24. #include "vpx_dsp/variance.h"
  25. #include "mcomp.h"
  26. #include "rdopt.h"
  27. #include "vpx_dsp/vpx_dsp_common.h"
  28. #include "vpx_mem/vpx_mem.h"
  29. #if CONFIG_TEMPORAL_DENOISING
  30. #include "denoising.h"
  31. #endif
  32. #ifdef SPEEDSTATS
  33. extern unsigned int cnt_pm;
  34. #endif
  35. #define MODEL_MODE 1
  36. extern const int vp8_ref_frame_order[MAX_MODES];
  37. extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
  38. // Fixed-point implementation of a skin color classifier. Skin color
  39. // is modeled by a Gaussian distribution in the CbCr color space.
  40. // See ../../test/skin_color_detector_test.cc where the reference
  41. // skin color classifier is defined.
  42. // Fixed-point skin color model parameters.
  43. static const int skin_mean[5][2] = { { 7463, 9614 },
  44. { 6400, 10240 },
  45. { 7040, 10240 },
  46. { 8320, 9280 },
  47. { 6800, 9614 } };
  48. static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16
  49. static const int skin_threshold[6] = { 1570636, 1400000, 800000,
  50. 800000, 800000, 800000 }; // q18
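// Note: evaluate_skin_color_difference() below computes the squared
// Mahalanobis distance d = (x - m)' * inv_cov * (x - m) for x = (cb, cr)
// entirely in fixed point: the q6 inputs give q12 squared differences,
// which are rounded down to q2 and weighted by the q16 inverse-covariance
// terms, so the result lands in q18 and is compared directly against the
// q18 skin_threshold[] values.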
  51. // Evaluates the Mahalanobis distance measure for the input CbCr values.
  52. static int evaluate_skin_color_difference(int cb, int cr, int idx) {
  53. const int cb_q6 = cb << 6;
  54. const int cr_q6 = cr << 6;
  55. const int cb_diff_q12 =
  56. (cb_q6 - skin_mean[idx][0]) * (cb_q6 - skin_mean[idx][0]);
  57. const int cbcr_diff_q12 =
  58. (cb_q6 - skin_mean[idx][0]) * (cr_q6 - skin_mean[idx][1]);
  59. const int cr_diff_q12 =
  60. (cr_q6 - skin_mean[idx][1]) * (cr_q6 - skin_mean[idx][1]);
  61. const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
  62. const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
  63. const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
  64. const int skin_diff =
  65. skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
  66. skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
  67. return skin_diff;
  68. }
  69. // Checks whether the input YCbCr values correspond to skin color.
  70. static int is_skin_color(int y, int cb, int cr, int consec_zeromv) {
  71. if (y < 40 || y > 220) {
  72. return 0;
  73. } else {
  74. if (MODEL_MODE == 0) {
  75. return (evaluate_skin_color_difference(cb, cr, 0) < skin_threshold[0]);
  76. } else {
  77. int i = 0;
  78. // Not skin if the block has had zero motion for many consecutive frames.
  79. if (consec_zeromv > 60) return 0;
  80. // Exit on grey.
  81. if (cb == 128 && cr == 128) return 0;
  82. // Exit on very strong cb.
  83. if (cb > 150 && cr < 110) return 0;
  84. for (; i < 5; ++i) {
  85. int skin_color_diff = evaluate_skin_color_difference(cb, cr, i);
  86. if (skin_color_diff < skin_threshold[i + 1]) {
  87. if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2)) {
  88. return 0;
  89. } else if (consec_zeromv > 25 &&
  90. skin_color_diff > (skin_threshold[i + 1] >> 1)) {
  91. return 0;
  92. } else {
  93. return 1;
  94. }
  95. }
  96. // Exit if the difference is much larger than the threshold.
  97. if (skin_color_diff > (skin_threshold[i + 1] << 3)) {
  98. return 0;
  99. }
  100. }
  101. return 0;
  102. }
  103. }
  104. }
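// Returns the largest absolute difference between the corner pixel at
// (offsetx, offsety) and its three neighbors in the sgnx/sgny direction;
// used below as a cheap corner-gradient measure for dot-artifact detection.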
  105. static int macroblock_corner_grad(unsigned char *signal, int stride,
  106. int offsetx, int offsety, int sgnx,
  107. int sgny) {
  108. int y1 = signal[offsetx * stride + offsety];
  109. int y2 = signal[offsetx * stride + offsety + sgny];
  110. int y3 = signal[(offsetx + sgnx) * stride + offsety];
  111. int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
  112. return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
  113. }
  114. static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x,
  115. unsigned char *target_last, int stride,
  116. unsigned char *last_ref, int mb_row,
  117. int mb_col, int channel) {
  118. int threshold1 = 6;
  119. int threshold2 = 3;
  120. unsigned int max_num = (cpi->common.MBs) / 10;
  121. int grad_last = 0;
  122. int grad_source = 0;
  123. int index = mb_row * cpi->common.mb_cols + mb_col;
  124. // Threshold for #consecutive (base layer) frames using zero_last mode.
  125. int num_frames = 30;
  126. int shift = 15;
  127. if (channel > 0) {
  128. shift = 7;
  129. }
  130. if (cpi->oxcf.number_of_layers > 1) {
  131. num_frames = 20;
  132. }
  133. x->zero_last_dot_suppress = 0;
  134. // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
  135. // (i.e., for at least |x| consecutive frames) are candidates for increasing
  136. // the rd adjustment for zero_last mode.
  137. // Only allow this for at most |max_num| blocks per frame.
  138. // Don't allow this for screen content input.
  139. if (cpi->current_layer == 0 &&
  140. cpi->consec_zero_last_mvbias[index] > num_frames &&
  141. x->mbs_zero_last_dot_suppress < max_num &&
  142. !cpi->oxcf.screen_content_mode) {
  143. // If this block is checked here, label it so we don't check it again until
  144. // ~|x| frames later.
  145. x->zero_last_dot_suppress = 1;
  146. // A dot artifact is noticeable as a strong gradient at the corners of a
  147. // macroblock in flat areas. As a simple detector for now, we look for a
  148. // high corner gradient on the last ref, and a smaller gradient on the source.
  149. // Check 4 corners, return if any satisfy condition.
  150. // Top-left:
  151. grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
  152. grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
  153. if (grad_last >= threshold1 && grad_source <= threshold2) {
  154. x->mbs_zero_last_dot_suppress++;
  155. return 1;
  156. }
  157. // Top-right:
  158. grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
  159. grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
  160. if (grad_last >= threshold1 && grad_source <= threshold2) {
  161. x->mbs_zero_last_dot_suppress++;
  162. return 1;
  163. }
  164. // Bottom-left:
  165. grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
  166. grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
  167. if (grad_last >= threshold1 && grad_source <= threshold2) {
  168. x->mbs_zero_last_dot_suppress++;
  169. return 1;
  170. }
  171. // Bottom-right:
  172. grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
  173. grad_source =
  174. macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
  175. if (grad_last >= threshold1 && grad_source <= threshold2) {
  176. x->mbs_zero_last_dot_suppress++;
  177. return 1;
  178. }
  179. return 0;
  180. }
  181. return 0;
  182. }
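/* Stub used when the fractional (sub-pel) motion search step is skipped:
 * simply convert the full-pel best MV into the encoder's sub-pel units
 * (<< 3) and return 0. */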
  183. int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
  184. int_mv *bestmv, int_mv *ref_mv,
  185. int error_per_bit,
  186. const vp8_variance_fn_ptr_t *vfp,
  187. int *mvcost[2], int *distortion,
  188. unsigned int *sse) {
  189. (void)b;
  190. (void)d;
  191. (void)ref_mv;
  192. (void)error_per_bit;
  193. (void)vfp;
  194. (void)mb;
  195. (void)mvcost;
  196. (void)distortion;
  197. (void)sse;
  198. bestmv->as_mv.row <<= 3;
  199. bestmv->as_mv.col <<= 3;
  200. return 0;
  201. }
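/* Prediction error of the 16x16 luma block against the reference at
 * this_mv: the sub-pixel variance function (svf) is used when the MV has
 * a fractional component, otherwise the plain variance function (vf). */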
  202. int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp,
  203. unsigned int *sse, int_mv this_mv) {
  204. BLOCK *b = &mb->block[0];
  205. BLOCKD *d = &mb->e_mbd.block[0];
  206. unsigned char *what = (*(b->base_src) + b->src);
  207. int what_stride = b->src_stride;
  208. int pre_stride = mb->e_mbd.pre.y_stride;
  209. unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;
  210. int in_what_stride = pre_stride;
  211. int xoffset = this_mv.as_mv.col & 7;
  212. int yoffset = this_mv.as_mv.row & 7;
  213. in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
  214. if (xoffset | yoffset) {
  215. return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what,
  216. what_stride, sse);
  217. } else {
  218. return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
  219. }
  220. }
  221. static int get_prediction_error(BLOCK *be, BLOCKD *b) {
  222. unsigned char *sptr;
  223. unsigned char *dptr;
  224. sptr = (*(be->base_src) + be->src);
  225. dptr = b->predictor;
  226. return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
  227. }
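/* Each candidate 4x4 intra mode tested below is scored with RDCOST(),
 * which folds the mode's rate cost and its prediction error into one
 * value (roughly rate scaled by rdmult plus distortion scaled by rddiv);
 * the mode with the smallest cost is kept and then encoded in place. */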
  228. static int pick_intra4x4block(MACROBLOCK *x, int ib,
  229. B_PREDICTION_MODE *best_mode,
  230. const int *mode_costs,
  231. int *bestrate, int *bestdistortion) {
  232. BLOCKD *b = &x->e_mbd.block[ib];
  233. BLOCK *be = &x->block[ib];
  234. int dst_stride = x->e_mbd.dst.y_stride;
  235. unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
  236. B_PREDICTION_MODE mode;
  237. int best_rd = INT_MAX;
  238. int rate;
  239. int distortion;
  240. unsigned char *Above = dst - dst_stride;
  241. unsigned char *yleft = dst - 1;
  242. unsigned char top_left = Above[-1];
  243. for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
  244. int this_rd;
  245. rate = mode_costs[mode];
  246. vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
  247. top_left);
  248. distortion = get_prediction_error(be, b);
  249. this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
  250. if (this_rd < best_rd) {
  251. *bestrate = rate;
  252. *bestdistortion = distortion;
  253. best_rd = this_rd;
  254. *best_mode = mode;
  255. }
  256. }
  257. b->bmi.as_mode = *best_mode;
  258. vp8_encode_intra4x4block(x, ib);
  259. return best_rd;
  260. }
  261. static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
  262. MACROBLOCKD *const xd = &mb->e_mbd;
  263. int i;
  264. int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
  265. int error;
  266. int distortion = 0;
  267. const int *bmode_costs;
  268. intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
  269. bmode_costs = mb->inter_bmode_costs;
  270. for (i = 0; i < 16; ++i) {
  271. MODE_INFO *const mic = xd->mode_info_context;
  272. const int mis = xd->mode_info_stride;
  273. B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
  274. int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);
  275. if (mb->e_mbd.frame_type == KEY_FRAME) {
  276. const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
  277. const B_PREDICTION_MODE L = left_block_mode(mic, i);
  278. bmode_costs = mb->bmode_costs[A][L];
  279. }
  280. pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
  281. cost += r;
  282. distortion += d;
  283. mic->bmi[i].as_mode = best_mode;
  284. /* Break out of the loop if we have already exceeded the best-so-far
  285. * value that was passed in.
  286. */
  287. if (distortion > *best_dist) break;
  288. }
  289. *Rate = cost;
  290. if (i == 16) {
  291. *best_dist = distortion;
  292. error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
  293. } else {
  294. *best_dist = INT_MAX;
  295. error = INT_MAX;
  296. }
  297. return error;
  298. }
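/* Chroma intra mode selection: accumulate the squared error of the DC,
 * V, H and TM predictors against the source 8x8 U and V blocks and keep
 * the mode with the smallest total error (no rate term is used here). */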
  299. static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
  300. MACROBLOCKD *x = &mb->e_mbd;
  301. unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
  302. unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
  303. unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
  304. unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
  305. int uvsrc_stride = mb->block[16].src_stride;
  306. unsigned char uleft_col[8];
  307. unsigned char vleft_col[8];
  308. unsigned char utop_left = uabove_row[-1];
  309. unsigned char vtop_left = vabove_row[-1];
  310. int i, j;
  311. int expected_udc;
  312. int expected_vdc;
  313. int shift;
  314. int Uaverage = 0;
  315. int Vaverage = 0;
  316. int diff;
  317. int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
  318. MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
  319. for (i = 0; i < 8; ++i) {
  320. uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
  321. vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
  322. }
  323. if (!x->up_available && !x->left_available) {
  324. expected_udc = 128;
  325. expected_vdc = 128;
  326. } else {
  327. shift = 2;
  328. if (x->up_available) {
  329. for (i = 0; i < 8; ++i) {
  330. Uaverage += uabove_row[i];
  331. Vaverage += vabove_row[i];
  332. }
  333. shift++;
  334. }
  335. if (x->left_available) {
  336. for (i = 0; i < 8; ++i) {
  337. Uaverage += uleft_col[i];
  338. Vaverage += vleft_col[i];
  339. }
  340. shift++;
  341. }
  342. expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
  343. expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
  344. }
  345. for (i = 0; i < 8; ++i) {
  346. for (j = 0; j < 8; ++j) {
  347. int predu = uleft_col[i] + uabove_row[j] - utop_left;
  348. int predv = vleft_col[i] + vabove_row[j] - vtop_left;
  349. int u_p, v_p;
  350. u_p = usrc_ptr[j];
  351. v_p = vsrc_ptr[j];
  352. if (predu < 0) predu = 0;
  353. if (predu > 255) predu = 255;
  354. if (predv < 0) predv = 0;
  355. if (predv > 255) predv = 255;
  356. diff = u_p - expected_udc;
  357. pred_error[DC_PRED] += diff * diff;
  358. diff = v_p - expected_vdc;
  359. pred_error[DC_PRED] += diff * diff;
  360. diff = u_p - uabove_row[j];
  361. pred_error[V_PRED] += diff * diff;
  362. diff = v_p - vabove_row[j];
  363. pred_error[V_PRED] += diff * diff;
  364. diff = u_p - uleft_col[i];
  365. pred_error[H_PRED] += diff * diff;
  366. diff = v_p - vleft_col[i];
  367. pred_error[H_PRED] += diff * diff;
  368. diff = u_p - predu;
  369. pred_error[TM_PRED] += diff * diff;
  370. diff = v_p - predv;
  371. pred_error[TM_PRED] += diff * diff;
  372. }
  373. usrc_ptr += uvsrc_stride;
  374. vsrc_ptr += uvsrc_stride;
  375. if (i == 3) {
  376. usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
  377. vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
  378. }
  379. }
  380. for (i = DC_PRED; i <= TM_PRED; ++i) {
  381. if (best_error > pred_error[i]) {
  382. best_error = pred_error[i];
  383. best_mode = (MB_PREDICTION_MODE)i;
  384. }
  385. }
  386. mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
  387. }
  388. static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
  389. MACROBLOCKD *xd = &x->e_mbd;
  390. /* Split MV modes are currently not supported when RD is not enabled;
  391. * therefore, we only need to modify MVcount in NEWMV mode. */
  392. if (xd->mode_info_context->mbmi.mode == NEWMV) {
  393. x->MVcount[0][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.row -
  394. best_ref_mv->as_mv.row) >>
  395. 1)]++;
  396. x->MVcount[1][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.col -
  397. best_ref_mv->as_mv.col) >>
  398. 1)]++;
  399. }
  400. }
  401. #if CONFIG_MULTI_RES_ENCODING
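/* Multi-resolution encoding: read back the lower-resolution encoder's
 * per-MB mode, reference frame, MV and dissimilarity, and scale the MV up
 * by the down-sampling factor (num/den) so it can be reused here. */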
  402. static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd,
  403. int *dissim, int *parent_ref_frame,
  404. MB_PREDICTION_MODE *parent_mode,
  405. int_mv *parent_ref_mv, int mb_row,
  406. int mb_col) {
  407. LOWER_RES_MB_INFO *store_mode_info =
  408. ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info;
  409. unsigned int parent_mb_index;
  410. /* Consider different down_sampling_factor. */
  411. {
  412. /* TODO: Removed the loop that supports special down_sampling_factor
  413. * such as 2, 4, 8. Will revisit it if needed.
  414. * Should also try using a look-up table to see if it helps
  415. * performance. */
  416. int parent_mb_row, parent_mb_col;
  417. parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den /
  418. cpi->oxcf.mr_down_sampling_factor.num;
  419. parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den /
  420. cpi->oxcf.mr_down_sampling_factor.num;
  421. parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col;
  422. }
  423. /* Read lower-resolution mode & motion result from memory.*/
  424. *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
  425. *parent_mode = store_mode_info[parent_mb_index].mode;
  426. *dissim = store_mode_info[parent_mb_index].dissim;
  427. /* For highest-resolution encoder, adjust dissim value. Lower its quality
  428. * for good performance. */
  429. if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
  430. *dissim >>= 1;
  431. if (*parent_ref_frame != INTRA_FRAME) {
  432. /* Consider different down_sampling_factor.
  433. * The result can be rounded to be more precise, but it takes more time.
  434. */
  435. (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row *
  436. cpi->oxcf.mr_down_sampling_factor.num /
  437. cpi->oxcf.mr_down_sampling_factor.den;
  438. (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col *
  439. cpi->oxcf.mr_down_sampling_factor.num /
  440. cpi->oxcf.mr_down_sampling_factor.den;
  441. vp8_clamp_mv2(parent_ref_mv, xd);
  442. }
  443. }
  444. #endif
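/* Encode breakout: if the luma prediction error is already below a
 * threshold derived from the AC dequant step (or the user-configured
 * encode_breakout), also check the chroma error and, if small enough,
 * mark the macroblock so the residual can be skipped. */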
  445. static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
  446. MACROBLOCKD *xd = &x->e_mbd;
  447. unsigned int threshold =
  448. (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
  449. if (threshold < x->encode_breakout) threshold = x->encode_breakout;
  450. if (sse < threshold) {
  451. /* Check u and v to make sure skip is ok */
  452. unsigned int sse2 = 0;
  453. sse2 = VP8_UVSSE(x);
  454. if (sse2 * 2 < x->encode_breakout) {
  455. x->skip = 1;
  456. } else {
  457. x->skip = 0;
  458. }
  459. }
  460. }
  461. static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
  462. VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) {
  463. MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
  464. int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
  465. int this_rd;
  466. int denoise_aggressive = 0;
  467. /* Exit early and don't compute the distortion if this macroblock
  468. * is marked inactive. */
  469. if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
  470. *sse = 0;
  471. *distortion2 = 0;
  472. x->skip = 1;
  473. return INT_MAX;
  474. }
  475. if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
  476. cpi->common.full_pixel == 1) {
  477. *distortion2 =
  478. vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
  479. }
  480. this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
  481. #if CONFIG_TEMPORAL_DENOISING
  482. if (cpi->oxcf.noise_sensitivity > 0) {
  483. denoise_aggressive =
  484. (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
  485. }
  486. #endif
  487. // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
  488. // TODO: We should also add a condition on the distance of the closest reference to the current frame.
  489. if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV &&
  490. x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
  491. (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) {
  492. // No adjustment if block is considered to be skin area.
  493. if (x->is_skin) rd_adj = 100;
  494. this_rd = (int)(((int64_t)this_rd) * rd_adj / 100);
  495. }
  496. check_for_encode_breakout(*sse, x);
  497. return this_rd;
  498. }
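/* When a large share of the previous frame chose ZEROMV on LAST
 * (lf_zeromv_pct > 40), look at the left, above-left and above neighbors;
 * if they are inter coded with near-zero motion, lower rd_adjustment to
 * 90 or 80 so that ZEROMV is favored for this macroblock. */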
  499. static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
  500. int *rd_adjustment) {
  501. MODE_INFO *mic = x->e_mbd.mode_info_context;
  502. int_mv mv_l, mv_a, mv_al;
  503. int local_motion_check = 0;
  504. if (cpi->lf_zeromv_pct > 40) {
  505. /* left mb */
  506. mic -= 1;
  507. mv_l = mic->mbmi.mv;
  508. if (mic->mbmi.ref_frame != INTRA_FRAME) {
  509. if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
  510. local_motion_check++;
  511. }
  512. }
  513. /* above-left mb */
  514. mic -= x->e_mbd.mode_info_stride;
  515. mv_al = mic->mbmi.mv;
  516. if (mic->mbmi.ref_frame != INTRA_FRAME) {
  517. if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
  518. local_motion_check++;
  519. }
  520. }
  521. /* above mb */
  522. mic += 1;
  523. mv_a = mic->mbmi.mv;
  524. if (mic->mbmi.ref_frame != INTRA_FRAME) {
  525. if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
  526. local_motion_check++;
  527. }
  528. }
  529. if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
  530. local_motion_check > 0) ||
  531. local_motion_check > 2) {
  532. *rd_adjustment = 80;
  533. } else if (local_motion_check > 0) {
  534. *rd_adjustment = 90;
  535. }
  536. }
  537. }
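/* Real-time (non-RD) inter/intra mode selection for one macroblock:
 * iterate over the mode / reference-frame pairs given by vp8_mode_order
 * and vp8_ref_frame_order, skip modes whose adaptive rd_threshes have
 * grown too large, and keep the candidate with the lowest rate-distortion
 * cost in best_mbmode. */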
  538. void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
  539. int recon_uvoffset, int *returnrate,
  540. int *returndistortion, int *returnintra, int mb_row,
  541. int mb_col) {
  542. BLOCK *b = &x->block[0];
  543. BLOCKD *d = &x->e_mbd.block[0];
  544. MACROBLOCKD *xd = &x->e_mbd;
  545. MB_MODE_INFO best_mbmode;
  546. int_mv best_ref_mv_sb[2];
  547. int_mv mode_mv_sb[2][MB_MODE_COUNT];
  548. int_mv best_ref_mv;
  549. int_mv *mode_mv;
  550. MB_PREDICTION_MODE this_mode;
  551. int num00;
  552. int mdcounts[4];
  553. int best_rd = INT_MAX;
  554. int rd_adjustment = 100;
  555. int best_intra_rd = INT_MAX;
  556. int mode_index;
  557. int rate;
  558. int rate2;
  559. int distortion2;
  560. int bestsme = INT_MAX;
  561. int best_mode_index = 0;
  562. unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
  563. #if CONFIG_TEMPORAL_DENOISING
  564. unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
  565. #endif
  566. int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
  567. #if CONFIG_MULTI_RES_ENCODING
  568. int dissim = INT_MAX;
  569. int parent_ref_frame = 0;
  570. int_mv parent_ref_mv;
  571. MB_PREDICTION_MODE parent_mode = 0;
  572. int parent_ref_valid = 0;
  573. #endif
  574. int_mv mvp;
  575. int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  576. int saddone = 0;
  577. /* Search range obtained from mv_pred(); it uses step_param levels (0-7). */
  578. int sr = 0;
  579. unsigned char *plane[4][3];
  580. int ref_frame_map[4];
  581. int sign_bias = 0;
  582. int dot_artifact_candidate = 0;
  583. get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
  584. // If the current frame is using LAST as a reference, check for
  585. // biasing the mode selection for dot artifacts.
  586. if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
  587. unsigned char *target_y = x->src.y_buffer;
  588. unsigned char *target_u = x->block[16].src + *x->block[16].base_src;
  589. unsigned char *target_v = x->block[20].src + *x->block[20].base_src;
  590. int stride = x->src.y_stride;
  591. int stride_uv = x->block[16].src_stride;
  592. #if CONFIG_TEMPORAL_DENOISING
  593. if (cpi->oxcf.noise_sensitivity) {
  594. const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
  595. target_y =
  596. cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
  597. stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
  598. if (uv_denoise) {
  599. target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
  600. recon_uvoffset;
  601. target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
  602. recon_uvoffset;
  603. stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
  604. }
  605. }
  606. #endif
  607. dot_artifact_candidate = check_dot_artifact_candidate(
  608. cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0);
  609. // If not found in Y channel, check UV channel.
  610. if (!dot_artifact_candidate) {
  611. dot_artifact_candidate = check_dot_artifact_candidate(
  612. cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1);
  613. if (!dot_artifact_candidate) {
  614. dot_artifact_candidate = check_dot_artifact_candidate(
  615. cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col,
  616. 2);
  617. }
  618. }
  619. }
  620. #if CONFIG_MULTI_RES_ENCODING
  621. // |parent_ref_valid| will be set here if potentially we can do mv reuse for
  622. // this higher-resolution (|cpi->oxcf.mr_encoder_id| > 0) frame.
  623. // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
  624. // the current macroblock below.
  625. parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
  626. if (parent_ref_valid) {
  627. int parent_ref_flag;
  628. get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode,
  629. &parent_ref_mv, mb_row, mb_col);
  630. /* TODO(jkoleszar): The references available (ref_frame_flags) to the
  631. * lower res encoder should match those available to this encoder, but
  632. * there seems to be a situation where this mismatch can happen in the
  633. * case of frame dropping and temporal layers. For example,
  634. * GOLD being disallowed in ref_frame_flags, but being returned as
  635. * parent_ref_frame.
  636. *
  637. * In this event, take the conservative approach of disabling the
  638. * lower res info for this MB.
  639. */
  640. parent_ref_flag = 0;
  641. // Note availability for mv reuse is only based on last and golden.
  642. if (parent_ref_frame == LAST_FRAME)
  643. parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
  644. else if (parent_ref_frame == GOLDEN_FRAME)
  645. parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
  646. // assert(!parent_ref_frame || parent_ref_flag);
  647. // If |parent_ref_frame| did not match either last or golden then
  648. // shut off mv reuse.
  649. if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0;
  650. // Don't do mv reuse since we want to allow for another mode besides
  651. // ZEROMV_LAST to remove dot artifact.
  652. if (dot_artifact_candidate) parent_ref_valid = 0;
  653. }
  654. #endif
  655. // Check if current macroblock is in skin area.
  656. {
  657. const int y = (x->src.y_buffer[7 * x->src.y_stride + 7] +
  658. x->src.y_buffer[7 * x->src.y_stride + 8] +
  659. x->src.y_buffer[8 * x->src.y_stride + 7] +
  660. x->src.y_buffer[8 * x->src.y_stride + 8]) >>
  661. 2;
  662. const int cb = (x->src.u_buffer[3 * x->src.uv_stride + 3] +
  663. x->src.u_buffer[3 * x->src.uv_stride + 4] +
  664. x->src.u_buffer[4 * x->src.uv_stride + 3] +
  665. x->src.u_buffer[4 * x->src.uv_stride + 4]) >>
  666. 2;
  667. const int cr = (x->src.v_buffer[3 * x->src.uv_stride + 3] +
  668. x->src.v_buffer[3 * x->src.uv_stride + 4] +
  669. x->src.v_buffer[4 * x->src.uv_stride + 3] +
  670. x->src.v_buffer[4 * x->src.uv_stride + 4]) >>
  671. 2;
  672. x->is_skin = 0;
  673. if (!cpi->oxcf.screen_content_mode) {
  674. int block_index = mb_row * cpi->common.mb_cols + mb_col;
  675. x->is_skin = is_skin_color(y, cb, cr, cpi->consec_zero_last[block_index]);
  676. }
  677. }
  678. #if CONFIG_TEMPORAL_DENOISING
  679. if (cpi->oxcf.noise_sensitivity) {
  680. // Under aggressive denoising mode, should we use the skin map to reduce
  681. // the denoiser and ZEROMV bias? Will need to revisit the accuracy of
  682. // this detection for very noisy input. For now keep this as is
  683. // (i.e., don't turn it off).
  684. // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
  685. // x->is_skin = 0;
  686. }
  687. #endif
  688. mode_mv = mode_mv_sb[sign_bias];
  689. best_ref_mv.as_int = 0;
  690. memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
  691. memset(&best_mbmode, 0, sizeof(best_mbmode));
  692. /* Setup search priorities */
  693. #if CONFIG_MULTI_RES_ENCODING
  694. if (parent_ref_valid && parent_ref_frame && dissim < 8) {
  695. ref_frame_map[0] = -1;
  696. ref_frame_map[1] = parent_ref_frame;
  697. ref_frame_map[2] = -1;
  698. ref_frame_map[3] = -1;
  699. } else
  700. #endif
  701. get_reference_search_order(cpi, ref_frame_map);
  702. /* Check to see if there is at least 1 valid reference frame that we
  703. * need to calculate near_mvs for.
  704. */
  705. if (ref_frame_map[1] > 0) {
  706. sign_bias = vp8_find_near_mvs_bias(
  707. &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
  708. mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
  709. mode_mv = mode_mv_sb[sign_bias];
  710. best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
  711. }
  712. /* Count of the number of MBs tested so far this frame */
  713. x->mbs_tested_so_far++;
  714. *returnintra = INT_MAX;
  715. x->skip = 0;
  716. x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
  717. /* If the frame has a large static background and the current MB is in a
  718. * low-motion area, its mode decision is biased toward ZEROMV mode.
  719. * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
  720. * At such speed settings, ZEROMV is already heavily favored.
  721. */
  722. if (cpi->Speed < 12) {
  723. calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
  724. }
  725. #if CONFIG_TEMPORAL_DENOISING
  726. if (cpi->oxcf.noise_sensitivity) {
  727. rd_adjustment = (int)(rd_adjustment *
  728. cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
  729. }
  730. #endif
  731. if (dot_artifact_candidate) {
  732. // Bias against ZEROMV_LAST mode.
  733. rd_adjustment = 150;
  734. }
  735. /* If we encode a new MV this is important:
  736. * find the best new motion vector.
  737. */
  738. for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
  739. int frame_cost;
  740. int this_rd = INT_MAX;
  741. int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
  742. if (best_rd <= x->rd_threshes[mode_index]) continue;
  743. if (this_ref_frame < 0) continue;
  744. x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
  745. /* everything but intra */
  746. if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
  747. x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
  748. x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
  749. x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
  750. if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
  751. sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
  752. mode_mv = mode_mv_sb[sign_bias];
  753. best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
  754. }
  755. #if CONFIG_MULTI_RES_ENCODING
  756. if (parent_ref_valid) {
  757. if (vp8_mode_order[mode_index] == NEARESTMV &&
  758. mode_mv[NEARESTMV].as_int == 0)
  759. continue;
  760. if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0)
  761. continue;
  762. if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV &&
  763. best_ref_mv.as_int == 0)
  764. continue;
  765. else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 &&
  766. best_ref_mv.as_int == parent_ref_mv.as_int)
  767. continue;
  768. }
  769. #endif
  770. }
  771. /* Check to see if the testing frequency for this mode is at its max.
  772. * If so, then prevent it from being tested and increase the threshold
  773. * for its testing. */
  774. if (x->mode_test_hit_counts[mode_index] &&
  775. (cpi->mode_check_freq[mode_index] > 1)) {
  776. if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
  777. x->mode_test_hit_counts[mode_index])) {
  778. /* Increase the threshold for coding this mode to make it less
  779. * likely to be chosen */
  780. x->rd_thresh_mult[mode_index] += 4;
  781. if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
  782. x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
  783. }
  784. x->rd_threshes[mode_index] =
  785. (cpi->rd_baseline_thresh[mode_index] >> 7) *
  786. x->rd_thresh_mult[mode_index];
  787. continue;
  788. }
  789. }
  790. /* We have now reached the point where we are going to test the current
  791. * mode so increment the counter for the number of times it has been
  792. * tested */
  793. x->mode_test_hit_counts[mode_index]++;
  794. rate2 = 0;
  795. distortion2 = 0;
  796. this_mode = vp8_mode_order[mode_index];
  797. x->e_mbd.mode_info_context->mbmi.mode = this_mode;
  798. x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
  799. /* Work out the cost associated with selecting the reference frame */
  800. frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
  801. rate2 += frame_cost;
  802. /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
  803. * unless ARNR filtering is enabled in which case we want
  804. * an unfiltered alternative */
  805. if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
  806. if (this_mode != ZEROMV ||
  807. x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
  808. continue;
  809. }
  810. }
  811. switch (this_mode) {
  812. case B_PRED:
  813. /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
  814. distortion2 = best_rd_sse;
  815. pick_intra4x4mby_modes(x, &rate, &distortion2);
  816. if (distortion2 == INT_MAX) {
  817. this_rd = INT_MAX;
  818. } else {
  819. rate2 += rate;
  820. distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
  821. x->e_mbd.predictor, 16, &sse);
  822. this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
  823. if (this_rd < best_intra_rd) {
  824. best_intra_rd = this_rd;
  825. *returnintra = distortion2;
  826. }
  827. }
  828. break;
  829. case SPLITMV:
  830. /* Split MV modes currently not supported when RD is not enabled. */
  831. break;
  832. case DC_PRED:
  833. case V_PRED:
  834. case H_PRED:
  835. case TM_PRED:
  836. vp8_build_intra_predictors_mby_s(
  837. xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
  838. xd->dst.y_stride, xd->predictor, 16);
  839. distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
  840. x->e_mbd.predictor, 16, &sse);
  841. rate2 += x->mbmode_cost[x->e_mbd.frame_type]
  842. [x->e_mbd.mode_info_context->mbmi.mode];
  843. this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
  844. if (this_rd < best_intra_rd) {
  845. best_intra_rd = this_rd;
  846. *returnintra = distortion2;
  847. }
  848. break;
  849. case NEWMV: {
  850. int thissme;
  851. int step_param;
  852. int further_steps;
  853. int n = 0;
  854. int sadpb = x->sadperbit16;
  855. int_mv mvp_full;
  856. int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
  857. int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
  858. int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
  859. int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
  860. int tmp_col_min = x->mv_col_min;
  861. int tmp_col_max = x->mv_col_max;
  862. int tmp_row_min = x->mv_row_min;
  863. int tmp_row_max = x->mv_row_max;
  864. int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
  865. /* Further step/diamond searches as necessary */
  866. step_param = cpi->sf.first_step + speed_adjust;
  867. #if CONFIG_MULTI_RES_ENCODING
  868. /* If the lower-res frame is not available for mv reuse (because of
  869. frame dropping or a different temporal layer pattern), then the
  870. higher-resolution encoder does motion search without any previous
  871. knowledge. Also, since last frame motion info is not stored, we
  872. cannot use improved_mv_pred. */
  873. if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0;
  874. // Only use parent MV as predictor if this candidate reference frame
  875. // (|this_ref_frame|) is equal to |parent_ref_frame|.
  876. if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) {
  877. /* Use parent MV as predictor. Adjust search range
  878. * accordingly.
  879. */
  880. mvp.as_int = parent_ref_mv.as_int;
  881. mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3;
  882. mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3;
  883. if (dissim <= 32)
  884. step_param += 3;
  885. else if (dissim <= 128)
  886. step_param += 2;
  887. else
  888. step_param += 1;
  889. } else
  890. #endif
  891. {
  892. if (sf_improved_mv_pred) {
  893. if (!saddone) {
  894. vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
  895. saddone = 1;
  896. }
  897. vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
  898. x->e_mbd.mode_info_context->mbmi.ref_frame,
  899. cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
  900. sr += speed_adjust;
  901. /* adjust search range according to sr from mv prediction */
  902. if (sr > step_param) step_param = sr;
  903. mvp_full.as_mv.col = mvp.as_mv.col >> 3;
  904. mvp_full.as_mv.row = mvp.as_mv.row >> 3;
  905. } else {
  906. mvp.as_int = best_ref_mv.as_int;
  907. mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3;
  908. mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3;
  909. }
  910. }
  911. #if CONFIG_MULTI_RES_ENCODING
  912. if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
  913. dissim <= 2 &&
  914. VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
  915. abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) {
  916. d->bmi.mv.as_int = mvp_full.as_int;
  917. mode_mv[NEWMV].as_int = mvp_full.as_int;
  918. cpi->find_fractional_mv_step(
  919. x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
  920. &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
  921. } else
  922. #endif
  923. {
  924. /* Get intersection of UMV window and valid MV window to
  925. * reduce # of checks in diamond search. */
  926. if (x->mv_col_min < col_min) x->mv_col_min = col_min;
  927. if (x->mv_col_max > col_max) x->mv_col_max = col_max;
  928. if (x->mv_row_min < row_min) x->mv_row_min = row_min;
  929. if (x->mv_row_max > row_max) x->mv_row_max = row_max;
  930. further_steps =
  931. (cpi->Speed >= 8)
  932. ? 0
  933. : (cpi->sf.max_step_search_steps - 1 - step_param);
  934. if (cpi->sf.search_method == HEX) {
  935. #if CONFIG_MULTI_RES_ENCODING
  936. /* TODO: In higher-res pick_inter_mode, step_param is used to
  937. * modify the hex search range. Here, set step_param to 0 so as not
  938. * to change the behavior in the lowest-resolution encoder.
  939. * Will improve it later.
  940. */
  941. /* Set step_param to 0 to ensure a large-range motion search
  942. * when mv reuse is not valid (i.e. |parent_ref_valid| = 0),
  943. * or if this candidate reference frame (|this_ref_frame|) is
  944. * not equal to |parent_ref_frame|.
  945. */
  946. if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
  947. step_param = 0;
  948. #endif
  949. bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
  950. sadpb, &cpi->fn_ptr[BLOCK_16X16],
  951. x->mvsadcost, x->mvcost, &best_ref_mv);
  952. mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
  953. } else {
  954. bestsme = cpi->diamond_search_sad(
  955. x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
  956. &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
  957. mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
  958. /* Further step/diamond searches as necessary */
  959. n = num00;
  960. num00 = 0;
  961. while (n < further_steps) {
  962. n++;
  963. if (num00) {
  964. num00--;
  965. } else {
  966. thissme = cpi->diamond_search_sad(
  967. x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
  968. &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
  969. if (thissme < bestsme) {
  970. bestsme = thissme;
  971. mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
  972. } else {
  973. d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
  974. }
  975. }
  976. }
  977. }
  978. x->mv_col_min = tmp_col_min;
  979. x->mv_col_max = tmp_col_max;
  980. x->mv_row_min = tmp_row_min;
  981. x->mv_row_max = tmp_row_max;
  982. if (bestsme < INT_MAX) {
  983. cpi->find_fractional_mv_step(
  984. x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
  985. &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
  986. }
  987. }
  988. mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
  989. // The clamp below is not necessary from the perspective
  990. // of the VP8 bitstream, but is added to improve Chromecast
  991. // mirroring's robustness. Please do not remove.
  992. vp8_clamp_mv2(&mode_mv[this_mode], xd);
  993. /* mv cost; */
  994. rate2 +=
  995. vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
  996. }
  997. case NEARESTMV:
  998. case NEARMV:
  999. if (mode_mv[this_mode].as_int == 0) continue;
  1000. case ZEROMV:
  1001. /* Trap vectors that reach beyond the UMV borders.
  1002. * Note that ALL NEWMV, NEARESTMV, NEARMV and ZEROMV code drops
  1003. * through to this point because of the lack of break statements
  1004. * in the previous two cases.
  1005. */
  1006. if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
  1007. ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
  1008. ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
  1009. ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
  1010. continue;
  1011. }
  1012. rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
  1013. x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
  1014. this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
  1015. rd_adjustment);
  1016. break;
  1017. default: break;
  1018. }
  1019. #if CONFIG_TEMPORAL_DENOISING
  1020. if (cpi->oxcf.noise_sensitivity) {
  1021. /* Store for later use by denoiser. */
  1022. // Don't denoise with GOLDEN or ALTREF if they are old reference
  1023. // frames (more than MAX_GF_ARF_DENOISE_RANGE frames in the past).
  1024. int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
  1025. (cpi->common.current_video_frame -
  1026. cpi->current_ref_frames[this_ref_frame] >
  1027. MAX_GF_ARF_DENOISE_RANGE))
  1028. ? 1
  1029. : 0;
  1030. if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) {
  1031. zero_mv_sse = sse;
  1032. x->best_zeromv_reference_frame =
  1033. x->e_mbd.mode_info_context->mbmi.ref_frame;
  1034. }
  1035. // Store the best NEWMV in x for later use in the denoiser.
  1036. if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse &&
  1037. !skip_old_reference) {
  1038. best_sse = sse;
  1039. x->best_sse_inter_mode = NEWMV;
  1040. x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
  1041. x->need_to_clamp_best_mvs =
  1042. x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
  1043. x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
  1044. }
  1045. }
  1046. #endif
  1047. if (this_rd < best_rd || x->skip) {
  1048. /* Note index of best mode */
  1049. best_mode_index = mode_index;
  1050. *returnrate = rate2;
  1051. *returndistortion = distortion2;
  1052. best_rd_sse = sse;
  1053. best_rd = this_rd;
  1054. memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
  1055. sizeof(MB_MODE_INFO));
  1056. /* Testing this mode gave rise to an improvement in best error
  1057. * score. Lower threshold a bit for next time
  1058. */
  1059. x->rd_thresh_mult[mode_index] =
  1060. (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
  1061. ? x->rd_thresh_mult[mode_index] - 2
  1062. : MIN_THRESHMULT;
  1063. x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
  1064. x->rd_thresh_mult[mode_index];
  1065. }
  1066. /* If the mode did not help improve the best error case then raise the
  1067. * threshold for testing that mode next time around.
  1068. */
  1069. else {
  1070. x->rd_thresh_mult[mode_index] += 4;
  1071. if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
  1072. x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
  1073. }
  1074. x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
  1075. x->rd_thresh_mult[mode_index];
  1076. }
  1077. if (x->skip) break;
  1078. }
  1079. /* Reduce the activation RD thresholds for the best choice mode */
  1080. if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
  1081. (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
  1082. int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
  1083. x->rd_thresh_mult[best_mode_index] =
  1084. (x->rd_thresh_mult[best_mode_index] >=
  1085. (MIN_THRESHMULT + best_adjustment))
  1086. ? x->rd_thresh_mult[best_mode_index] - best_adjustment
  1087. : MIN_THRESHMULT;
  1088. x->rd_threshes[best_mode_index] =
  1089. (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
  1090. x->rd_thresh_mult[best_mode_index];
  1091. }
  1092. {
  1093. int this_rdbin = (*returndistortion >> 7);
  1094. if (this_rdbin >= 1024) {
  1095. this_rdbin = 1023;
  1096. }
  1097. x->error_bins[this_rdbin]++;
  1098. }
  1099. #if CONFIG_TEMPORAL_DENOISING
  1100. if (cpi->oxcf.noise_sensitivity) {
  1101. int block_index = mb_row * cpi->common.mb_cols + mb_col;
  1102. int reevaluate = 0;
  1103. int is_noisy = 0;
  1104. if (x->best_sse_inter_mode == DC_PRED) {
  1105. /* No best MV found. */
  1106. x->best_sse_inter_mode = best_mbmode.mode;
  1107. x->best_sse_mv = best_mbmode.mv;
  1108. x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
  1109. x->best_reference_frame = best_mbmode.ref_frame;
  1110. best_sse = best_rd_sse;
  1111. }
  1112. // For non-skin blocks that have selected ZEROMV for the current frame,
  1113. // and have been selecting ZEROMV_LAST (on the base layer frame) for at
  1114. // least |x~20| consecutive past frames in a row, label the block for a
  1115. // possible increase in denoising strength. We also condition this
  1116. // labeling on there being significant denoising in the scene.
  1117. if (cpi->oxcf.noise_sensitivity == 4) {
  1118. if (cpi->denoiser.nmse_source_diff >
  1119. 70 * cpi->denoiser.threshold_aggressive_mode / 100) {
  1120. is_noisy = 1;
  1121. }
  1122. } else {
  1123. if (cpi->mse_source_denoised > 1000) is_noisy = 1;
  1124. }
  1125. x->increase_denoising = 0;
  1126. if (!x->is_skin && x->best_sse_inter_mode == ZEROMV &&
  1127. (x->best_reference_frame == LAST_FRAME ||
  1128. x->best_reference_frame == cpi->closest_reference_frame) &&
  1129. cpi->consec_zero_last[block_index] >= 20 && is_noisy) {
  1130. x->increase_denoising = 1;
  1131. }
  1132. x->denoise_zeromv = 0;
  1133. vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
  1134. recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
  1135. mb_row, mb_col, block_index,
  1136. cpi->consec_zero_last_mvbias[block_index]);
  1137. // Reevaluate ZEROMV after denoising: for large noise content
  1138. // (i.e., cpi->mse_source_denoised is above threshold), do this for all
  1139. // blocks that did not pick ZEROMV as best mode but are using ZEROMV
  1140. // for denoising. Otherwise, always re-evaluate for blocks that picked
  1141. // INTRA mode as best mode.
  1142. // Avoid blocks that have been biased against ZERO_LAST
  1143. // (i.e., dot artifact candidate blocks).
  1144. reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
  1145. (best_mbmode.mode != ZEROMV && x->denoise_zeromv &&
  1146. cpi->mse_source_denoised > 2000);
  1147. if (!dot_artifact_candidate && reevaluate &&
  1148. x->best_zeromv_reference_frame != INTRA_FRAME) {
  1149. int this_rd = 0;
  1150. int this_ref_frame = x->best_zeromv_reference_frame;
  1151. rd_adjustment = 100;
  1152. rate2 =
  1153. x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
  1154. distortion2 = 0;
  1155. /* set up the proper prediction buffers for the frame */
  1156. x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
  1157. x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
  1158. x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
  1159. x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
  1160. x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
  1161. x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
  1162. x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
  1163. this_rd =
  1164. evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment);
  1165. if (this_rd < best_rd) {
  1166. memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
  1167. sizeof(MB_MODE_INFO));
  1168. }
  1169. }
  1170. }
  1171. #endif
  1172. if (cpi->is_src_frame_alt_ref &&
  1173. (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
  1174. x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
  1175. x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
  1176. x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
  1177. x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
  1178. x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
  1179. (cpi->common.mb_no_coeff_skip);
  1180. x->e_mbd.mode_info_context->mbmi.partitioning = 0;
  1181. return;
  1182. }
  1183. /* Set to the best mb mode; this copy can be skipped if x->skip since it
  1184. * already has the right content. */
  1185. if (!x->skip) {
  1186. memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
  1187. sizeof(MB_MODE_INFO));
  1188. }
  1189. if (best_mbmode.mode <= B_PRED) {
  1190. /* set mode_info_context->mbmi.uv_mode */
  1191. pick_intra_mbuv_mode(x);
  1192. }
  1193. if (sign_bias !=
  1194. cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
  1195. best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
  1196. }
  1197. update_mvcount(x, &best_ref_mv);
  1198. }
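/* Intra mode selection without inter candidates: pick the chroma mode,
 * score the four 16x16 luma modes with RDCOST(), then compare the best
 * 16x16 result against B_PRED (per-4x4 intra prediction). */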
  1199. void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
  1200. int error4x4, error16x16 = INT_MAX;
  1201. int rate, best_rate = 0, distortion, best_sse;
  1202. MB_PREDICTION_MODE mode, best_mode = DC_PRED;
  1203. int this_rd;
  1204. unsigned int sse;
  1205. BLOCK *b = &x->block[0];
  1206. MACROBLOCKD *xd = &x->e_mbd;
  1207. xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
  1208. pick_intra_mbuv_mode(x);
  1209. for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
  1210. xd->mode_info_context->mbmi.mode = mode;
  1211. vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
  1212. xd->dst.y_buffer - 1, xd->dst.y_stride,
  1213. xd->predictor, 16);
  1214. distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
  1215. 16, &sse);
  1216. rate = x->mbmode_cost[xd->frame_type][mode];
  1217. this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
  1218. if (error16x16 > this_rd) {
  1219. error16x16 = this_rd;
  1220. best_mode = mode;
  1221. best_sse = sse;
  1222. best_rate = rate;
  1223. }
  1224. }
  1225. xd->mode_info_context->mbmi.mode = best_mode;
  1226. error4x4 = pick_intra4x4mby_modes(x, &rate, &best_sse);
  1227. if (error4x4 < error16x16) {
  1228. xd->mode_info_context->mbmi.mode = B_PRED;
  1229. best_rate = rate;
  1230. }
  1231. *rate_ = best_rate;
  1232. }